repo_name | path | copies | size | content | license
stringlengths 6-100 | stringlengths 4-294 | stringlengths 1-5 | stringlengths 4-6 | stringlengths 606-896k | stringclasses 15 values
---|---|---|---|---|---|
lispc/Paddle | v1_api_demo/quick_start/dataprovider_emb.py | 10 | 1953 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
UNK_IDX = 0
def initializer(settings, dictionary, **kwargs):
settings.word_dict = dictionary
settings.input_types = {
# Define the type of the first input as a sequence of integers.
# The values of the integers range from 0 to len(dictionary) - 1
'word': integer_value_sequence(len(dictionary)),
# Define the second input for label id
'label': integer_value(2)
}
@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, file_name):
with open(file_name, 'r') as f:
for line in f:
label, comment = line.strip().split('\t')
words = comment.split()
word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words]
yield {'word': word_slot, 'label': int(label)}
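# Note (illustrative, not part of the original file): each line of the input
# file is expected to be tab-separated, e.g.
#     1\tthis movie is great
# where the first field is the label id (0 or 1, matching integer_value(2))
# and the second field is the whitespace-tokenized comment text.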
def predict_initializer(settings, dictionary, **kwargs):
settings.word_dict = dictionary
settings.input_types = {'word': integer_value_sequence(len(dictionary))}
@provider(init_hook=predict_initializer, should_shuffle=False)
def process_predict(settings, file_name):
with open(file_name, 'r') as f:
for line in f:
comment = line.strip().split()
word_slot = [settings.word_dict.get(w, UNK_IDX) for w in comment]
yield {'word': word_slot}
| apache-2.0 |
asadziach/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
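# Illustrative usage sketch (the DataFrame contents and the estimator call are
# assumptions, not part of this module):
#
#     import pandas as pd
#     features = pd.DataFrame({'age': [21, 35], 'height': [1.7, 1.8]})
#     labels = pd.Series([0, 1])
#     input_fn = pandas_input_fn(x=features, y=labels, batch_size=2,
#                                num_epochs=1, shuffle=False)
#     # e.g. estimator.fit(input_fn=input_fn) for a tf.contrib.learn estimator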
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
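# Illustrative example (assumed data, not part of the original module):
#
#     df = pd.DataFrame({'a': [1, 2], 'b': [True, False]})
#     extract_pandas_data(df)   # -> array([[1., 1.], [2., 0.]])
#
# A column of dtype 'object' (e.g. strings) would raise ValueError instead.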
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
tareqalayan/pytest | _pytest/monkeypatch.py | 1 | 9043 | """ monkeypatching and mocking functionality. """
from __future__ import absolute_import, division, print_function
import os
import sys
import re
import six
from _pytest.fixtures import fixture
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
@fixture
def monkeypatch():
"""The returned ``monkeypatch`` fixture provides these
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=False)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function or fixture has finished. The ``raising``
parameter determines if a KeyError or AttributeError
will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
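# Illustrative usage sketch (the test body is an assumption, not part of pytest):
#
#     def test_fake_home(monkeypatch, tmpdir):
#         monkeypatch.setenv("HOME", str(tmpdir))            # undone after the test
#         monkeypatch.setattr("os.path.expanduser", lambda p: str(tmpdir))
#         assert os.path.expanduser("~") == str(tmpdir)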
def resolve(name):
# simplified from zope.dottedname
parts = name.split('.')
used = parts.pop(0)
found = __import__(used)
for part in parts:
used += '.' + part
try:
found = getattr(found, part)
except AttributeError:
pass
else:
continue
# we use explicit un-nesting of the handling block in order
# to avoid nested exceptions on python 3
try:
__import__(used)
except ImportError as ex:
# str is used for py2 vs py3
expected = str(ex).split()[-1]
if expected == used:
raise
else:
raise ImportError(
'import error in %s: %s' % (used, ex)
)
found = annotated_getattr(found, part, used)
return found
def annotated_getattr(obj, name, ann):
try:
obj = getattr(obj, name)
except AttributeError:
raise AttributeError(
'%r object at %s has no attribute %r' % (
type(obj).__name__, ann, name
)
)
return obj
def derive_importpath(import_path, raising):
if not isinstance(import_path, six.string_types) or "." not in import_path:
raise TypeError("must be absolute import path string, not %r" %
(import_path,))
module, attr = import_path.rsplit('.', 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
return attr, target
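# For example (illustrative): derive_importpath("os.path.abspath", raising=True)
# imports ``os.path`` and returns ("abspath", <module 'os.path'>).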
class Notset(object):
def __repr__(self):
return "<notset>"
notset = Notset()
class MonkeyPatch(object):
""" Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
"""
def __init__(self):
self._setattr = []
self._setitem = []
self._cwd = None
self._savesyspath = None
def setattr(self, target, name, value=notset, raising=True):
""" Set attribute value on target, memorizing the old value.
By default raise AttributeError if the attribute did not exist.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name. Example:
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
The ``raising`` value determines if the setattr should fail
if the attribute is not already present (defaults to True
which means it will raise).
"""
__tracebackhide__ = True
import inspect
if value is notset:
if not isinstance(target, six.string_types):
raise TypeError("use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string")
value = name
name, target = derive_importpath(target, raising)
oldval = getattr(target, name, notset)
if raising and oldval is notset:
raise AttributeError("%r has no attribute %r" % (target, name))
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(self, target, name=notset, raising=True):
""" Delete attribute ``name`` from ``target``, by default raise
AttributeError it the attribute did not previously exist.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
If ``raising`` is set to False, no exception will be raised if the
attribute is missing.
"""
__tracebackhide__ = True
if name is notset:
if not isinstance(target, six.string_types):
raise TypeError("use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string")
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
self._setattr.append((target, name, getattr(target, name, notset)))
delattr(target, name)
def setitem(self, dic, name, value):
""" Set dictionary entry ``name`` to value. """
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic, name, raising=True):
""" Delete ``name`` from dict. Raise KeyError if it doesn't exist.
If ``raising`` is set to False, no exception will be raised if the
key is missing.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def setenv(self, name, value, prepend=None):
""" Set environment variable ``name`` to ``value``. If ``prepend``
is a character, read the current environment variable value
and prepend the ``value`` adjoined with the ``prepend`` character."""
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self.setitem(os.environ, name, value)
def delenv(self, name, raising=True):
""" Delete ``name`` from the environment. Raise KeyError it does not
exist.
If ``raising`` is set to False, no exception will be raised if the
environment variable is missing.
"""
self.delitem(os.environ, name, raising=raising)
def syspath_prepend(self, path):
""" Prepend ``path`` to ``sys.path`` list of import locations. """
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
def chdir(self, path):
""" Change the current working directory to the specified path.
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
self._cwd = os.getcwd()
if hasattr(path, "chdir"):
path.chdir()
else:
os.chdir(path)
def undo(self):
""" Undo previous changes. This call consumes the
undo stack. Calling it a second time has no effect unless
you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""
for obj, name, value in reversed(self._setattr):
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, name, value in reversed(self._setitem):
if value is notset:
try:
del dictionary[name]
except KeyError:
pass # was already deleted, so we have the desired state
else:
dictionary[name] = value
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None
| mit |
mpobrien/see | see.py | 1 | 6339 | #!/usr/bin/env python
"""
see
A human alternative to dir().
>>> from see import see
>>> help(see)
Copyright (c) 2009 Liam Cooke
http://inky.github.com/see/
Licensed under the GNU General Public License v3. {{{
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
}}}
"""
import fnmatch
import inspect
import re
import sys
import textwrap
__all__ = ['see']
__author__ = 'Liam Cooke'
__contributors__ = [
'Bob Farrell',
'Gabriel Genellina',
'Baishampayan Ghose',
'Charlie Nolan',
'Ed Page',
'guff',
'jdunck',
]
__version__ = '0.5.4'
__copyright__ = 'Copyright (c) 2009 Liam Cooke'
__license__ = 'GNU General Public License v3'
def regex_filter(names, pat):
pat = re.compile(pat)
def match(name, fn=pat.search):
return fn(name) is not None
return tuple(filter(match, names))
def fn_filter(names, pat):
def match(name, fn=fnmatch.fnmatch, pat=pat):
return fn(name, pat)
return tuple(filter(match, names))
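# For example (illustrative): fn_filter(('.append()', '.count()'), '*coun*')
# returns ('.count()',); regex_filter behaves the same way but takes a
# regular expression instead of a shell-style pattern.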
class _SeeOutput(tuple):
"""Tuple-like object with a pretty string representation."""
def __new__(self, actions=None):
return tuple.__new__(self, actions or [])
def __repr__(self):
return textwrap.fill(' '.join(self), 78,
initial_indent=' ',
subsequent_indent=' ')
class _SeeDefault(object):
def __repr__(self):
return 'anything'
_LOCALS = _SeeDefault()
def see(obj=_LOCALS, pattern=None, r=None):
"""
Inspect an object. Like the dir() builtin, but easier on the eyes.
Keyword arguments (all optional):
obj -- object to be inspected
pattern -- shell-style search pattern (e.g. '*len*')
r -- regular expression
If obj is omitted, objects in the current scope are listed instead.
Some unique symbols are used:
.* implements obj.anything
[] implements obj[key]
in implements membership tests (e.g. x in obj)
+obj unary positive operator (e.g. +2)
-obj unary negative operator (e.g. -2)
"""
use_locals = obj is _LOCALS
actions = []
dot = not use_locals and '.' or ''
func = lambda f: hasattr(f, '__call__') and '()' or ''
name = lambda a, f: ''.join((dot, a, func(f)))
if use_locals:
obj.__dict__ = inspect.currentframe().f_back.f_locals
attrs = dir(obj)
if not use_locals:
for var, symbol in SYMBOLS:
if var not in attrs or symbol in actions:
continue
elif var == '__doc__':
if not obj.__doc__ or not obj.__doc__.strip():
continue
actions.append(symbol)
for attr in filter(lambda a: not a.startswith('_'), attrs):
try:
prop = getattr(obj, attr)
except AttributeError:
continue
actions.append(name(attr, prop))
if pattern is not None:
actions = fn_filter(actions, pattern)
if r is not None:
actions = regex_filter(actions, r)
return _SeeOutput(actions)
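# Illustrative example (output abridged; the exact symbols shown depend on the
# Python version and on the object being inspected):
#
#     >>> from see import see
#     >>> see([])
#         [] in + += * *= < <= == != > >= iter() len() repr() str()
#         .append() .count() .extend() .index() .insert() ...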
PY_300 = sys.version_info >= (3, 0)
PY_301 = sys.version_info >= (3, 0, 1)
SYMBOLS = tuple(filter(lambda x: x[0], (
# callable
('__call__', '()'),
# element/attribute access
('__getattr__', '.*'),
('__getitem__', '[]'),
('__setitem__', '[]'),
('__delitem__', '[]'),
# iteration
('__enter__', 'with'),
('__exit__', 'with'),
('__contains__', 'in'),
# operators
('__add__', '+'),
('__radd__', '+'),
('__iadd__', '+='),
('__sub__', '-'),
('__rsub__', '-'),
('__isub__', '-='),
('__mul__', '*'),
('__rmul__', '*'),
('__imul__', '*='),
(not PY_300 and '__div__', '/'),
(not PY_301 and '__rdiv__', '/'),
('__truediv__', '/'),
('__rtruediv__', '/'),
('__floordiv__', '//'),
('__rfloordiv__', '//'),
(not PY_300 and '__idiv__', '/='),
('__itruediv__', '/='),
('__ifloordiv__', '//='),
('__mod__', '%'),
('__rmod__', '%'),
('__divmod__', '%'),
('__imod__', '%='),
('__pow__', '**'),
('__rpow__', '**'),
('__ipow__', '**='),
('__lshift__', '<<'),
('__rlshift__', '<<'),
('__ilshift__', '<<='),
('__rshift__', '>>'),
('__rrshift__', '>>'),
('__irshift__', '>>='),
('__and__', '&'),
('__rand__', '&'),
('__iand__', '&='),
('__xor__', '^'),
('__rxor__', '^'),
('__ixor__', '^='),
('__or__', '|'),
('__ror__', '|'),
('__ior__', '|='),
('__pos__', '+obj'),
('__neg__', '-obj'),
('__invert__', '~'),
('__lt__', '<'),
(not PY_301 and '__cmp__', '<'),
('__le__', '<='),
(not PY_301 and '__cmp__', '<='),
('__eq__', '=='),
(not PY_301 and '__cmp__', '=='),
('__ne__', '!='),
(not PY_301 and '__cmp__', '!='),
('__gt__', '>'),
(not PY_301 and '__cmp__', '>'),
('__ge__', '>='),
(not PY_301 and '__cmp__', '>='),
# built-in functions
('__abs__', 'abs()'),
(PY_300 and '__bool__' or '__nonzero__', 'bool()'),
('__complex__', 'complex()'),
(PY_300 and '__dir__', 'dir()'),
('__divmod__', 'divmod()'),
('__rdivmod__', 'divmod()'),
('__float__', 'float()'),
('__hash__', 'hash()'),
('__doc__', 'help()'),
(PY_300 and '__index__' or '__hex__', 'hex()'),
('__int__', 'int()'),
('__iter__', 'iter()'),
('__len__', 'len()'),
(not PY_300 and '__long__', 'long()'),
(PY_300 and '__index__' or '__oct__', 'oct()'),
('__repr__', 'repr()'),
('__reversed__', 'reversed()'),
(PY_300 and '__round__', 'round()'),
('__str__', 'str()'),
(PY_300 and '__unicode__', 'unicode()'),
)))
if __name__ == '__main__':
help(see)
# vim: expandtab tabstop=4 shiftround shiftwidth=4 fdm=marker
| gpl-3.0 |
bkillenit/AbletonAPI | python-api-materials/code/Hack_LiveCurses/rpyc/utils/ssh.py | 3 | 10267 | import os
from subprocess import Popen, PIPE
from rpyc.lib import safe_import
from rpyc.lib.compat import BYTES_LITERAL
signal = safe_import("signal")
# modified from the stdlib pipes module for windows
_safechars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@%_-+=:,./'
_funnychars = '"`$\\'
def shquote(text):
if not text:
return "''"
for c in text:
if c not in _safechars:
break
else:
return text
if "'" not in text:
return "'" + text + "'"
def escaped(c):
if c in _funnychars:
return '\\' + c
else:
return c
res = "".join(escaped(c) for c in text)
return '"' + res + '"'
class ProcessExecutionError(Exception):
"""raised by :func:`SshContext.execute` should the executed process
terminate with an error"""
pass
import subprocess
def _get_startupinfo():
if subprocess.mswindows:
import _subprocess
sui = subprocess.STARTUPINFO()
sui.dwFlags |= _subprocess.STARTF_USESHOWWINDOW #@UndefinedVariable
sui.wShowWindow = _subprocess.SW_HIDE #@UndefinedVariable
return sui
else:
return None
class SshTunnel(object):
"""
Represents an active SSH tunnel (as created by ``ssh -L``).
.. note::
Do not instantiate this class yourself -- use the :func:`SshContext.tunnel`
function for that.
"""
PROGRAM = r"""import sys;sys.stdout.write("ready\n\n\n");sys.stdout.flush();sys.stdin.readline()"""
def __init__(self, sshctx, loc_host, loc_port, rem_host, rem_port):
self.loc_host = loc_host
self.loc_port = loc_port
self.rem_host = rem_host
self.rem_port = rem_port
self.sshctx = sshctx
self.proc = sshctx.popen("python", "-u", "-c", self.PROGRAM,
L = "[%s]:%s:[%s]:%s" % (loc_host, loc_port, rem_host, rem_port))
banner = self.proc.stdout.readline().strip()
if banner != BYTES_LITERAL("ready"):
raise ValueError("tunnel failed", banner)
def __del__(self):
try:
self.close()
except Exception:
pass
def __str__(self):
return "%s:%s --> (%s)%s:%s" % (self.loc_host, self.loc_port, self.sshctx.host,
self.rem_host, self.rem_port)
def is_open(self):
"""returns True if the ``ssh`` process is alive, False otherwise"""
return self.proc and self.proc.poll() is None
def close(self):
"""closes (terminates) the SSH tunnel"""
if not self.is_open():
return
self.proc.stdin.write(BYTES_LITERAL("foo\n\n\n"))
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.stderr.close()
try:
self.proc.kill()
except AttributeError:
if signal:
os.kill(self.proc.pid, signal.SIGTERM)
self.proc.wait()
self.proc = None
class SshContext(object):
"""
An *SSH context* encapsulates all the details required to establish an SSH
connection to other host. It includes the host name, user name, TCP port,
identity file, etc.
Once constructed, it can serve as a factory for SSH operations, such as
executing a remote program and getting its stdout, or uploading/downloading
files using ``scp``. It also serves for creating SSH tunnels.
Example::
>>> sshctx = SshContext("mymachine", username="borg", keyfile="/home/foo/.ssh/mymachine-id")
>>> sshctx.execute("ls")
(0, "...", "")
"""
def __init__(self, host, user = None, port = None, keyfile = None,
ssh_program = "ssh", ssh_env = None, ssh_cwd = None,
scp_program = "scp", scp_env = None, scp_cwd = None):
self.host = host
self.user = user
self.port = port
self.keyfile = keyfile
self.ssh_program = ssh_program
self.ssh_env = ssh_env
self.ssh_cwd = ssh_cwd
self.scp_program = scp_program
self.scp_env = scp_env
self.scp_cwd = scp_cwd
def __str__(self):
uri = "ssh://"
if self.user:
uri += "%s@%s" % (self.user, self.host)
else:
uri += self.host
if self.port:
uri += ":%d" % (self.port)
return uri
def _convert_kwargs_to_args(self, kwargs):
args = []
for k, v in kwargs.items():
if v is True:
args.append("-%s" % (k,))
elif v is False:
pass
else:
args.append("-%s" % (k,))
args.append(str(v))
return args
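# For example (illustrative): _convert_kwargs_to_args({'i': 'key.pem', 'v': True})
# yields ['-i', 'key.pem', '-v']; a value of False drops the flag entirely
# (the ordering follows dict iteration order).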
def _process_scp_cmdline(self, kwargs):
args = [self.scp_program]
if "r" not in kwargs:
kwargs["r"] = True
if self.keyfile and "i" not in kwargs:
kwargs["i"] = self.keyfile
if self.port and "P" not in kwargs:
kwargs["P"] = self.port
args.extend(self._convert_kwargs_to_args(kwargs))
if self.user:
host = "%s@%s" % (self.user, self.host)
else:
host = self.host
return args, host
def _process_ssh_cmdline(self, kwargs):
args = [self.ssh_program]
if self.keyfile and "i" not in kwargs:
kwargs["i"] = self.keyfile
if self.port and "p" not in kwargs:
kwargs["p"] = self.port
args.extend(self._convert_kwargs_to_args(kwargs))
if self.user:
args.append("%s@%s" % (self.user, self.host))
else:
args.append(self.host)
return args
def popen(self, *args, **kwargs):
"""Runs the given command line remotely (over SSH), returning the
``subprocess.Popen`` instance of the command
:param args: the command line arguments
:param kwargs: additional keyword arguments passed to ``ssh``
:returns: a ``Popen`` instance
Example::
proc = ctx.popen("ls", "-la")
proc.wait()
"""
cmdline = self._process_ssh_cmdline(kwargs)
cmdline.extend(shquote(a) for a in args)
return Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE,
cwd = self.ssh_cwd, env = self.ssh_env, shell = False,
startupinfo = _get_startupinfo())
def execute(self, *args, **kwargs):
"""Runs the given command line remotely (over SSH), waits for it to finish,
returning the return code, stdout, and stderr of the executed process.
:param args: the command line arguments
:param kwargs: additional keyword arguments passed to ``ssh``, except for
``retcode`` and ``input``.
:param retcode: *keyword only*, the expected return code (Defaults to 0
-- success). An exception is raised if the return code does
not match the expected one, unless it is ``None``, in
which case it will not be tested.
:param input: *keyword only*, an input string that will be passed to
``Popen.communicate``. Defaults to ``None``
:raises: :class:`ProcessExecutionError` if the expected return code
is not matched
:returns: a tuple of (return code, stdout, stderr)
Example::
rc, out, err = ctx.execute("ls", "-la")
"""
retcode = kwargs.pop("retcode", 0)
input = kwargs.pop("input", None)
proc = self.popen(*args, **kwargs)
stdout, stderr = proc.communicate(input)
if retcode is not None and proc.returncode != retcode:
raise ProcessExecutionError(proc.returncode, stdout, stderr)
return proc.returncode, stdout, stderr
def upload(self, src, dst, **kwargs):
"""
Uploads *src* from the local machine to *dst* on the other side. By default,
``-r`` (recursive copy) is given to ``scp``, so *src* can be either a file or
a directory. To override this behavior, pass ``r = False`` as a keyword argument.
:param src: the source path (on the local side)
:param dst: the destination path (on the remote side)
:param kwargs: any additional keyword arguments, passed to ``scp``.
"""
cmdline, host = self._process_scp_cmdline(kwargs)
cmdline.append(src)
cmdline.append("%s:%s" % (host, dst))
proc = Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = False,
cwd = self.scp_cwd, env = self.scp_env, startupinfo = _get_startupinfo())
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise ValueError("upload failed", stdout, stderr)
def download(self, src, dst, **kwargs):
"""
Downloads *src* from the other side to *dst* on the local side. By default,
``-r`` (recursive copy) is given to ``scp``, so *src* can be either a file or
a directory. To override this behavior, pass ``r = False`` as a keyword argument.
:param src: the source path (on the other side)
:param dst: the destination path (on the local side)
:param kwargs: any additional keyword arguments, passed to ``scp``.
"""
cmdline, host = self._process_scp_cmdline(kwargs)
cmdline.append("%s:%s" % (host, src))
cmdline.append(dst)
proc = Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = False,
cwd = self.scp_cwd, env = self.scp_env, startupinfo = _get_startupinfo())
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise ValueError("upload failed", stdout, stderr)
def tunnel(self, loc_port, rem_port, loc_host = "localhost", rem_host = "localhost"):
"""
Creates an SSH tunnel from the local port to the remote one. This is
translated to ``ssh -L loc_host:loc_port:rem_host:rem_port``.
:param loc_port: the local TCP port to forward
:param rem_port: the remote (server) TCP port, to which the local port
will be forwarded
:returns: an :class:`SshTunnel` instance
"""
return SshTunnel(self, loc_host, loc_port, rem_host, rem_port)
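# Illustrative usage sketch (host name, user and ports are assumptions):
#
#     sshctx = SshContext("mymachine", user="borg", keyfile="~/.ssh/id_rsa")
#     tun = sshctx.tunnel(18812, 18812)  # ssh -L [localhost]:18812:[localhost]:18812
#     # ... connect to localhost:18812, which is forwarded to the remote side ...
#     tun.close()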
| mit |
crepererum/invenio | invenio/legacy/bibdocfile/config.py | 12 | 2801 | # This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC = {
'can_revise_doctypes': ['*'],
'can_comment_doctypes': ['*'],
'can_describe_doctypes': ['*'],
'can_delete_doctypes': ['*'],
'can_keep_doctypes': ['*'],
'can_rename_doctypes': ['*'],
'can_add_format_to_doctypes': ['*'],
'can_restrict_doctypes': ['*']}
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES = [
('Main', 'Main document'),
('LaTeX', 'LaTeX'),
('Source', 'Source'),
('Additional', 'Additional File'),
('Audio', 'Audio file'),
('Video', 'Video file'),
('Script', 'Script'),
('Data', 'Data'),
('Figure', 'Figure'),
('Schema', 'Schema'),
('Graph', 'Graph'),
('Image', 'Image'),
('Drawing', 'Drawing'),
('Slides', 'Slides')]
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS = [
('', 'Public'),
('restricted', 'Restricted')]
# CFG_BIBDOCFILE_ICON_SUBFORMAT_RE -- a subformat is an Invenio concept to give
# file formats more semantic. For example "foo.gif;icon" has ".gif;icon"
# 'format', ".gif" 'superformat' and "icon" 'subformat'. That means that this
# particular format/instance of the "foo" document, not only is a ".gif" but
# is in the shape of an "icon", i.e. most probably it will be low-resolution.
# This configuration variable let the administrator to decide which implicit
# convention will be used to know which formats will be meant to be used
# as an icon.
CFG_BIBDOCFILE_ICON_SUBFORMAT_RE = re.compile(r"icon.*")
# CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT -- this is the default subformat used
# when creating new icons.
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT = "icon"
| gpl-2.0 |
JASchilz/RoverMUD | simple_universe/attachments/ooc_commands.py | 1 | 1072 | from basics import BaseAttachment
class OOCComands(BaseAttachment):
character = False
def __init__(self, character):
self.character = character
self.action_matrix = [
["help", self.do_help,
"Open the help screen, or receive help on a specific command. \
\n\tExamples: 'help', 'help quit'"],
["quit", self.do_quit, "Quit the game."],
["health", self.do_health, "Assess your health.\n\tAliases: 'h'."]
]
def do_help(self, rest):
output = "Help Information\n\nCOMMAND\tDESCRIPTION"
for attachment in self.character.attachments:
output += "\n"
for action in attachment.action_matrix:
output += action[0] + "\t" + action[2] + "\n"
self.character.brain.to_client.append(output)
def do_quit(self, rest):
self.character.brain.client.active = False
def do_health(self, rest):
self.character.brain.to_client.append("You have " + str(self.character.current_hp) + " hit points.")
| apache-2.0 |
ylatuya/Flumotion | flumotion/test/test_component.py | 4 | 5295 | # -*- Mode: Python; test-case-name: flumotion.test.test_component -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gobject
from twisted.trial import unittest
from flumotion.common import testsuite
from flumotion.common import errors
from flumotion.component.feedcomponent import ParseLaunchComponent
class PipelineTest(ParseLaunchComponent):
def __init__(self, eaters=None, feeders=None, pipeline='test-pipeline'):
self.__pipeline = pipeline
self._eater = eaters or {}
self._feed = feeders or []
config = {'name': 'fake',
'avatarId': '/default/fake',
'eater': self._eater,
'feed': self._feed,
'plugs': {},
'properties': {},
# clock master prevents the comp from being
# instantiated
'clock-master': '/some/component'}
ParseLaunchComponent.__init__(self, config)
def create_pipeline(self):
unparsed = self.__pipeline
self.pipeline_string = self.parse_pipeline(unparsed)
try:
# don't bother creating a gstreamer pipeline
# pipeline = gst.parse_launch(self.pipeline_string)
return None
except gobject.GError, e:
self.warning('Could not parse pipeline: %s' % e.message)
raise errors.PipelineParseError(e.message)
def connect_feeders(self, pipeline):
pass
def set_pipeline(self, pipeline):
self.pipeline = pipeline
class TestExpandElementNames(testsuite.TestCase):
def setUp(self):
self.p = PipelineTest([], [])
def tearDown(self):
return self.p.stop()
def testOddDelimeters(self):
self.assertRaises(TypeError, self.p.parse_pipeline,
'@ this:is:wrong @ ! because ! @')
class TestParser(testsuite.TestCase):
def parse(self, unparsed, correctresultproc, eaters=None, feeders=None):
comp = PipelineTest(eaters, feeders, unparsed)
result = comp.parse_pipeline(unparsed)
self.assertEquals(result, correctresultproc(comp))
comp.stop()
def testSimpleOneElement(self):
self.parse('foobar', lambda p: 'foobar')
def testSimpleTwoElements(self):
self.parse('foo ! bar', lambda p: 'foo ! bar')
def testOneSource(self):
self.parse('@eater:default@ ! bar',
lambda p: '%s ! bar' % (p.get_eater_template('default')),
{'qux': [('foo:bar', 'default')]})
def testOneSourceWithout(self):
self.parse('bar',
lambda p: '%s ! bar' % (p.get_eater_template('default')),
{'qux': [('foo:quoi', 'default')]})
def testOneFeed(self):
self.parse('foo ! @feeder:bar@',
lambda p: 'foo ! %s' % (p.get_feeder_template('bar')),
{}, ['bar'])
def testOneFeedWithout(self):
self.parse('foo',
lambda p: 'foo ! %s' % (p.get_feeder_template('bar')),
{}, ['bar'])
def testTwoSources(self):
self.parse('@eater:foo@ ! @eater:bar@ ! baz',
lambda p: '%s ! %s ! baz' % (p.get_eater_template('foo'),
p.get_eater_template('bar')),
{'qux': [('baz:default', 'foo')],
'zag': [('qux:default', 'bar')]})
def testTwoFeeds(self):
self.parse('foo ! @feeder:bar@ ! @feeder:baz@',
lambda p: 'foo ! %s ! %s' % (p.get_feeder_template('bar'),
p.get_feeder_template('baz')),
{}, ['bar', 'baz'])
def testTwoBoth(self):
self.parse(
'@eater:src1@ ! @eater:src2@ ! @feeder:feed1@ ! @feeder:feed2@',
lambda p: '%s ! %s ! %s ! %s' % (p.get_eater_template('src1'),
p.get_eater_template('src2'),
p.get_feeder_template('feed1'),
p.get_feeder_template('feed2')),
{'qux': [('comp1:default', 'src1')],
'zag': [('comp2:default', 'src2')]},
['feed1', 'feed2'])
def testErrors(self):
comp = PipelineTest(None, None, '')
d = self.assertFailure(comp.waitForHappy(), errors.ComponentStartError)
d.addCallback(lambda _: comp.stop())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
appliedx/edx-platform | common/lib/capa/capa/safe_exec/tests/test_lazymod.py | 152 | 1667 | """Test lazymod.py"""
import sys
import unittest
from capa.safe_exec.lazymod import LazyModule
class ModuleIsolation(object):
"""
Manage changes to sys.modules so that we can roll back imported modules.
Create this object, it will snapshot the currently imported modules. When
you call `clean_up()`, it will delete any module imported since its creation.
"""
def __init__(self):
# Save all the names of all the imported modules.
self.mods = set(sys.modules)
def clean_up(self):
# Get a list of modules that didn't exist when we were created
new_mods = [m for m in sys.modules if m not in self.mods]
# and delete them all so another import will run code for real again.
for m in new_mods:
del sys.modules[m]
class TestLazyMod(unittest.TestCase):
def setUp(self):
super(TestLazyMod, self).setUp()
# Each test will remove modules that it imported.
self.addCleanup(ModuleIsolation().clean_up)
def test_simple(self):
# Import some stdlib module that has not been imported before
self.assertNotIn("colorsys", sys.modules)
colorsys = LazyModule("colorsys")
hsv = colorsys.rgb_to_hsv(.3, .4, .2)
self.assertEqual(hsv[0], 0.25)
def test_dotted(self):
# wsgiref is a module with submodules that is not already imported.
# Any similar module would do. This test demonstrates that the module
# is not already imported.
self.assertNotIn("wsgiref.util", sys.modules)
wsgiref_util = LazyModule("wsgiref.util")
self.assertEqual(wsgiref_util.guess_scheme({}), "http")
| agpl-3.0 |
nophead/Skeinforge50plus | fabmetheus_utilities/geometry/geometry_utilities/evaluate_fundamentals/_math.py | 13 | 2590 | """
Boolean geometry utilities.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalNativeFunctions = 'acos asin atan atan2 ceil cos cosh degrees e exp fabs floor fmod frexp hypot'.split()
globalNativeFunctions += 'ldexp log log10 modf pi pow radians sin sinh sqrt tan tanh trunc'.split()
globalNativeFunctionSet = set(globalNativeFunctions)
#Constants from: http://www.physlink.com/reference/MathConstants.cfm
#Tau is from: http://tauday.com/
#If anyone wants to add stuff, more constants are at: http://en.wikipedia.org/wiki/Mathematical_constant
globalMathConstantDictionary = {
'euler' : 0.5772156649015328606065120,
'golden' : euclidean.globalGoldenRatio,
'goldenAngle' : euclidean.globalGoldenAngle,
'goldenRatio' : euclidean.globalGoldenRatio,
'tau' : euclidean.globalTau}
def _getAccessibleAttribute(attributeName):
'Get the accessible attribute.'
if attributeName in globalMathConstantDictionary:
return globalMathConstantDictionary[attributeName]
if attributeName in globalNativeFunctionSet:
return math.__dict__[attributeName]
if attributeName in globalAccessibleAttributeDictionary:
return globalAccessibleAttributeDictionary[attributeName]
return None
def getAbs(value):
'Get the abs.'
return abs(value)
def getBoolean(value):
'Get the boolean.'
return bool(value)
def getDivmod(x, y):
'Get the divmod.'
return divmod(x, y)
def getFloat(value):
'Get the float.'
return float(value)
def getHex(value):
'Get the hex.'
return hex(value)
def getInt(value):
'Get the int.'
return int(value)
def getLong(value):
'Get the long.'
return long(value)
def getMax(first, second):
'Get the max.'
return max(first, second)
def getMin(first, second):
'Get the min.'
return min(first, second)
def getRound(value):
'Get the round.'
return round(value)
def getString(value):
'Get the string.'
return str(value)
globalAccessibleAttributeDictionary = {
'abs' : getAbs,
'boolean' : getBoolean,
'divmod' : getDivmod,
'float' : getFloat,
'hex' : getHex,
'int' : getInt,
'long' : getLong,
'max' : getMax,
'min' : getMin,
'round' : getRound,
'string' : getString}
| agpl-3.0 |
kushG/osf.io | website/addons/dataverse/views/crud.py | 1 | 13076 | # -*- coding: utf-8 -*-
import os
import httplib
import logging
import datetime
import requests
from bs4 import BeautifulSoup
from flask import request, make_response
from framework.flask import redirect
from framework.exceptions import HTTPError
from framework.utils import secure_filename
from framework.auth.utils import privacy_info_handle
from website.addons.dataverse.client import delete_file, upload_file, \
get_file, get_file_by_id, release_study, get_study, get_dataverse, \
connect_from_settings_or_403, get_files
from website.project.decorators import must_have_permission
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_not_be_registration
from website.project.decorators import must_have_addon
from website.project.views.node import _view_project
from website.project.views.file import get_cache_content
from website.project.model import has_anonymous_link
from website.util import rubeus
from website.addons.dataverse.model import DataverseFile
from website.addons.dataverse.settings import HOST
from website.addons.base.views import check_file_guid
logger = logging.getLogger(__name__)
session = requests.Session()
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_release_study(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
now = datetime.datetime.utcnow()
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == 403:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
if study.get_state() == 'RELEASED':
raise HTTPError(httplib.CONFLICT)
release_study(study)
# Add a log
node.add_log(
action='dataverse_study_released',
params={
'project': node.parent_id,
'node': node._primary_key,
'study': study.title,
},
auth=auth,
log_date=now,
)
return {'study': study.title}, httplib.OK
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_download_file(node_addon, auth, **kwargs):
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
return redirect(url)
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_download_file_proxy(node_addon, auth, **kwargs):
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
filename, content = scrape_dataverse(file_id)
# Build response
resp = make_response(content)
resp.headers['Content-Disposition'] = 'attachment; filename={0}'.format(
filename
)
resp.headers['Content-Type'] = 'application/octet-stream'
return resp
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_get_file_info(node_addon, auth, **kwargs):
"""API view that gets info for a file."""
node = node_addon.owner
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
anonymous = has_anonymous_link(node, auth)
download_url = node.web_url_for('dataverse_download_file', path=file_id)
dataverse_url = 'http://{0}/dvn/dv/'.format(HOST) + node_addon.dataverse_alias
study_url = 'http://dx.doi.org/' + node_addon.study_hdl
delete_url = node.api_url_for('dataverse_delete_file', path=file_id)
data = {
'node': {
'id': node._id,
'title': node.title
},
'filename': scrape_dataverse(file_id, name_only=True)[0],
'dataverse': privacy_info_handle(node_addon.dataverse, anonymous),
'study': privacy_info_handle(node_addon.study, anonymous),
'urls': {
'dataverse': privacy_info_handle(dataverse_url, anonymous),
'study': privacy_info_handle(study_url, anonymous),
'download': privacy_info_handle(download_url, anonymous),
'delete': privacy_info_handle(delete_url, anonymous),
'files': node.web_url_for('collect_file_trees'),
}
}
return {'data': data}, httplib.OK
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_view_file(node_addon, auth, **kwargs):
node = node_addon.owner
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
# lazily create a file GUID record
file_obj, created = DataverseFile.get_or_create(node=node, path=file_id)
redirect_url = check_file_guid(file_obj)
if redirect_url:
return redirect(redirect_url)
# Get or create rendered file
cache_file_name = '{0}.html'.format(file_id)
rendered = get_cache_content(node_addon, cache_file_name)
if rendered is None:
filename, content = scrape_dataverse(file_id)
_, ext = os.path.splitext(filename)
download_url = node.api_url_for(
'dataverse_download_file_proxy', path=file_id
)
rendered = get_cache_content(
node_addon,
cache_file_name,
start_render=True,
remote_path=file_obj.file_id + ext, # Include extension for MFR
file_content=content,
download_url=download_url,
)
else:
filename, _ = scrape_dataverse(file_id, name_only=True)
render_url = node.api_url_for('dataverse_get_rendered_file',
path=file_id)
ret = {
'file_name': filename,
'rendered': rendered,
'render_url': render_url,
'urls': {
'render': render_url,
'download': node.web_url_for('dataverse_download_file',
path=file_id),
'info': node.api_url_for('dataverse_get_file_info',
path=file_id),
}
}
ret.update(_view_project(node, auth))
return ret
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_upload_file(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
try:
name = request.args['name']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
now = datetime.datetime.utcnow()
can_edit = node.can_edit(auth) and not node.is_registration
can_view = node.can_view(auth)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == httplib.FORBIDDEN:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
filename = secure_filename(name)
status_code = httplib.CREATED
old_id = None
# Fail if file is too small (Dataverse issue)
content = request.data
if len(content) < 5:
raise HTTPError(httplib.UNSUPPORTED_MEDIA_TYPE)
# Replace file if old version exists
old_file = get_file(study, filename)
if old_file is not None:
status_code = httplib.OK
old_id = old_file.id
delete_file(old_file)
# Check if file was deleted
if get_file_by_id(study, old_id) is not None:
raise HTTPError(httplib.BAD_REQUEST)
upload_file(study, filename, content)
file = get_file(study, filename)
if file is None:
raise HTTPError(httplib.BAD_REQUEST)
node.add_log(
action='dataverse_file_added',
params={
'project': node.parent_id,
'node': node._primary_key,
'filename': filename,
'path': node.web_url_for('dataverse_view_file', path=file.id),
'study': study.title,
},
auth=auth,
log_date=now,
)
info = {
'addon': 'dataverse',
'file_id': file.id,
'old_id': old_id,
'name': filename,
'path': filename,
'size': [
len(content),
rubeus.format_filesize(len(content))
],
rubeus.KIND: rubeus.FILE,
'urls': {
'view': node.web_url_for('dataverse_view_file',
path=file.id),
'download': node.web_url_for('dataverse_download_file',
path=file.id),
'delete': node.api_url_for('dataverse_delete_file',
path=file.id),
},
'permissions': {
'view': can_view,
'edit': can_edit,
},
}
return info, status_code
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_delete_file(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
now = datetime.datetime.utcnow()
file_id = kwargs.get('path')
if file_id is None:
raise HTTPError(httplib.NOT_FOUND)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == httplib.FORBIDDEN:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
file = get_file_by_id(study, file_id)
delete_file(file)
# Check if file was deleted
if get_file_by_id(study, file_id) is not None:
raise HTTPError(httplib.BAD_REQUEST)
node.add_log(
action='dataverse_file_removed',
params={
'project': node.parent_id,
'node': node._primary_key,
'filename': file.name,
'study': study.title,
},
auth=auth,
log_date=now,
)
return {}
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_get_rendered_file(**kwargs):
"""
"""
node_settings = kwargs['node_addon']
file_id = kwargs['path']
cache_file = '{0}.html'.format(file_id)
return get_cache_content(node_settings, cache_file)
def scrape_dataverse(file_id, name_only=False):
# Go to file url
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
response = session.head(url, allow_redirects=True) if name_only else session.get(url)
# Agree to terms if a redirect has occurred
if response.history:
response = session.get(url) if name_only else response
parsed = BeautifulSoup(response.content)
view_state = parsed.find(id='javax.faces.ViewState').attrs.get('value')
data = {
'form1': 'form1',
'javax.faces.ViewState': view_state,
'form1:termsAccepted': 'on',
'form1:termsButton': 'Continue',
}
terms_url = 'http://{0}/dvn/faces/study/TermsOfUsePage.xhtml'.format(HOST)
session.post(terms_url, data=data)
response = session.head(url) if name_only else session.get(url)
if 'content-disposition' not in response.headers.keys():
raise HTTPError(httplib.NOT_FOUND)
filename = response.headers['content-disposition'].split('"')[1]
return filename, response.content
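# Note (illustrative): when name_only=True the final request is a HEAD, so the
# returned content is empty and callers should rely only on the filename.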
def fail_if_unauthorized(node_addon, auth, file_id):
node = node_addon.owner
user_settings = node_addon.user_settings
if file_id is None:
raise HTTPError(httplib.NOT_FOUND)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == 403:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
released_file_ids = [f.id for f in get_files(study, released=True)]
all_file_ids = [f.id for f in get_files(study)] + released_file_ids
if file_id not in all_file_ids:
raise HTTPError(httplib.FORBIDDEN)
elif not node.can_edit(auth) and file_id not in released_file_ids:
raise HTTPError(httplib.UNAUTHORIZED)
def fail_if_private(file_id):
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
resp = requests.head(url)
if resp.status_code == httplib.FORBIDDEN:
raise HTTPError(
httplib.FORBIDDEN,
data={
'message_short': 'Cannot access file contents',
'message_long':
'The dataverse does not allow users to download files on ' +
'private studies at this time. Please contact the owner ' +
'of this Dataverse study for access to this file.',
}
)
| apache-2.0 |
dsolimando/Hot | hot-jython-modules/src/main/resources/test/test_inspect.py | 9 | 19658 | import sys
import types
import unittest
import inspect
import datetime
from test.test_support import TESTFN, run_unittest, is_jython
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
from test import test_support
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
# getsourcefile, getcomments, getsource, getclasstree, getargspec,
# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
# isdatadescriptor
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
import __builtin__
try:
1/0
except:
tb = sys.exc_traceback
git = mod.StupidGit()
def na_for_jython(fn):
if is_jython:
def do_nothing(*args, **kw):
pass
return do_nothing
else:
return fn
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback])
def istest(self, predicate, exp):
obj = eval(exp)
self.failUnless(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
self.failIf(other(obj), 'not %s(%s)' % (other.__name__, exp))
class TestPredicates(IsTestBase):
def test_thirteen(self):
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
# Doc/lib/libinspect.tex claims there are 13 such functions
expected = 13
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
#XXX: Jython's PySystemState needs more work before this
#will be doable.
if not test_support.is_jython:
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.isclass, 'mod.StupidGit')
self.istest(inspect.iscode, 'mod.spam.func_code')
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.istraceback, 'tb')
self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
#XXX: This detail of PyFrames is not yet supported in Jython
elif not test_support.is_jython:
self.failIf(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.failIf(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assert_(inspect.isroutine(mod.spam))
self.assert_(inspect.isroutine([].count))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assert_(len(mod.st) >= 5)
self.assertEqual(mod.st[0][1:],
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(mod.st[1][1:],
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(mod.st[2][1:],
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(mod.st[3][1:],
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(git.tr[0][1:], (modfile, 43, 'argue',
[' spam(a, b, c)\n'], 0))
self.assertEqual(git.tr[1][1:], (modfile, 9, 'spam',
[' eggs(b + d, c + f)\n'], 0))
self.assertEqual(git.tr[2][1:], (modfile, 18, 'eggs',
[' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
# TODO - test_previous_frame could be rewritten such that we could
# introspect on the previous frame but without a dependency on
# tuple unpacking
@na_for_jython
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', ['e', ['f']]])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderFile = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.source = file(inspect.getsourcefile(self.fodderFile)).read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderFile = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["__builtin__"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(inspect.getsourcefile(mod.spam), modfile)
self.assertEqual(inspect.getsourcefile(git.abuse), modfile)
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from new import module
name = '__inspect_dummy'
m = sys.modules[name] = module(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec "def x(): pass" in m.__dict__
self.assertEqual(inspect.getsourcefile(m.x.func_code), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
class TestDecorators(GetSourceBase):
fodderFile = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderFile = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderFile = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
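# Illustrative note (added for clarity, not part of the original test file):
# inspect.classify_class_attrs(cls) yields 4-tuples of
# (name, kind, defining_class, object); the helper above keeps only the first
# three fields so the tests can compare against e.g. ('m', 'method', SomeClass).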
class TestClassesAndFunctions(unittest.TestCase):
def test_classic_mro(self):
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e = None,
varkw_e = None, defaults_e = None,
formatted = None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
@na_for_jython
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted = '(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', ['e', ['f']]],
'g', 'h', (3, (4, (5,))),
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
@na_for_jython
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
@na_for_jython
def test_getargspec_sublistofone(self):
def sublistOfOne((foo,)): return 1
self.assertArgSpecEquals(sublistOfOne, [['foo']])
def fakeSublistOfOne((foo)): return 1
self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
def test_classify_oldstyle(self):
class A:
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
def test_main():
run_unittest(TestDecorators, TestRetrievingSourceCode, TestOneliners,
TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
SublimeText/PackageDev | plugins/settings/known_settings.py | 1 | 29813 | import collections
import logging
import os
import re
import textwrap
import time
import weakref
import sublime
from sublime_lib import encodings, ResourcePath
from ..lib.weakmethod import WeakMethodProxy
from ..lib import get_setting
from .region_math import VALUE_SCOPE, get_value_region_at, get_last_key_name_from
logger = logging.getLogger(__name__)
PREF_FILE = "Preferences.sublime-settings"
PREF_FILE_ALIAS = "Base File.sublime-settings"
KIND_SETTING = (sublime.KIND_ID_VARIABLE, "S", "Setting")
def html_encode(string):
"""Encode some critical characters to html entities."""
return string.replace("&", "&") \
.replace("<", "<") \
.replace(">", ">") \
.replace("\t", " ") \
.replace(" ", " ") \
.replace("\n", "<br>") if string else ""
def format_completion_item(value, default=None, is_default=False, label=None, annotation=None):
"""Create a completion item with its type as description.
Arguments:
value (any):
The value which is added when completions are committed.
If `label` is none, the `value` is used as label, too.
default (any):
Sets is_default if equals `value`.
is_default (bool):
If `True` the completion item is marked '(default)'.
label (str):
An alternative label to use to present the `value`
in the completions panel.
annotation (str):
An optional annotation to display after the label.
"""
if isinstance(value, dict):
raise ValueError("Cannot format dictionary value", value)
if not is_default:
is_default = value in default if isinstance(default, list) else value == default
type_ = type(value).__name__
return sublime.CompletionItem(
trigger=sublime.encode_value(label or value).strip('"'),
annotation=("(default) {}" if is_default else "{}").format(annotation or ""),
completion=value,
kind=(sublime.KIND_ID_SNIPPET, type_[0], type_),
)
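# --- Illustrative sketch (added for clarity, not part of the original module) ---
# A minimal example of how `format_completion_item` might be used. The boolean
# setting below is hypothetical; in the plugin the values come from the parsed
# default settings files.
def _example_boolean_completions():
    """Build completion items for a hypothetical boolean setting defaulting to True."""
    default = True
    # The item whose value equals `default` is annotated with "(default)".
    return [format_completion_item(value, default=default) for value in (True, False)]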
def decode_value(string):
"""Decode string to python object with unrestrictive booleans."""
if string.lower() == "true":
return True
if string.lower() == "false":
return False
try:
return int(string)
except ValueError:
return float(string)
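# Illustrative note (added for clarity, not part of the original module):
# `decode_value` accepts booleans in any casing and falls back to numbers, e.g.
#   decode_value("True") -> True, decode_value("FALSE") -> False,
#   decode_value("42") -> 42, decode_value("4.2") -> 4.2
# A string that is neither a boolean nor a number raises ValueError.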
class KnownSettings(object):
"""A class which provides all known settings with comments/defaults.
An object of this class initialized with a sublime-settings file loads all
basefiles from all packages including comments and default values to
provide all required information for tooltips and auto-completion.
"""
# cache for instances, keyed by the basename
# and using weakrefs for easy garbage collection
cache = weakref.WeakValueDictionary()
_is_initialized = False
_is_loaded = False
filename = None
on_loaded_callbacks = None
on_loaded_once_callbacks = None
defaults = None
comments = None
fallback_settings = None
def __new__(cls, filename, on_loaded=None, **kwargs):
# __init__ will be called on the return value
obj = cls.cache.get(filename)
if obj:
logger.debug("cache hit %r", filename)
return cls.cache[filename]
else:
obj = super().__new__(cls, **kwargs)
cls.cache[filename] = obj
return obj
def __init__(self, filename):
"""Initialize view event listener object.
Arguments:
filename (str):
Settings file name to index.
"""
# Because __init__ may be called multiple times
# and we only want to trigger a reload once,
# we need special handling here.
if not self._is_initialized:
# the associated settings file name all the settings belong to
self.filename = filename
# callback lists
self.on_loaded_callbacks = []
self.on_loaded_once_callbacks = []
self._is_initialized = True
# the dictionary with all defaults of a setting
self.defaults = collections.ChainMap()
# the dictionary with all comments of each setting
self.comments = collections.ChainMap()
self.trigger_settings_reload()
def add_on_loaded(self, on_loaded, once=False):
"""Add a callback to call once settings have been indexed (asynchronously).
Bound methods are stored as weak references.
Arguments:
on_loaded (callable):
The callback.
once (bool):
Whether the callback should be called only once.
"""
# Due to us archiving the callback, we use a weakref
# to avoid a circular reference to all SettingListeners affected,
# ensuring our __del__ is properly called when all relevant views are closed.
if self._is_loaded:
# Invoke callback 'immediately' since we're already loaded.
# Note that this is currently not thread-safe.
sublime.set_timeout_async(on_loaded, 0)
if not once:
self.on_loaded_callbacks.append(WeakMethodProxy(on_loaded))
elif not self._is_loaded:
self.on_loaded_once_callbacks.append(WeakMethodProxy(on_loaded))
def __del__(self):
logger.debug("deleting KnownSettings instance for %r", self.filename)
def __iter__(self):
"""Iterate over default keys."""
return iter(self.defaults)
def trigger_settings_reload(self):
# look for settings files asynchronously
sublime.set_timeout_async(self._load_settings, 0)
def _load_settings(self):
"""Load and merge settings and their comments from all base files.
The idea is each package which wants to add a valid entry to the
`Preferences.sublime-settings` file must provide such a file with all
keys it wants to add. These keys and the associated comments above it
are loaded into dictionaries and used to provide tooltips, completions
and linting.
"""
ignored_patterns = frozenset(("/User/", "/Preferences Editor/"))
# TODO project settings include "Preferences",
# but we don't have a syntax def for those yet
logger.debug("loading defaults and comments for %r", self.filename)
start_time = time.time()
resources = sublime.find_resources(self.filename)
resources += sublime.find_resources(self.filename + "-hints")
if self.filename == PREF_FILE:
resources += sublime.find_resources(PREF_FILE_ALIAS)
logger.debug("found %d %r files", len(resources), self.filename)
for resource in resources:
if any(ignored in resource for ignored in ignored_patterns):
logger.debug("ignoring %r", resource)
continue
try:
logger.debug("parsing %r", resource)
lines = sublime.load_resource(resource).splitlines()
for key, value in self._parse_settings(lines).items():
# merge settings without overwriting existing ones
self.defaults.setdefault(key, value)
except Exception as e:
logger.error("error parsing %r - %s%r",
resource, e.__class__.__name__, e.args)
duration = time.time() - start_time
logger.debug("loading took %.3fs", duration)
# include general settings if we're in a syntax-specific file
is_syntax_specific = self._is_syntax_specific()
if is_syntax_specific and not self.fallback_settings:
self.fallback_settings = KnownSettings(PREF_FILE)
# add fallbacks to the ChainMaps
self.defaults.maps.append(self.fallback_settings.defaults)
self.comments.maps.append(self.fallback_settings.comments)
# these may be loaded later, so delay calling our own callbacks
self.fallback_settings.add_on_loaded(self._has_loaded, once=True)
else:
if self.fallback_settings and not is_syntax_specific:
# file was renamed, probably
self.fallback_settings = None
self.defaults.maps.pop()
self.comments.maps.pop()
self._has_loaded()
def _has_loaded(self):
self._is_loaded = True
for callback in self.on_loaded_once_callbacks:
try:
callback()
except ReferenceError:
pass
self.on_loaded_once_callbacks.clear()
# copy callback list so we can clean up expired references
for callback in tuple(self.on_loaded_callbacks):
try:
callback()
except ReferenceError:
logger.debug("removing gone-away weak on_loaded_callback reference")
self.on_loaded_callbacks.remove(callback)
def _is_syntax_specific(self):
"""Check whether a syntax def with the same base file name exists.
Returns:
bool
"""
syntax_file_exts = (".sublime-syntax", ".tmLanguage")
name_no_ext = os.path.splitext(self.filename)[0]
for ext in syntax_file_exts:
syntax_file_name = name_no_ext + ext
resources = sublime.find_resources(syntax_file_name)
if resources:
logger.debug("syntax-specific settings file for %r", resources[0])
return True
return False
def _parse_settings(self, lines):
"""Parse the setting file and capture comments.
This is naive but gets the job done most of the time.
"""
content = []
comment = []
in_comment = False
for line in lines:
stripped = line.strip()
if in_comment:
if stripped.endswith("*/"):
in_comment = False
                    # remove all spaces and asterisks
line = line.rstrip("*/ \t")
if line:
comment.append(line)
elif stripped.startswith("* "):
comment.append(stripped[2:])
else:
comment.append(line)
continue
# ignore empty lines if not in a comment
# empty line in comment may be used as visual separator
elif not stripped:
continue
if stripped.startswith("/*"):
in_comment = True
                # remove all asterisks
stripped = stripped[2:].lstrip("*")
if stripped:
comment.append(stripped)
continue
if stripped.startswith("//"):
# skip comment lines ending with `//` (likely used as separators)
# a standalone `//` adds an empty line as visual separator
stripped = stripped[2:]
if not stripped or not stripped.endswith("//"):
comment.append(stripped)
continue
content.append(line)
if comment:
# the json key is used as key for the comments located above it
match = re.match(r'"((?:[^"]|\\.)*)":', stripped)
if not match:
continue
key = match.group(1)
if key not in self.comments:
self.comments[key] = textwrap.dedent('\n'.join(comment))
comment.clear()
# Return decoded json file from content with stripped comments
return sublime.decode_value('\n'.join(content))
def build_tooltip(self, view, key):
"""Return html encoded docstring for settings key.
Arguments:
view (sublime.View):
the view to provide completions for
key (string):
the key under the cursor
"""
if key in self.defaults:
# the comment for the setting
comment = html_encode(self.comments.get(key) or "No description.")
# the default value from base file
default = html_encode(
sublime.encode_value(self.defaults.get(key), pretty=True))
else:
comment, default = "No description.", "unknown setting"
# format tooltip html content
return (
"<h1>{key}</h1>"
"<h2>Default: {default}</h2>"
"<p>{comment}</p>"
).format(**locals())
def insert_snippet(self, view, key):
"""Insert a snippet for the settings key at the end of the view.
Arguments:
view (sublime.View):
The view to add the snippet to. Doesn't need to be the view
of this ViewEventHandler. It's more likely the view of the
user settings which is to be passed here.
key (string):
The settings key to insert a snippet for.
"""
# find last value in the view
value_regions = view.find_by_selector(VALUE_SCOPE)
if not value_regions:
# no value found use end of global dict
selector = "meta.mapping"
value_regions = view.find_by_selector(selector)
if not value_regions:
# no global dict found, insert one
point = view.size()
is_empty_line = not view.substr(view.line(point)).strip()
bol = "{\n\t" if is_empty_line else "\n{\n\t"
eol = ",$0\n}\n"
else:
# insert first value to user file
point = value_regions[-1].end() - 1
bol, eol = "\t", "\n"
else:
# find line with last non-whitespace characters
value_region = value_regions[-1]
value_str = view.substr(value_region)
value_str_trimmed = value_str.rstrip()
ws_length = len(value_str) - len(value_str_trimmed)
point = view.line(value_region.end() - ws_length).end()
if value_str_trimmed.endswith(","):
# already have a comma after last entry
bol, eol = "\n", ","
else:
# add a comma after last entry
bol, eol = ",\n", ""
# format and insert the snippet
snippet = self._key_snippet(key, self.defaults[key], bol, eol)
view.sel().clear()
view.sel().add(point)
view.run_command('insert_snippet', {'contents': snippet})
def key_completions(self, view, prefix, point):
"""Create a list with completions for all known settings.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
if view.match_selector(point - 1, "string"):
# we are within quotations, return words only
completions = [
sublime.CompletionItem(
trigger=key,
completion=key,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key in self.defaults
]
else:
line = view.substr(view.line(point)).strip()
# don't add newline after snippet if user starts on empty line
eol = "," if len(line) == len(prefix) else ",\n"
# no quotations -> return full snippet
completions = [
sublime.CompletionItem(
trigger=key,
completion=self._key_snippet(key, value, eol=eol),
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key, value in self.defaults.items()
]
return completions
@staticmethod
def _key_snippet(key, value, bol="", eol=",\n"):
"""Create snippet with default value depending on type.
Arguments:
key (string):
the settings key name
value (any):
the default value of the setting read from base file
bol (string):
the prefix to add to the beginning of line
eol (string):
the suffix to add to the end of line
Returns:
string: the contents field to insert into completions entry
"""
encoded = sublime.encode_value(value)
encoded = encoded.replace("\\", "\\\\") # escape snippet markers
encoded = encoded.replace("$", "\\$")
encoded = encoded.replace("}", "\\}")
if isinstance(value, str):
# create the snippet for json strings and exclude quotation marks
# from the input field {1:}
#
# "key": "value"
#
fmt = '{bol}"{key}": "${{1:{encoded}}}"{eol}'
encoded = encoded[1:-1] # strip quotation
elif isinstance(value, list):
# create the snippet for json lists and exclude brackets
# from the input field {1:}
#
# "key":
# [
# value
# ]
#
fmt = '{bol}"{key}":\n[\n\t${{1:{encoded}}}\n]{eol}'
encoded = encoded[1:-1] # strip brackets
elif isinstance(value, dict):
            # create the snippet for json dictionaries and exclude braces
# from the input field {1:}
#
# "key":
# {
# value
# }
#
fmt = '{bol}"{key}":\n{{\n\t${{1:{encoded}}}\n}}{eol}'
encoded = encoded[1:-1] # strip braces
else:
fmt = '{bol}"{key}": ${{1:{encoded}}}{eol}'
return fmt.format(**locals())
def value_completions(self, view, prefix, point):
"""Create a list with completions for all known settings values.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor.
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
value_region = get_value_region_at(view, point)
if not value_region:
logger.debug("unable to find current key region")
return None
key = get_last_key_name_from(view, value_region.begin())
if not key:
logger.debug("unable to find current key")
return None
# Use a map to deduplicate completions by trigger; latter overrides
completions_map = {c.trigger: c for c in self._value_completions_for(key)}
completions = list(completions_map.values())
if not completions:
logger.debug("no completions to offer")
return None
is_str = any(
bool(isinstance(c.completion, str)
or (isinstance(c.completion, list)
and c.completion
and isinstance(c.completion[0], str)))
for c in completions
)
in_str = view.match_selector(point, "string")
logger.debug("completing a string (%s) within a string (%s)", is_str, in_str)
is_list = isinstance(self.defaults.get(key), list)
in_list = view.match_selector(point, "meta.sequence")
logger.debug("completing a list item (%s) within a list (%s)", is_list, in_list)
if in_str and not is_str:
# We're within a string but don't have a string value to complete.
# Complain about this in the status bar, I guess.
msg = "Cannot complete value set within a string"
view.window().status_message(msg)
logger.warning(msg)
return None
if in_str and is_str:
# Strip completions of non-strings. Don't need quotation marks.
completions = [
c for c in completions
if isinstance(c.completion, str)
]
else:
# JSON-ify completion values with special handling for floats.
#
# the value typed so far, which may differ from prefix for floats
typed_region = sublime.Region(value_region.begin(), point)
typed = view.substr(typed_region).lstrip()
for c in completions:
value = c.completion
# unroll dicts
if isinstance(value, frozenset):
value = dict(value)
if isinstance(value, float):
# strip already typed text from float completions
# because ST cannot complete past word boundaries
# (e.g. strip `1.` of `1.234`)
value_str = str(value)
if value_str.startswith(typed):
offset = len(typed) - len(prefix)
value_str = value_str[offset:]
elif typed:
# don't offer as completion if 'typed' didn't match
continue
else:
value_str = sublime.encode_value(value)
if is_list and not in_list:
# wrap each item in a brackets to insert a 'list'
value_str = "[{}]".format(value_str)
# escape snippet markers
value_str = value_str.replace("$", "\\$")
c.completion = value_str
# disable word completion to prevent stupid suggestions
return completions
def _value_completions_for(self, key):
"""Collect and return value completions from matching source.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
logger.debug("building completions for key %r", key)
default = self.defaults.get(key)
logger.debug("default value: %r", default)
if key in ('color_scheme', 'dark_color_scheme', 'light_color_scheme'):
yield from self._color_scheme_completions(key, default)
elif key in ('default_encoding', 'fallback_encoding'):
yield from self._encoding_completions(default)
elif key in ('theme', 'dark_theme', 'light_theme'):
yield from self._theme_completions(key, default)
else:
yield from self._completions_from_comment(key, default)
yield from self._completions_from_default(key, default)
def _completions_from_comment(self, key, default):
"""Parse settings comments and return all possible values.
Many settings are commented with a list of quoted words representing
the possible / allowed values. This method generates a list of these
quoted words which are suggested in auto-completions.
Arguments:
key (string):
the settings key name to read comments from
default (any):
the default value of the setting used to mark completion items
as "default".
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
comment = self.comments.get(key)
if not comment:
return
for match in re.finditer(r"`([^`\n]+)`", comment):
# backticks should wrap the value in JSON representation,
# so we try to decode it
value = match.group(1)
try:
value = sublime.decode_value(value)
except ValueError:
pass
if isinstance(value, list):
# Suggest list items as completions instead of a string
# representation of the list.
# Unless it's a dict.
for v in value:
if not isinstance(v, dict):
yield format_completion_item(v, default)
elif isinstance(value, dict):
# TODO what should we do with dicts?
pass
else:
yield format_completion_item(value, default)
for match in re.finditer(r'"([\.\w]+)"', comment):
# quotation marks either wrap a string, a numeric or a boolean
# fall back to a str
value, = match.groups()
try:
value = decode_value(value)
except ValueError:
pass
yield format_completion_item(value, default)
@staticmethod
def _completions_from_default(key, default):
"""Built completions from default value.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
if default is None or default == "":
return
elif isinstance(default, bool):
for value in [True, False]:
yield format_completion_item(value, default=default)
elif isinstance(default, list):
for value in default:
yield format_completion_item(value, is_default=True)
elif isinstance(default, dict):
return # TODO can't complete these yet
else:
yield format_completion_item(default, is_default=True)
@staticmethod
def _color_scheme_completions(key, default):
"""Create completions of all visible color schemes.
The set will not include color schemes matching at least one entry of
`"settings.exclude_color_scheme_patterns": []`.
default (string):
The default `color_scheme` value.
Returns:
            {(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the color scheme
- contents (string): the value to commit to the settings
"""
if int(sublime.version()) >= 4095 and key == 'color_scheme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
hidden = get_setting('settings.exclude_color_scheme_patterns') or []
for scheme_path in sublime.find_resources("*.sublime-color-scheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(value=name, default=default, annotation=package)
for scheme_path in sublime.find_resources("*.tmTheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(
value=scheme_path, default=default, label=name, annotation=package
)
@staticmethod
def _encoding_completions(default):
"""Create completions of all available encoding values.
default (string):
The default `encoding` value.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): the encoding in sublime format
- contents (string): the encoding in sublime format
"""
for enc in encodings.SUBLIME_TO_STANDARD.keys():
yield format_completion_item(value=enc, default=default, annotation="encoding")
@staticmethod
def _theme_completions(key, default):
"""Create completions of all visible themes.
default (string):
The default `theme` value.
        The set will not include themes matching at least one entry of
`"settings.exclude_theme_patterns": []` setting.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the theme
- contents (string): the file name to commit to the settings
"""
hidden = get_setting('settings.exclude_theme_patterns') or []
if int(sublime.version()) >= 4095 and key == 'theme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
for theme_path in ResourcePath.glob_resources("*.sublime-theme"):
if not any(hide in theme_path.name for hide in hidden):
yield format_completion_item(
value=theme_path.name, default=default, annotation="theme"
)
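# --- Illustrative sketch (added for clarity, not part of the original module) ---
# How this class might be driven from other plugin code. The setting name used
# below is only an example; inside Sublime Text one instance is created per
# settings file name and shared through the weak-value cache.
def _example_known_settings_usage():
    """Create (or reuse) the KnownSettings instance for the global preferences."""
    known = KnownSettings(PREF_FILE)
    # Once the asynchronous indexing has finished, defaults and comments are
    # available, e.g.:
    #   known.defaults.get("font_face")
    #   known.build_tooltip(view, "font_face")
    return known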
| mit |
AndreyKedo/My_project_blog | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
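# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Typical usage: patch minidom while writing a project file, then restore the
# original behaviour. The output file name below is a hypothetical placeholder.
def _example_write_patched_xml(doc, path='example.vcxproj'):
  """Write `doc` (an xml.dom.minidom.Document) with the CR/LF/TAB fix applied."""
  fix = XmlFix()
  try:
    with open(path, 'w') as f:
      doc.writexml(f, encoding='utf-8')
  finally:
    fix.Cleanup()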
| gpl-3.0 |
mcgonagle/ansible_f5 | library_old/bigip_gtm_facts.py | 4 | 16069 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_gtm_facts
short_description: Collect facts from F5 BIG-IP GTM devices.
description:
- Collect facts from F5 BIG-IP GTM devices.
version_added: "2.3"
options:
include:
description:
- Fact category to collect
required: true
choices:
- pool
- wide_ip
- virtual_server
filter:
description:
- Perform regex filter of response. Filtering is done on the name of
the resource. Valid filters are anything that can be provided to
Python's C(re) module.
required: false
default: None
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Get pool facts
bigip_gtm_facts:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
include: "pool"
filter: "my_pool"
delegate_to: localhost
'''
RETURN = '''
wide_ip:
description:
Contains the lb method for the wide ip and the pools
that are within the wide ip.
returned: changed
type: dict
sample:
wide_ip:
- enabled: "True"
failure_rcode: "noerror"
failure_rcode_response: "disabled"
failure_rcode_ttl: "0"
full_path: "/Common/foo.ok.com"
last_resort_pool: ""
minimal_response: "enabled"
name: "foo.ok.com"
partition: "Common"
persist_cidr_ipv4: "32"
persist_cidr_ipv6: "128"
persistence: "disabled"
pool_lb_mode: "round-robin"
pools:
- name: "d3qw"
order: "0"
partition: "Common"
ratio: "1"
ttl_persistence: "3600"
type: "naptr"
pool:
description: Contains the pool object status and enabled status.
returned: changed
type: dict
sample:
pool:
- alternate_mode: "round-robin"
dynamic_ratio: "disabled"
enabled: "True"
fallback_mode: "return-to-dns"
full_path: "/Common/d3qw"
load_balancing_mode: "round-robin"
manual_resume: "disabled"
max_answers_returned: "1"
members:
- disabled: "True"
flags: "a"
full_path: "ok3.com"
member_order: "0"
name: "ok3.com"
order: "10"
preference: "10"
ratio: "1"
service: "80"
name: "d3qw"
partition: "Common"
qos_hit_ratio: "5"
qos_hops: "0"
qos_kilobytes_second: "3"
qos_lcs: "30"
qos_packet_rate: "1"
qos_rtt: "50"
qos_topology: "0"
qos_vs_capacity: "0"
qos_vs_score: "0"
ttl: "30"
type: "naptr"
verify_member_availability: "disabled"
virtual_server:
description:
Contains the virtual server enabled and availability
status, and address
returned: changed
type: dict
sample:
virtual_server:
- addresses:
- device_name: "/Common/qweqwe"
name: "10.10.10.10"
translation: "none"
datacenter: "/Common/xfxgh"
enabled: "True"
expose_route_domains: "no"
full_path: "/Common/qweqwe"
iq_allow_path: "yes"
iq_allow_service_check: "yes"
iq_allow_snmp: "yes"
limit_cpu_usage: "0"
limit_cpu_usage_status: "disabled"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
limit_mem_avail: "0"
limit_mem_avail_status: "disabled"
link_discovery: "disabled"
monitor: "/Common/bigip "
name: "qweqwe"
partition: "Common"
product: "single-bigip"
virtual_server_discovery: "disabled"
virtual_servers:
- destination: "10.10.10.10:0"
enabled: "True"
full_path: "jsdfhsd"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
name: "jsdfhsd"
translation_address: "none"
translation_port: "0"
'''
try:
from distutils.version import LooseVersion
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import re
class BigIpGtmFactsCommon(object):
def __init__(self):
self.api = None
self.attributes_to_remove = [
'kind', 'generation', 'selfLink', '_meta_data',
'membersReference', 'datacenterReference',
'virtualServersReference', 'nameReference'
]
self.gtm_types = dict(
a_s='a',
aaaas='aaaa',
cnames='cname',
mxs='mx',
naptrs='naptr',
srvs='srv'
)
self.request_params = dict(
params='expandSubcollections=true'
)
def is_version_less_than_12(self):
version = self.api.tmos_version
if LooseVersion(version) < LooseVersion('12.0.0'):
return True
else:
return False
def format_string_facts(self, parameters):
result = dict()
for attribute in self.attributes_to_remove:
parameters.pop(attribute, None)
for key, val in parameters.iteritems():
result[key] = str(val)
return result
def filter_matches_name(self, name):
if not self.params['filter']:
return True
matches = re.match(self.params['filter'], str(name))
if matches:
return True
else:
return False
def get_facts_from_collection(self, collection, collection_type=None):
results = []
for item in collection:
if not self.filter_matches_name(item.name):
continue
facts = self.format_facts(item, collection_type)
results.append(facts)
return results
def connect_to_bigip(self, **kwargs):
return ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
class BigIpGtmFactsPools(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsPools, self).__init__()
self.params = kwargs
def get_facts(self):
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
def get_facts_from_device(self):
try:
if self.is_version_less_than_12():
return self.get_facts_without_types()
else:
return self.get_facts_with_types()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_with_types(self):
result = []
for key, type in self.gtm_types.iteritems():
facts = self.get_all_facts_by_type(key, type)
if facts:
result.append(facts)
return result
def get_facts_without_types(self):
pools = self.api.tm.gtm.pools.get_collection(**self.request_params)
return self.get_facts_from_collection(pools)
def get_all_facts_by_type(self, key, type):
collection = getattr(self.api.tm.gtm.pools, key)
pools = collection.get_collection(**self.request_params)
return self.get_facts_from_collection(pools, type)
def format_facts(self, pool, collection_type):
result = dict()
pool_dict = pool.to_dict()
result.update(self.format_string_facts(pool_dict))
result.update(self.format_member_facts(pool))
if collection_type:
result['type'] = collection_type
return camel_dict_to_snake_dict(result)
def format_member_facts(self, pool):
result = []
        if 'items' not in pool.membersReference:
return dict(members=[])
for member in pool.membersReference['items']:
member_facts = self.format_string_facts(member)
result.append(member_facts)
return dict(members=result)
class BigIpGtmFactsWideIps(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsWideIps, self).__init__()
self.params = kwargs
def get_facts(self):
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
def get_facts_from_device(self):
try:
if self.is_version_less_than_12():
return self.get_facts_without_types()
else:
return self.get_facts_with_types()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_with_types(self):
result = []
for key, type in self.gtm_types.iteritems():
facts = self.get_all_facts_by_type(key, type)
if facts:
result.append(facts)
return result
def get_facts_without_types(self):
wideips = self.api.tm.gtm.wideips.get_collection(
**self.request_params
)
return self.get_facts_from_collection(wideips)
def get_all_facts_by_type(self, key, type):
collection = getattr(self.api.tm.gtm.wideips, key)
wideips = collection.get_collection(**self.request_params)
return self.get_facts_from_collection(wideips, type)
def format_facts(self, wideip, collection_type):
result = dict()
wideip_dict = wideip.to_dict()
result.update(self.format_string_facts(wideip_dict))
result.update(self.format_pool_facts(wideip))
if collection_type:
result['type'] = collection_type
return camel_dict_to_snake_dict(result)
def format_pool_facts(self, wideip):
result = []
if not hasattr(wideip, 'pools'):
return dict(pools=[])
for pool in wideip.pools:
pool_facts = self.format_string_facts(pool)
result.append(pool_facts)
return dict(pools=result)
class BigIpGtmFactsVirtualServers(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsVirtualServers, self).__init__()
self.params = kwargs
def get_facts(self):
try:
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_from_device(self):
servers = self.api.tm.gtm.servers.get_collection(
**self.request_params
)
return self.get_facts_from_collection(servers)
def format_facts(self, server, collection_type=None):
result = dict()
server_dict = server.to_dict()
result.update(self.format_string_facts(server_dict))
result.update(self.format_address_facts(server))
result.update(self.format_virtual_server_facts(server))
return camel_dict_to_snake_dict(result)
def format_address_facts(self, server):
result = []
if not hasattr(server, 'addresses'):
return dict(addresses=[])
for address in server.addresses:
address_facts = self.format_string_facts(address)
result.append(address_facts)
return dict(addresses=result)
def format_virtual_server_facts(self, server):
result = []
        if 'items' not in server.virtualServersReference:
return dict(virtual_servers=[])
for server in server.virtualServersReference['items']:
server_facts = self.format_string_facts(server)
result.append(server_facts)
return dict(virtual_servers=result)
class BigIpGtmFactsManager(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
self.api = None
def get_facts(self):
result = dict()
facts = dict()
if 'pool' in self.params['include']:
facts['pool'] = self.get_pool_facts()
if 'wide_ip' in self.params['include']:
facts['wide_ip'] = self.get_wide_ip_facts()
if 'virtual_server' in self.params['include']:
facts['virtual_server'] = self.get_virtual_server_facts()
result.update(**facts)
result.update(dict(changed=True))
return result
def get_pool_facts(self):
pools = BigIpGtmFactsPools(**self.params)
return pools.get_facts()
def get_wide_ip_facts(self):
wide_ips = BigIpGtmFactsWideIps(**self.params)
return wide_ips.get_facts()
def get_virtual_server_facts(self):
wide_ips = BigIpGtmFactsVirtualServers(**self.params)
return wide_ips.get_facts()
class BigIpGtmFactsModuleConfig(object):
def __init__(self):
self.argument_spec = dict()
self.meta_args = dict()
self.supports_check_mode = False
self.valid_includes = ['pool', 'wide_ip', 'virtual_server']
self.initialize_meta_args()
self.initialize_argument_spec()
def initialize_meta_args(self):
args = dict(
include=dict(type='list', required=True),
filter=dict(type='str', required=False)
)
self.meta_args = args
def initialize_argument_spec(self):
self.argument_spec = f5_argument_spec()
self.argument_spec.update(self.meta_args)
def create(self):
return AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=self.supports_check_mode
)
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
config = BigIpGtmFactsModuleConfig()
module = config.create()
try:
obj = BigIpGtmFactsManager(
check_mode=module.check_mode, **module.params
)
result = obj.get_facts()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
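# --- Illustrative sketch (added for clarity, not part of the original module) ---
# Outside of Ansible, the fact collectors could be exercised directly, e.g.:
#
#   manager = BigIpGtmFactsManager(
#       server='lb.example.com', user='admin', password='secret',
#       server_port=443, include=['pool', 'wide_ip'], filter=None,
#   )
#   facts = manager.get_facts()
#
# All connection values above are hypothetical placeholders.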
| apache-2.0 |
ntt-sic/keystone | keystone/openstack/common/db/sqlalchemy/migration.py | 6 | 10075 | # coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import distutils.version as dist_version
import os
import re
import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from keystone.openstack.common.db import exception
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.openstack.common.gettextutils import _ # noqa
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
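# --- Illustrative sketch (added for clarity, not part of the original module) ---
# A minimal example of how the patch is meant to be applied: call it once,
# before handing control to sqlalchemy-migrate, when the backend is SQLite.
# The repository path below is a hypothetical placeholder.
#
#   patch_migrate()
#   db_sync('/path/to/migrate_repo')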
def db_sync(abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version(abs_path, init_version):
"""Show the current version of the repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.DbMigrationError(
message=_("Upgrade DB using Essex release first."))
def db_version_control(abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
global _REPOSITORY
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
if _REPOSITORY is None:
_REPOSITORY = Repository(abs_path)
return _REPOSITORY
| apache-2.0 |
ghickman/django | django/core/management/commands/shell.py | 6 | 4104 | import os
import warnings
from django.core.management.base import BaseCommand
from django.utils.deprecation import RemovedInDjango20Warning
class Command(BaseCommand):
help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
requires_system_checks = False
shells = ['ipython', 'bpython', 'python']
def add_arguments(self, parser):
parser.add_argument('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython or bpython. '
'Deprecated, use the `-i python` or `--interface python` option instead.')
parser.add_argument('--no-startup', action='store_true', dest='no_startup',
help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.')
parser.add_argument('-i', '--interface', choices=self.shells, dest='interface',
help='Specify an interactive interpreter interface. Available options: "ipython", "bpython", and "python"')
def _ipython_pre_011(self):
"""Start IPython pre-0.11"""
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
def _ipython_pre_100(self):
"""Start IPython pre-1.0.0"""
from IPython.frontend.terminal.ipapp import TerminalIPythonApp
app = TerminalIPythonApp.instance()
app.initialize(argv=[])
app.start()
def _ipython(self):
"""Start IPython >= 1.0"""
from IPython import start_ipython
start_ipython(argv=[])
def ipython(self, options):
"""Start any version of IPython"""
for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
try:
ip()
except ImportError:
pass
else:
return
# no IPython, raise ImportError
raise ImportError("No IPython")
def bpython(self, options):
import bpython
bpython.embed()
def python(self, options):
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then .pythonrc.py.
if not options['no_startup']:
for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
if not pythonrc:
continue
pythonrc = os.path.expanduser(pythonrc)
if not os.path.isfile(pythonrc):
continue
try:
with open(pythonrc) as handle:
exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
except NameError:
pass
code.interact(local=imported_objects)
def handle(self, **options):
if options['plain']:
warnings.warn(
"The --plain option is deprecated in favor of the -i python or --interface python option.",
RemovedInDjango20Warning
)
options['interface'] = 'python'
available_shells = [options['interface']] if options['interface'] else self.shells
for shell in available_shells:
try:
return getattr(self, shell)(options)
except ImportError:
pass
raise ImportError("Couldn't load any of the specified interfaces.")
| bsd-3-clause |
cbertinato/pandas | pandas/io/excel/_openpyxl.py | 1 | 14098 | from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import _validate_freeze_panes
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super().__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
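# Illustrative usage, not part of the original source ('report.xlsx', 'extra'
# and df are placeholder names): the mode='a' branch above is what backs
#
#   with pd.ExcelWriter('report.xlsx', engine='openpyxl', mode='a') as writer:
#       df.to_excel(writer, sheet_name='extra')
#
# i.e. the existing workbook is loaded and new sheets are appended to it.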
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
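# Illustrative sketch, not part of the original source: a style_dict such as
#
#   {'font': {'bold': True, 'color': 'FF0000FF'},
#    'borders': {'top': {'style': 'thin'}}}
#
# is expected to come back as {'font': Font(...), 'border': Border(...)};
# note how the 'borders' synonym is normalized to 'border' via _style_key_map.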
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
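# Illustrative sketch, not part of the original source: a solid pattern fill
# described as
#
#   {'patternType': 'solid', 'fgColor': 'FFFFFF00', 'bgColor': 'FFFFFF00'}
#
# maps to PatternFill(fill_type='solid', start_color=..., end_color=...);
# only if PatternFill rejects the keyword set does the except branch fall
# back to building a GradientFill instead.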
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
| bsd-3-clause |
saurabh6790/medsynaptic-lib | core/doctype/property_setter/property_setter.py | 34 | 2382 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def autoname(self):
self.doc.name = self.doc.doc_type + "-" \
+ (self.doc.field_name and (self.doc.field_name + "-") or "") \
+ self.doc.property
def validate(self):
"""delete other property setters on this, if this is new"""
if self.doc.fields['__islocal']:
webnotes.conn.sql("""delete from `tabProperty Setter` where
doctype_or_field = %(doctype_or_field)s
and doc_type = %(doc_type)s
and ifnull(field_name,'') = ifnull(%(field_name)s, '')
and property = %(property)s""", self.doc.fields)
# clear cache
webnotes.clear_cache(doctype = self.doc.doc_type)
def get_property_list(self, dt):
return webnotes.conn.sql("""select fieldname, label, fieldtype
from tabDocField
where parent=%s
and fieldtype not in ('Section Break', 'Column Break', 'HTML', 'Read Only', 'Table')
and ifnull(fieldname, '') != ''
order by label asc""", dt, as_dict=1)
def get_setup_data(self):
return {
'doctypes': [d[0] for d in webnotes.conn.sql("select name from tabDocType")],
'dt_properties': self.get_property_list('DocType'),
'df_properties': self.get_property_list('DocField')
}
def get_field_ids(self):
return webnotes.conn.sql("select name, fieldtype, label, fieldname from tabDocField where parent=%s", self.doc.doc_type, as_dict = 1)
def get_defaults(self):
if not self.doc.field_name:
return webnotes.conn.sql("select * from `tabDocType` where name=%s", self.doc.doc_type, as_dict = 1)[0]
else:
return webnotes.conn.sql("select * from `tabDocField` where fieldname=%s and parent=%s",
(self.doc.field_name, self.doc.doc_type), as_dict = 1)[0]
def on_update(self):
from core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.doc.doc_type)
def make_property_setter(doctype, fieldname, property, value, property_type, for_doctype = False):
return webnotes.bean({
"doctype":"Property Setter",
"doctype_or_field": for_doctype and "DocType" or "DocField",
"doc_type": doctype,
"field_name": fieldname,
"property": property,
"value": value,
"property_type": property_type
}).insert()
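# Illustrative usage, not part of the original module; the doctype, fieldname
# and value are hypothetical:
#
#   make_property_setter("Sales Invoice", "grand_total", "read_only", "1", "Check")
#
# This is expected to create a DocField-scoped Property Setter (for_doctype
# defaults to False) whose on_update hook re-validates the affected DocType.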
| mit |
PanDAWMS/panda-mon-qa | pandamonqa/qasuite/BSXPath.py | 3 | 80212 | # -*- coding: utf-8 -*-
"""
BSXPath.py: XPathEvaluator Extension for BeautifulSoup
"""
__version__ = '0.01e' # based on JavaScript-XPath 0.1.11 (c) 2007 Cybozu Labs, Inc. (http://coderepos.org/share/wiki/JavaScript-XPath)
__date__ = '2009-04-12'
__license__ = 'MIT-style license'
__author__ = 'furyu' # http://furyu.tea-nifty.com/annex/
# http://d.hatena.ne.jp/furyu-tei/
"""
Usage:
from BSXPath import BSXPathEvaluator,XPathResult
#*** PREPARATION (create object)
document = BSXPathEvaluator(<html>) # BSXPathEvaluator is sub-class of BeautifulSoup
# html: HTML (text string)
#*** BASIC OPERATIONS
result = document.evaluate(<expression>,<node>,None,<type>,None)
# expression: XPath expression
# node : base context-node(document is document-root)
# type : XPathResult.<name>
# name : ANY_TYPE, NUMBER_TYPE, STRING_TYPE, BOOLEAN_TYPE, UNORDERED_NODE_ITERATOR_TYPE, ORDERED_NODE_ITERATOR_TYPE
# UNORDERED_NODE_SNAPSHOT_TYPE, ORDERED_NODE_SNAPSHOT_TYPE, ANY_UNORDERED_NODE_TYPE, FIRST_ORDERED_NODE_TYPE
# (*) 3rd(resolver) and 5th(result) arguments are not implemented
length = result.snapshotLength
node = result.snapshotItem(<number>)
#*** USEFUL WRAPPER-FUNCTIONS
nodes = document.getItemList(<expression>[,<node>])
first = document.getFirstItem(<expression>[,<node>])
# expression: XPath expression
# node(optional): base context-node(default: document(document-root))
Examples:
from BSXPath import BSXPathEvaluator,XPathResult
html = '<html><head><title>Hello, DOM 3 XPath!</title></head><body><h1>Hello, DOM 3 XPath!</h1><p>This is XPathEvaluator Extension for BeautifulSoup.</p><p>This is based on JavaScript-XPath!</p></body>'
document = BSXPathEvaluator(html)
result = document.evaluate('//h1/text()[1]',document,None,XPathResult.STRING_TYPE,None)
print result.stringValue
# Hello, DOM 3 XPath!
result = document.evaluate('//h1',document,None,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE,None)
print result.snapshotLength
# 1
print result.snapshotItem(0)
# <h1>Hello, DOM 3 XPath!</h1>
nodes = document.getItemList('//p')
print len(nodes)
# 2
print nodes
# [<p>This is XPathEvaluator Extension for BeautifulSoup.</p>, <p>This is based on JavaScript-XPath!</p>]
first = document.getFirstItem('//p')
print first
# <p>This is XPathEvaluator Extension for BeautifulSoup.</p>
Notice:
- This is based on JavaScript-XPath (c) 2007 Cybozu Labs, Inc. (http://coderepos.org/share/wiki/JavaScript-XPath)
- Required:
- Python 2.5+
- BeautifulSoup 3.0.7+(recommended) or 3.1.0+
"""
import re,types,math,datetime
#import logging
from BeautifulSoup import *
try:
if DEFAULT_OUTPUT_ENCODING:
pass
except:
DEFAULT_OUTPUT_ENCODING='utf-8'
#***** Optional Parameters
USE_NODE_CACHE=True
USE_NODE_INDEX=True
#***** General Functions
def throwError(str):
raise ValueError, str
def typeof(obj):
if isinstance(obj,bool):
return 'boolean'
if isinstance(obj,int) or isinstance(obj,float):
return 'number'
if isinstance(obj,basestring):
return 'string'
if isinstance(obj,types.FunctionType):
return 'function'
return 'object'
def isNaN(obj):
if isinstance(obj,int) or isinstance(obj,float):
return False
if not isinstance(obj,basestring):
return True
if obj.isdigit():
return False
try:
float(obj)
return False
except:
return True
def toNumber(obj):
if isinstance(obj,int) or isinstance(obj,float):
return obj
if isinstance(obj,basestring):
if obj.isdigit():
return int(obj)
try:
return float(obj)
except:
return obj
return obj
def toBoolean(obj):
return bool(obj)
def toString(obj):
if isinstance(obj,bool):
#return u'true' if obj else u'false'
if obj:
return u'true'
else:
return u'false'
if isinstance(obj,str) or isinstance(obj,int) or isinstance(obj,float):
return unicode(obj)
return obj
#***** General Classes
class ExtDict(dict):
def __getattr__(self,name):
try:
attr=super(ExtDict,self).__getattr__(name)
except:
if not self.has_key(name):
raise AttributeError,name
attr=self.get(name)
return attr
#***** Common Definitions
indent_space=' '
#{ // Regular Expressions
re_has_ualpha=re.compile(r'(?![0-9])[\w]')
re_seqspace=re.compile(r'\s+')
re_firstspace=re.compile(r'^\s')
re_lastspace=re.compile(r'\s$')
#} // end of Regular Expressions
#{ // NodeTypeDOM
NodeTypeDOM=ExtDict({
'ANY_NODE' :0
, 'ELEMENT_NODE' :1
, 'ATTRIBUTE_NODE' :2
, 'TEXT_NODE' :3
, 'CDATA_SECTION_NODE' :4
, 'ENTITY_REFERENCE_NODE' :5
, 'ENTITY_NODE' :6
, 'PROCESSING_INSTRUCTION_NODE':7
, 'COMMENT_NODE' :8
, 'DOCUMENT_NODE' :9
, 'DOCUMENT_TYPE_NODE' :10
, 'DOCUMENT_FRAGMENT_NODE' :11
, 'NOTATION_NODE' :12
})
NodeTypeBS=ExtDict({
'BSXPathEvaluator' :NodeTypeDOM.DOCUMENT_NODE
, 'NavigableString' :NodeTypeDOM.TEXT_NODE
, 'CData' :NodeTypeDOM.CDATA_SECTION_NODE
, 'ProcessingInstruction':NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
, 'Comment' :NodeTypeDOM.COMMENT_NODE
, 'Declaration' :NodeTypeDOM.ANY_NODE
, 'Tag' :NodeTypeDOM.ELEMENT_NODE
})
#} // end of NodeTypeDOM
#{ // NodeUtil
def makeNodeUtils():
re_type_document_type=re.compile(r'^DOCTYPE\s')
re_type_entity =re.compile(r'^ENTITY\s')
re_type_notation =re.compile(r'^NOTATION\s')
#re_processing_instruction=re.compile(r'^(.*?)\s+(.*?)\?*$')
re_processing_instruction=re.compile(r'^(.*?)(\s+.*?)\?*$')
re_declaration_name=re.compile(r'^([^\s]+)\s+([\%]?)\s*([^\s]+)\s')
def makeNU_BS():
def _nodeType(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeType
nodeType=NodeTypeBS.get(node.__class__.__name__)
if nodeType==NodeTypeDOM.ANY_NODE:
str=NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
if re_type_document_type.search(str):
nodeType=NodeTypeDOM.DOCUMENT_TYPE_NODE
elif re_type_entity.search(str):
nodeType=NodeTypeDOM.ENTITY_NODE
elif re_type_notation.search(str):
nodeType=NodeTypeDOM.NOTATION_NODE
return nodeType
def _nodeName(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeName.lower()
nodeType=_nodeType(node)
if nodeType==NodeTypeDOM.DOCUMENT_NODE:
return '#document'
elif nodeType==NodeTypeDOM.TEXT_NODE:
return '#text'
elif nodeType==NodeTypeDOM.CDATA_SECTION_NODE:
return '#cdata-section'
elif nodeType==NodeTypeDOM.PROCESSING_INSTRUCTION_NODE:
mrslt=re_processing_instruction.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(1)
else:
return NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
elif nodeType==NodeTypeDOM.COMMENT_NODE:
return '#comment'
elif nodeType==NodeTypeDOM.DOCUMENT_TYPE_NODE or nodeType==NodeTypeDOM.ENTITY_NODE or nodeType==NodeTypeDOM.NOTATION_NODE:
mrslt=re_declaration_name.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(2)
else:
return NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
else:
return node.name.lower()
def _nodeValue(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeValue
nodeType=_nodeType(node)
if nodeType==NodeTypeDOM.CDATA_SECTION_NODE or \
nodeType==NodeTypeDOM.COMMENT_NODE or \
nodeType==NodeTypeDOM.TEXT_NODE:
return NavigableString.encode(node, DEFAULT_OUTPUT_ENCODING)
if nodeType==NodeTypeDOM.PROCESSING_INSTRUCTION_NODE:
mrslt=re_processing_instruction.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(2)
else:
return None
return None
def _nodeAttrValue(node,attrName):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return None
nodeType=_nodeType(node)
if nodeType!=NodeTypeDOM.ELEMENT_NODE:
return None
return node.get(attrName)
def _parentNode(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.parentNode
return node.parent
def _ownerDocument(node):
owner=getattr(node,'_owner',None)
if owner:
return owner
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
owner=node.parentNode
else:
owner=node
while True:
parent=owner.parent
if not parent:
break
owner=parent
try:
node._owner=owner
except:
pass
return owner
def pairwise(iterable):
itnext = iter(iterable).next
while True:
yield itnext(), itnext()
def _attributes(node):
if _nodeType(node)==NodeTypeDOM.ELEMENT_NODE:
#return node._getAttrMap()
if not getattr(node,'attrMap'):
node.attrMap=dict(pairwise(node.attrs))
return node.attrMap
else:
return None
def _contains(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
return node in cnode.findParents()
def _preceding(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
#return cnode in node.findAllPrevious()
return cnode in node.findPreviousSiblings()
def _following(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
#return cnode in node.findAllNext()
return cnode in node.findNextSiblings()
def d_getattr(self,name):
raise AttributeError,name
#{ // ExtPageElement
class ExtPageElement:
def __getattr__(self,name):
if name=='nodeType': return _nodeType(self)
if name=='nodeName': return _nodeName(self)
if name=='nodeValue': return _nodeValue(self)
if name=='parentNode': return _parentNode(self)
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return _attributes(self)
if name=='get': return self.get
if name=='contains': return self.contains
if name=='preceding': return self.preceding
if name=='following': return self.following
d_getattr(self,name)
def get(self,key,default=None):
return _nodeAttrValue(self,key)
def contains(self,cnode):
return _contains(self,cnode)
def preceding(self,cnode):
return _preceding(self,cnode)
def following(self,cnode):
return _following(self,cnode)
PageElement.__bases__+=(ExtPageElement,)
BeautifulSoup.__bases__+=(ExtPageElement,)
NavigableString.__bases__+=(ExtPageElement,)
CData.__bases__+=(ExtPageElement,)
ProcessingInstruction.__bases__+=(ExtPageElement,)
Comment.__bases__+=(ExtPageElement,)
Declaration.__bases__+=(ExtPageElement,)
Tag.__bases__+=(ExtPageElement,)
#} // ExtPageElement
#{ // _extBeautifulSoup
def _extBeautifulSoup():
o_getattr=getattr(BeautifulSoup,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.DOCUMENT_NODE
if name=='nodeName': return '#document'
if name=='nodeValue': return None
if name=='parentNode': return None
if name=='ownerDocument': return None
if name=='attributes': return None
return o_getattr(self,name)
BeautifulSoup.__getattr__=e_getattr
_extBeautifulSoup()
#} // _extBeautifulSoup
#{ // _extNavigableString
def _extNavigableString():
o_getattr=getattr(NavigableString,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.TEXT_NODE
if name=='nodeName': return '#text'
if name=='nodeValue': return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
NavigableString.__getattr__=e_getattr
_extNavigableString()
#} // _extNavigableString
#{ // _extCData
def _extCData():
o_getattr=getattr(CData,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.CDATA_SECTION_NODE
if name=='nodeName': return '#cdata-section'
if name=='nodeValue': return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
CData.__getattr__=e_getattr
_extCData()
#} // _extCData
#{ // _extProcessingInstruction
def _extProcessingInstruction():
o_getattr=getattr(ProcessingInstruction,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
if name=='nodeName':
mrslt=re_processing_instruction.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(1) if mrslt else NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if mrslt:
return mrslt.group(1)
else:
return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='nodeValue':
mrslt=re_processing_instruction.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(2) if mrslt else None
if mrslt:
return mrslt.group(2)
else:
return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
ProcessingInstruction.__getattr__=e_getattr
_extProcessingInstruction()
#} // _extProcessingInstruction
#{ // _extComment
def _extComment():
o_getattr=getattr(Comment,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.COMMENT_NODE
if name=='nodeName': return '#comment'
if name=='nodeValue': return NavigableString.encode(self, DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
Comment.__getattr__=e_getattr
_extComment()
#} // _extComment
#{ // _extDeclaration
def _extDeclaration():
o_getattr=getattr(Declaration,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType':
str=NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if re_type_document_type.search(str):
return NodeTypeDOM.DOCUMENT_TYPE_NODE
elif re_type_entity.search(str):
return NodeTypeDOM.ENTITY_NODE
elif re_type_notation.search(str):
return NodeTypeDOM.NOTATION_NODE
else:
return NodeTypeDOM.ANY_NODE
if name=='nodeName':
mrslt=re_declaration_name.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(2) if mrslt else NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if mrslt:
return mrslt.group(2)
else:
return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='nodeValue': return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
Declaration.__getattr__=e_getattr
_extDeclaration()
#} // _extDeclaration
#{ // _extTag
def _extTag():
o_getattr=getattr(Tag,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.ELEMENT_NODE
if name=='nodeName': return self.name.lower()
if name=='nodeValue': return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return self._getAttrMap()
return o_getattr(self,name)
Tag.__getattr__=e_getattr
_extTag()
#} // _extTag
def _it_deepNodes(node):
child_next=iter(getattr(node,'contents',[])).next
while True:
child=child_next()
yield child
for gchild in _it_deepNodes(child):
yield gchild
return ExtDict({
'nodeType' :_nodeType
, 'nodeName' :_nodeName
, 'nodeValue' :_nodeValue
, 'nodeAttrValue':_nodeAttrValue
, 'parentNode' :_parentNode
, 'ownerDocument':_ownerDocument
, 'attributes' :_attributes
, 'contains' :_contains
, 'preceding' :_preceding
, 'following' :_following
, 'it_deepNodes' :_it_deepNodes
})
return
def makeNU():
def _to(valueType,node):
if typeof(node)=='string':
result=node
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ATTRIBUTE_NODE:
result=node.nodeValue
else:
strings=[]
for _node in NodeUtilBS.it_deepNodes(node):
if _node.nodeType==NodeTypeDOM.TEXT_NODE:
strings.append(unicode(_node))
result=''.join(strings)
if valueType=='number':
return toNumber(result)
elif valueType=='boolean':
return toBoolean(result)
else:
return result
def _attrMatch(node,attrName,attrValue):
if not attrName or \
not attrValue and node.get(attrName) or \
(attrValue and node.get(attrName)==attrValue):
return True
else:
return False
def _getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if prevNodeset:
prevNodeset.delDescendant(node,prevIndex)
if USE_NODE_CACHE:
_cachemap=getattr(node,'_cachemap',None)
if not _cachemap:
_cachemap=node._cachemap=ExtDict({'attrib':ExtDict({}),'all':None,'tag':ExtDict({})})
if attrValue and attrName:
_cm=_cachemap.attrib
_anmap=_cm.get(attrName)
if not _anmap:
_anmap=_cm[attrName]=ExtDict({})
nodes=_anmap.get(attrValue)
if not nodes:
nodes=_anmap[attrValue]=[]
if getattr(node,'findAll',None):
nodes.extend(node.findAll(attrs={attrName:attrValue}))
for elm in nodes:
if test.match(elm):
nodeset.push(elm)
elif getattr(test,'notOnlyElement',None):
nodes=_cachemap.all
if not nodes:
nodes=_cachemap.all=[]
for elm in NodeUtilBS.it_deepNodes(node):
nodes.append(elm)
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ELEMENT_NODE or nodeType==NodeTypeDOM.DOCUMENT_NODE:
_cm=_cachemap.tag
name=getattr(test,'name',None)
if not name or name=='*':
nodes=_cm.get('*')
if not nodes:
nodes=_cm['*']=node.findAll()
else:
nodes=_cm.get(name)
if not nodes:
nodes=_cm[name]=node.findAll([name])
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue):
nodeset.push(elm)
else: # USE_NODE_CACHE is False
if attrValue and attrName:
if getattr(node,'findAll',None):
for elm in node.findAll(attrs={attrName:attrValue}):
if test.match(elm):
nodeset.push(elm)
elif getattr(test,'notOnlyElement',None):
for elm in NodeUtilBS.it_deepNodes(node):
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ELEMENT_NODE or nodeType==NodeTypeDOM.DOCUMENT_NODE:
name=getattr(test,'name',None)
if not name or name=='*':
nodes=node.findAll()
else:
nodes=node.findAll([name])
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue):
nodeset.push(elm)
return nodeset
def _getChildNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
contents=getattr(node,'contents',[])
for elm in contents:
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
return nodeset
return ExtDict({
'to' :_to
, 'attrMatch' :_attrMatch
, 'getDescendantNodes':_getDescendantNodes
, 'getChildNodes' :_getChildNodes
})
return (makeNU_BS(),makeNU())
(NodeUtilBS,NodeUtil)=makeNodeUtils()
#} // end of NodeUtil
#***** Application Classes
#{ // Lexer
class Lexer(object):
def __init__(self,source):
tokens=self.tokens=[]
def anlz_token(mrslt):
token=mrslt.group()
if not self.re_strip.search(token):
tokens.append(token)
return token
self.re_token.sub(anlz_token,source,count=0)
self.index=0
def peek(self,i=0):
#token=self.tokens[self.index+i] if self.index+i<len(self.tokens) else None
if self.index+i<len(self.tokens):
token=self.tokens[self.index+i]
else:
token=None
return token
def next(self):
#token=self.tokens[self.index] if self.index<len(self.tokens) else None
if self.index<len(self.tokens):
token=self.tokens[self.index]
else:
token=None
self.index+=1
return token
def back(self):
self.index-=1
#token=self.tokens[self.index] if self.index<len(self.tokens) else None
if self.index<len(self.tokens):
token=self.tokens[self.index]
else:
token=None
def empty(self):
return (len(self.tokens)<=self.index)
re_token=re.compile(r'\$?(?:(?![0-9-])[\w-]+:)?(?![0-9-])[\w-]+|\/\/|\.\.|::|\d+(?:\.\d*)?|\.\d+|"[^"]*"|\'[^\']*\'|[!<>]=|(?![0-9-])[\w-]+:\*|\s+|.')
re_strip=re.compile(r'^\s')
#} // end of Lexer
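# Illustrative sketch, not part of the original source: re_token splits an
# XPath string into primitive tokens and anlz_token drops whitespace-only
# matches, so an expression like
#   //div[@id="main"]/a[1]
# is expected to tokenize as
#   ['//', 'div', '[', '@', 'id', '=', '"main"', ']', '/', 'a', '[', '1', ']']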
#{ // Ctx
class Ctx(object):
def __init__(self,node,position=1,last=1):
self.node=node
self.position=position
self.last=last
#} // end of Ctx
#{ // AttributeWrapper
class AttributeWrapper(object):
def __init__(self,name,value,parent):
self.nodeType=NodeTypeDOM.ATTRIBUTE_NODE
self.nodeName=name
self.nodeValue=value
self.parentNode=parent
self.ownerElement=parent
def get(self,key,default=None):
return None
def contains(self,cnode):
return NodeUtilBS.contains(self,cnode)
def preceding(self,cnode):
return NodeUtilBS.preceding(self,cnode)
def following(self,cnode):
return NodeUtilBS.following(self,cnode)
def __str__(self,encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.nodeValue.encode(encoding)
else:
return self.nodeValue
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
@classmethod
def getAttributeWrapper(cls,name,value,parent):
_mapattr=getattr(parent,'_mapattr',None)
if not _mapattr:
_mapattr=parent._mapattr=ExtDict({})
if _mapattr.get(name):
return _mapattr[name]
_mapattr[name]=cls(name,value,parent)
return _mapattr[name]
#} // end of AttributeWrapper
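# Illustrative note, not part of the original source: attribute steps return
# AttributeWrapper objects instead of plain strings, so something like
#   document.evaluate('//a/@href', document, None,
#                     XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, None)
# is expected to yield wrappers whose nodeValue/str() is the href text and
# whose ownerElement is the enclosing <a> tag.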
#{ // BaseExpr
class BaseExpr(object):
def __init__(self):
pass
def number(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.number()
else:
result=toNumber(exrs)
return result
def string(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.string()
else:
result=toString(exrs)
return result
def bool(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.bool()
else:
result=toBoolean(exrs)
return result
#} // end of BaseExpr
#{ // BaseExprHasPredicates
class BaseExprHasPredicates(BaseExpr):
def __init__(self):
pass
def evaluatePredicates(self,nodeset,start=0):
reverse=getattr(self,'reverse',False)
predicates=getattr(self,'predicates',[])
nodeset.sort()
l0=len(predicates)
for i in range(start,l0):
predicate=predicates[i]
deleteIndexes=[]
nodes=nodeset.list()
l1=len(nodes)
for j in range(0,l1):
#position=(l1-j) if reverse else (j+1)
if reverse:
position=(l1-j)
else:
position=(j+1)
exrs=predicate.evaluate(Ctx(nodes[j],position,l1))
if typeof(exrs)=='number':
exrs=(position==exrs)
elif typeof(exrs)=='string':
#exrs=False if exrs=='' else True
if exrs=='' :
exrs=False
else:
exrs=True
elif typeof(exrs)=='object':
exrs=exrs.bool()
if not exrs:
deleteIndexes.append(j)
r=range(0,len(deleteIndexes))
r.sort(reverse=True)
for j in r:
nodeset._del(deleteIndexes[j])
return nodeset
@classmethod
def parsePredicates(cls,lexer,expr):
while lexer.peek()=='[':
lexer.next()
if lexer.empty():
throwError(u'missing predicate expr')
predicate=BinaryExpr.parse(lexer)
expr.predicate(predicate)
if lexer.empty():
throwError(u'unclosed predicate expr')
if lexer.next() != ']':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
#} // end of BaseExprHasPredicates
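# Illustrative note, not part of the original source: evaluatePredicates is
# where numeric predicates get their positional meaning; for an expression
# like //li[2] the predicate evaluates to the number 2, which the loop above
# rewrites as the test position==2, keeping only the second li child of each
# context node.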
#{ // BinaryExpr
class BinaryExpr(BaseExpr):
def __init__(self,op,left,right):
self.op=op
self.left=left
self.right=right
self.dataType=BinaryExpr.ops[op][2]
(lneedContextPosition,rneedContextPosition)=(getattr(left,'needContextPosition',None),getattr(right,'needContextPosition',None))
(lneedContextNode,rneedContextNode)=(getattr(left,'needContextNode',None),getattr(right,'needContextNode',None))
self.needContextPosition=lneedContextPosition or rneedContextPosition
self.needContextNode=lneedContextNode or rneedContextNode
if op=='=':
(ldatatype,rdatatype)=(getattr(left,'datatype',None),getattr(right,'datatype',None))
(lqattr,rqattr)=(getattr(left,'quickAttr',None),getattr(right,'quickAttr',None))
if not rneedContextNode and not rneedContextPosition and rdatatype!='nodeset' and rdatatype!='void' and lqattr:
self.quickAttr=True
self.attrName=left.attrName
self.attrValueExpr=right
elif not lneedContextNode and not lneedContextPosition and ldatatype!='nodeset' and ldatatype!='void' and rqattr:
self.quickAttr=True
self.attrName=right.attrName
self.attrValueExpr=left
def evaluate(self,ctx):
result=BinaryExpr.ops[self.op][1](self.left,self.right,ctx)
return result
def show(self,indent=''):
t=''
t+=indent+'binary: '+self.op+'\n'
indent+=indent_space
t+=self.left.show(indent)
t+=self.right.show(indent)
return t
# --- Local Functions
@staticmethod
def _compare(op,comp,left,right,ctx):
left=left.evaluate(ctx)
right=right.evaluate(ctx)
if getattr(left,'isNodeSet',None) and getattr(right,'isNodeSet',None):
lnodes=left.list()
rnodes=right.list()
for lnode in lnodes:
for rnode in rnodes:
if comp(NodeUtil.to('string',lnode),NodeUtil.to('string',rnode)):
return True
return False
if getattr(left,'isNodeSet',None) or getattr(right,'isNodeSet',None):
if getattr(left,'isNodeSet',None):
(nodeset,primitive)=(left,right)
else:
(nodeset,primitive)=(right,left)
nodes=nodeset.list()
type=typeof(primitive)
for node in nodes:
if comp(NodeUtil.to(type,node),primitive):
return True
return False
if op=='=' or op=='!=':
if typeof(left)=='boolean' or typeof(right)=='boolean':
return comp(toBoolean(left),toBoolean(right))
if typeof(left)=='number' or typeof(right)=='number':
return comp(toNumber(left),toNumber(right))
return comp(left,right)
return comp(toNumber(left),toNumber(right))
def _div(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
if r==0:
sign=int(getattr(left,'op','+')+'1')*int(getattr(right,'op','+')+'1')
if l==0: return 'NaN'
elif sign<0: return '-Infinity'
else: return 'Infinity'
n=float(l) / float(r)
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _mod(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
if r==0:
if l==0: return 'NaN'
else: return 0
return l % r
def _mul(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l * r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _add(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l + r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _sub(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l - r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _lt(left,right,ctx):
return BinaryExpr._compare('<',(lambda a,b:a<b),left,right,ctx)
def _gt(left,right,ctx):
return BinaryExpr._compare('>',(lambda a,b:a>b),left,right,ctx)
def _le(left,right,ctx):
return BinaryExpr._compare('<=',(lambda a,b:a<=b),left,right,ctx)
def _ge(left,right,ctx):
return BinaryExpr._compare('>=',(lambda a,b:a>=b),left,right,ctx)
def _eq(left,right,ctx):
return BinaryExpr._compare('=',(lambda a,b:a==b),left,right,ctx)
def _ne(left,right,ctx):
return BinaryExpr._compare('!=',(lambda a,b:a!=b),left,right,ctx)
def _and(left,right,ctx):
return left.bool(ctx) & right.bool(ctx)
def _or(left,right,ctx):
return left.bool(ctx) | right.bool(ctx)
ops=ExtDict({
'div':[6,_div,'number' ]
, 'mod':[6,_mod,'number' ]
, '*' :[6,_mul,'number' ]
, '+' :[5,_add,'number' ]
, '-' :[5,_sub,'number' ]
, '<' :[4,_lt ,'boolean']
, '>' :[4,_gt ,'boolean']
, '<=' :[4,_le ,'boolean']
, '>=' :[4,_ge ,'boolean']
, '=' :[3,_eq ,'boolean']
, '!=' :[3,_ne ,'boolean']
, 'and':[2,_and,'boolean']
, 'or' :[1,_or ,'boolean']
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
stack=[]
index=lexer.index
while True:
if lexer.empty():
throwError(u'missing right expression')
expr=UnaryExpr.parse(lexer)
op=lexer.next()
if not op:
break
info=ops.get(op)
precedence=info and info[0]
if not precedence:
lexer.back()
break
while 0<len(stack) and precedence<=ops[stack[len(stack)-1]][0]:
expr=BinaryExpr(stack.pop(),stack.pop(),expr)
stack.extend([expr,op])
while 0<len(stack):
expr=BinaryExpr(stack.pop(),stack.pop(),expr)
return expr
#} // end of BinaryExpr
#{ // UnaryExpr
class UnaryExpr(BaseExpr):
def __init__(self,op,expr):
self.op=op
self.expr=expr
self.needContextPosition=getattr(expr,'needContextPosition',None)
self.needContextNode=getattr(expr,'needContextNode',None)
self.datatype='number'
def evaluate(self,ctx):
result=-self.expr.number(ctx)
return result
def show(self,indent=''):
t=''
t+=indent+'unary: '+self.op+'\n'
indent+=indent_space
t+=self.expr.show(indent)
return t
ops=ExtDict({
'-':1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
if ops.get(lexer.peek()):
return cls(lexer.next(),cls.parse(lexer))
else:
return UnionExpr.parse(lexer)
#} // end of UnaryExpr
#{ // UnionExpr
class UnionExpr(BaseExpr):
def __init__(self):
self.paths=[]
self.datatype='nodeset'
def evaluate(self,ctx):
paths=self.paths
nodeset=NodeSet()
for path in paths:
exrs=path.evaluate(ctx)
if not getattr(exrs,'isNodeSet',None):
throwError(u'PathExpr must be nodeset')
nodeset.merge(exrs)
return nodeset
def path(self,path):
self.paths.append(path)
if getattr(path,'needContextPosition',None):
self.needContextPosition=True
if getattr(path,'needContextNode',None):
self.needContextNode=True
def show(self,indent=''):
t=''
t+=indent+'union: '+'\n'
indent+=indent_space
for path in self.paths:
t+=path.show(indent)
return t
ops=ExtDict({
'|':1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
expr=PathExpr.parse(lexer)
if not ops.get(lexer.peek()):
return expr
union=UnionExpr()
union.path(expr)
while True:
if not ops.get(lexer.next()):
break
if lexer.empty():
throwError(u'missing next union location path')
union.path(PathExpr.parse(lexer))
lexer.back()
return union
#} // end of UnionExpr
#{ // PathExpr
class PathExpr(BaseExpr):
def __init__(self,filter):
self.filter=filter
self.steps=[]
self.datatype=filter.datatype
self.needContextPosition=filter.needContextPosition
self.needContextNode=filter.needContextNode
def evaluate(self,ctx):
nodeset=self.filter.evaluate(ctx)
if not getattr(nodeset,'isNodeSet',None):
throwException('Filter nodeset must be nodeset type')
for _step in self.steps:
if nodeset.length<=0:
break
step=_step[1] # _step=[op,step]
reverse=step.reverse
iter=nodeset.iterator(reverse)
prevNodeset=nodeset
nodeset=None
needContextPosition=getattr(step,'needContextPosition',None)
axis=step.axis
if not needContextPosition and axis=='following':
node=iter()
while True:
next=iter()
if not next:
break
if not node.contains(next):
break
node=next
nodeset=step.evaluate(Ctx(node))
elif not needContextPosition and axis=='preceding':
node=iter()
nodeset=step.evaluate(Ctx(node))
else:
node=iter()
j=0
nodeset=step.evaluate(Ctx(node),False,prevNodeset,j)
while True:
node=iter()
if not node:
break
j+=1
nodeset.merge(step.evaluate(Ctx(node),False,prevNodeset,j))
return nodeset
def step(self,op,step):
step.op=op
self.steps.append([op,step])
self.quickAttr=False
if len(self.steps)==1:
if op=='/' and step.axis=='attribute':
test=step.test
if not getattr(test,'notOnlyElement',None) and test.name!='*':
self.quickAttr=True
self.attrName=test.name
def show(self,indent=''):
t=''
t+=indent+'path: '+'\n'
indent+=indent_space
t+=indent+'filter:'+'\n'
t+=self.filter.show(indent+indent_space)
if 0<len(self.steps):
t+=indent+'steps:'+'\n'
indent+=indent_space
for _step in self.steps:
t+=indent+'operator: '+step[0]+'\n'
t+=_step[1].show(indent) # _step=[op,step]
return t
ops=ExtDict({
'//':1
, '/': 1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
if ops.get(lexer.peek()):
op=lexer.next()
token=lexer.peek()
if op=='/' and lexer.empty() or (token!='.' and token!='..' and token!='@' and token!='*' and not re_has_ualpha.search(token)):
return FilterExpr.root()
path=PathExpr(FilterExpr.root()) # RootExpr
if lexer.empty():
throwError(u'missing next location step')
expr=Step.parse(lexer)
path.step(op,expr)
else:
expr=FilterExpr.parse(lexer)
if not expr:
expr=Step.parse(lexer)
path=PathExpr(FilterExpr.context())
path.step('/',expr)
elif not ops.get(lexer.peek()):
return expr
else:
path=PathExpr(expr)
while True:
if not ops.get(lexer.peek()):
break
op=lexer.next()
if lexer.empty():
throwError(u'missing next location step')
path.step(op,Step.parse(lexer))
return path
#} // end of PathExpr
#{ // FilterExpr
class FilterExpr(BaseExprHasPredicates):
def __init__(self,primary):
self.primary=primary
self.predicates=[]
self.datatype=primary.datatype
self.needContextPosition=primary.needContextPosition
self.needContextNode=primary.needContextNode
def evaluate(self,ctx):
nodeset=self.primary.evaluate(ctx)
if not getattr(nodeset,'isNodeSet',None):
if 0<len(self.predicates):
throwError(u'Primary result must be nodeset type if filter has predicate expression')
return nodeset
return self.evaluatePredicates(nodeset)
def predicate(self,predicate):
self.predicates.append(predicate)
def show(self,indent=''):
t=''
t+=indent+'filter: '+'\n'
indent+=indent_space
t+=self.primary.show(indent+indent_space)
if 0<len(self.predicates):
t+=indent+'predicates:'+'\n'
indent+=indent_space
for predicate in self.predicates:
t+=predicate.show(indent)
return t
@classmethod
def root(cls):
return FunctionCall('root-node')
@classmethod
def context(cls):
return FunctionCall('context-node')
@classmethod
def parse(cls,lexer):
token=lexer.peek()
ch=token[0:1]
if ch=='$':
expr=VariableReference.parse(lexer)
elif ch=='(':
lexer.next()
expr=BinaryExpr.parse(lexer)
if lexer.empty():
throwError(u'unclosed "("')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
elif ch=='"' or ch=="'":
expr=Literal.parse(lexer)
else:
if not isNaN(token):
expr=Number.parse(lexer)
elif NodeType.types.get(token):
return None
elif re_has_ualpha.search(ch) and lexer.peek(1)=='(':
expr=FunctionCall.parse(lexer)
else:
return None
if lexer.peek()!='[':
return expr
filter=FilterExpr(expr)
BaseExprHasPredicates.parsePredicates(lexer,filter)
return filter
#} // end of FilterExpr
#{ // Step
class Step(BaseExprHasPredicates):
def __init__(self,axis,test):
self.axis=axis
self.reverse=self.axises[axis][0]
self.func=self.axises[axis][1]
self.test=test
self.predicates=[]
self._quickAttr=self.axises[axis][2]
self.quickAttr=False
self.needContextPosition=False
def evaluate(self,ctx,special=False,prevNodeset=None,prevIndex=None):
node=ctx.node
reverse=False
if not special and getattr(self,'op',None)=='//':
if not self.needContextPosition and self.axis=='child':
if getattr(self,'quickAttr',None):
attrValueExpr=getattr(self,'attrValueExpr',None)
#attrValue=attrValueExpr.string(ctx) if attrValueExpr else None
if attrValueExpr:
attrValue=attrValueExpr.string(ctx)
else:
attrValue=None
nodeset=NodeUtil.getDescendantNodes(self.test,node,NodeSet(),self.attrName,attrValue,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset,1)
else:
nodeset=NodeUtil.getDescendantNodes(self.test,node,NodeSet(),None,None,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset)
else:
step=Step('descendant-or-self',NodeType('node'))
nodes=step.evaluate(ctx,False,prevNodeset,prevIndex).list()
nodeset=None
step.op='/'
for _node in nodes:
if not nodeset:
nodeset=self.evaluate(Ctx(_node),True,None,None)
else:
nodeset.merge(self.evaluate(Ctx(_node),True,None,None))
nodeset=nodeset or NodeSet()
else:
if getattr(self,'needContextPosition',None):
prevNodeset=None
prevIndex=None
if getattr(self,'quickAttr',None):
attrValueExpr=getattr(self,'attrValueExpr',None)
#attrValue=attrValueExpr.string(ctx) if attrValueExpr else None
if attrValueExpr:
attrValue=attrValueExpr.string(ctx)
else:
attrValue=None
nodeset=self.func(self.test,node,NodeSet(),self.attrName,attrValue,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset,1)
else:
nodeset=self.func(self.test,node,NodeSet(),None,None,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset)
if prevNodeset:
prevNodeset.doDel()
return nodeset
def predicate(self,predicate):
self.predicates.append(predicate)
datatype=getattr(predicate,'datatype',None)
if getattr(predicate,'needContextPosition',None) or datatype=='number' or datatype=='void':
self.needContextPosition=True
if getattr(self,'_quickAttr',None) and len(self.predicates)==1 and getattr(predicate,'quickAttr',None):
attrName=predicate.attrName
self.attrName=attrName
self.attrValueExpr=getattr(predicate,'attrValueExpr',None)
self.quickAttr=True
def show(self,indent=''):
t=''
t+=indent+'step: '+'\n'
indent+=indent_space
if self.axis:
t+=indent+'axis: '+self.axis+'\n'
t+=self.test.show(indent)
if 0<len(self.predicates):
t+=indent+'predicates:'+'\n'
indent+=indent_space
for predicate in self.predicates:
t+=predicate.show(indent)
return t
# --- Local Functions
def _ancestor(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.parentNode
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
return nodeset
def _ancestorOrSelf(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
node=node.parentNode
if not node:
break
return nodeset
def _attribute(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
attrs=node.attributes
if attrs:
if getattr(test,'notOnlyElement',None) and test.type==NodeTypeDOM.ANY_NODE or test.name=='*':
for name in attrs.keys():
#nodeset.push(AttributeWrapper(name,attrs[name],node))
nodeset.push(AttributeWrapper.getAttributeWrapper(name,attrs[name],node))
else:
attr=attrs.get(test.name)
if attr!=None:
#nodeset.push(AttributeWrapper(test.name,attr,node))
nodeset.push(AttributeWrapper.getAttributeWrapper(test.name,attr,node))
return nodeset
def _child(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
return NodeUtil.getChildNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _descendant(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
return NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _descendantOrSelf(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if NodeUtil.attrMatch(node,attrName,attrValue) and test.match(node):
nodeset.push(node)
return NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _following(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
child=node
while True:
child=child.nextSibling
if not child:
break
if NodeUtil.attrMatch(child,attrName,attrValue) and test.match(child):
nodeset.push(child)
nodeset=NodeUtil.getDescendantNodes(test,child,nodeset,attrName,attrValue,None,None)
node=node.parentNode
if not node:
break
return nodeset
def _followingSibling(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.nextSibling
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex)
if test.match(node):
nodeset.push(node)
return nodeset
def _namespace(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
# not implemented
return nodeset
def _parent(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
nodeType=node.nodeType
if nodeType==NodeTypeDOM.DOCUMENT_NODE:
return nodeset
if nodeType==NodeTypeDOM.ATTRIBUTE_NODE:
nodeset.push(node.ownerElement)
return nodeset
node=node.parentNode
if test.match(node):
nodeset.push(node)
return nodeset
def _preceding(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
parents=[]
while True:
parents.insert(0,node)
node=node.parentNode
if not node:
break
for node in parents[1:]:
siblings=[]
while True:
node=node.previousSibling
if not node:
break
siblings.insert(0,node)
for node in siblings:
if NodeUtil.attrMatch(node,attrName,attrValue) and test.match(node):
nodeset.push(node)
nodeset=NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,None,None)
return nodeset
def _precedingSibling(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.previousSibling
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
return nodeset
def _self(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if test.match(node):
nodeset.push(node)
return nodeset
axises=ExtDict({
'ancestor' :[True ,_ancestor ,False]
, 'ancestor-or-self' :[True ,_ancestorOrSelf ,False]
, 'attribute' :[False,_attribute ,False]
, 'child' :[False,_child ,True ]
, 'descendant' :[False,_descendant ,True ]
, 'descendant-or-self':[False,_descendantOrSelf,True ]
, 'following' :[False,_following ,True ]
, 'following-sibling' :[False,_followingSibling,False]
, 'namespace' :[False,_namespace ,False]
, 'parent' :[False,_parent ,False]
, 'preceding' :[True ,_preceding ,True ]
, 'preceding-sibling' :[True ,_precedingSibling,False]
, 'self' :[False,_self ,False]
})
@classmethod
def _cself(cls):
return cls('self',NodeType('node'))
@classmethod
def parent(cls):
return cls('parent',NodeType('node'))
@classmethod
def parse(cls,lexer):
(parent,_cself,axises)=(cls.parent,cls._cself,cls.axises)
if lexer.peek()=='.':
step=_cself()
lexer.next()
elif lexer.peek()=='..':
step=parent()
lexer.next()
else:
if lexer.peek()=='@':
axis='attribute'
lexer.next()
if lexer.empty():
throwError(u'missing attribute name')
else:
if lexer.peek(1)=='::':
ch=lexer.peek()[0:1]
if not re_has_ualpha.search(ch):
throwError(u'bad token: %s' % (lexer.next()))
axis=lexer.next()
lexer.next()
if not axises.get(axis):
throwError(u'invalid axis: %s' % (axis))
if lexer.empty():
throwError(u'missing node name')
else:
axis='child'
token=lexer.peek()
ch=token[0:1]
if not re_has_ualpha.search(ch):
if token=='*':
test=NameTest.parse(lexer)
else:
throwError(u'bad token: %s' % (lexer.next()))
else:
if lexer.peek(1)=='(':
if not NodeType.types.get(token):
throwError(u'invalid node type: %s' % (token))
test=NodeType.parse(lexer)
else:
test=NameTest.parse(lexer)
step=Step(axis,test)
BaseExprHasPredicates.parsePredicates(lexer,step)
return step
#} // end of Step
#{ // NodeType
class NodeType(BaseExpr):
def __init__(self,name,literal=None):
self.name=name
self.literal=literal
self.type=NodeType.typeNums.get(name,NodeType.typeNums.node)
self.notOnlyElement=True
def match(self,node):
return self.type==NodeTypeDOM.ANY_NODE or self.type==node.nodeType
def show(self,indent=''):
t=''
t+=indent+'nodetype: '+toString(self.type)+'\n'
if self.literal:
indent+=indent_space
t+=self.literal.show(indent)
return t
types=ExtDict({
'comment' :1
, 'text' :1
, 'processing-instruction':1
, 'node' :1
})
typeNums=ExtDict({
'comment' :NodeTypeDOM.COMMENT_NODE
, 'text' :NodeTypeDOM.TEXT_NODE
, 'processing-instruction':NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
, 'node' :NodeTypeDOM.ANY_NODE
})
@classmethod
def parse(cls,lexer):
type=lexer.next()
lexer.next()
if lexer.empty():
throwError(u'bad nodetype')
ch=lexer.peek()[0:1]
literal=None
if ch=='"' or ch=="'":
literal=Literal.parse(lexer)
if lexer.empty():
throwError(u'bad nodetype')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
return cls(type,literal)
#} // end of NodeType
#{ // NameTest
class NameTest(BaseExpr):
def __init__(self,name):
self.name=name.lower()
def match(self,node):
type=node.nodeType
if type==NodeTypeDOM.ELEMENT_NODE or type==NodeTypeDOM.ATTRIBUTE_NODE:
if self.name=='*' or self.name==node.nodeName:
return True
return False
def show(self,indent=''):
t=''
t+=indent+'nametest: '+self.name+'\n'
return t
@classmethod
def parse(cls,lexer):
if lexer.peek()!= '*' and lexer.peek(1)==':' and lexer.peek(2)=='*':
return cls(lexer.next()+lexer.next()+lexer.next())
return cls(lexer.next())
#} // end of NameTest
#{ // VariableReference
class VariableReference(BaseExpr):
def __init__(self,name):
self.name=name[1:]
self.datatype='void'
def show(self,indent=''):
t=''
t+=indent+'variable: '+self.name+'\n'
return t
@classmethod
def parse(cls,lexer):
token=lexer.next()
if len(token)<2:
throwError(u'unnamed variable reference')
return cls(token)
#} // end of VariableReference
#{ // Literal
class Literal(BaseExpr):
def __init__(self,text):
self.text=text[1:-1]
self.datatype='string'
def evaluate(self,ctx):
result=self.text
return result
def show(self,indent=''):
t=''
t+=indent+'literal: '+self.text+'\n'
return t
@classmethod
def parse(cls,lexer):
token=lexer.next()
if len(token)<2:
throwError(u'unclosed literal string')
return cls(token)
#} // end of Literal
#{ // Number
class Number(BaseExpr):
def __init__(self,digit):
self.digit=toNumber(digit)
self.datatype='number'
def evaluate(self,ctx):
result=self.digit
return result
def show(self,indent=''):
t=''
t+=indent+'number: '+toString(self.digit)+'\n'
return t
@classmethod
def parse(cls,lexer):
return cls(lexer.next())
#} // end of Number
#{ // FunctionCall
class FunctionCall(BaseExpr):
def __init__(self,name):
info=self.funcs.get(name)
if not info:
throwError(u'%s is not a function' % (name))
self.name=name
self.func=info[0]
self.args=[]
self.datatype=info[1]
#self.needContextPosition=True if info[2] else False
if info[2]:
self.needContextPosition=True
else:
self.needContextPosition=False
self.needContextNodeInfo=info[3]
#self.needContextNode=self.needContextNodeInfo[0] if 0<len(self.needContextNodeInfo) else False
if 0<len(self.needContextNodeInfo):
self.needContextNode=self.needContextNodeInfo[0]
else:
self.needContextNode=False
def evaluate(self,ctx):
result=self.func(ctx,*self.args)
return result
def arg(self,arg):
self.args.append(arg)
if getattr(arg,'needContextPosition',None):
self.needContextPosition=True
args=self.args
if getattr(arg,'needContextNode',None):
#args.needContextNode=True
self.needContextNode=True
#self.needContextNode=args.needContextNode or self.needContextNodeInfo[len(args)]
if not getattr(self,'needContextNode',None) and len(args)<len(self.needContextNodeInfo):
self.needContextNode=self.needContextNodeInfo[len(args)]
def show(self,indent=''):
t=''
t+=indent+'function: '+self.name+'\n'
indent+=indent_space
if 0<len(self.args):
t+=indent+'arguments: '+'\n'
indent+=indent_space
for arg in self.args:
t+=arg.show(indent)
return t
# --- Local Functions
def _contextNode(self,*arguments):
if len(arguments)!=0:
throwError(u'Function context-node expects ()')
nodeset=NodeSet()
nodeset.push(self.node)
return nodeset
def _rootNode(self,*arguments):
if len(arguments)!=0:
throwError(u'Function root-node expects ()')
nodeset=NodeSet()
ctxn=self.node
if ctxn.nodeType==NodeTypeDOM.DOCUMENT_NODE:
nodeset.push(ctxn)
else:
nodeset.push(ctxn.ownerDocument)
return nodeset
def _last(self,*arguments):
if len(arguments)!=0:
throwError(u'Function last expects ()')
return self.last
def _position(self,*arguments):
if len(arguments)!=0:
throwError(u'Function position expects ()')
return self.position
def _count(self,*arguments):
if len(arguments)!=1:
throwError(u'Function count expects (nodeset)')
nodeset=arguments[0].evaluate(self)
if not nodeset.isNodeSet:
throwError(u'Function count expects (nodeset)')
return nodeset.length
def _id(self,*arguments):
if len(arguments)!=1:
throwError(u'Function id expects (object)')
s=arguments[0]
ctxn=self.node
if ctxn.nodeType==NodeTypeDOM.DOCUMENT_NODE:
doc=ctxn
else:
doc=ctxn.ownerDocument
s=s.string(self)
ids=re_seqspace.split(s)
nodeset=NodeSet()
for id in ids:
for elm in doc.findAll(id=id):
nodeset.push(elm)
nodeset.isSorted=False
return nodeset
def _localName(self,*arguments):
alen=len(arguments)
if alen<0 or 1<alen:
throwError(u'Function local-name expects (nodeset?)')
if alen==0:
node=self.node
else:
nodeset=arguments[0]
nodeset=nodeset.evaluate(self)
if getattr(nodeset,'isNodeSet',None):
node=nodeset.first()
return ''+node.nodeName
def _name(self,*arguments):
# not implemented
return FunctionCall.funcs['local-name'][0](self,*arguments)
def _namespaceUri(self,*arguments):
# not implemented
return ''
def _string(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function string expects (object?)')
return s
def _concat(self,*arguments):
if len(arguments)<2:
throwError('Function concat expects (string, string[, ...])')
t=''
for argument in arguments:
t+=argument.string(self)
return t
def _startsWith(self,*arguments):
if len(arguments)!=2:
throwError('Function starts-with expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1 and s1.index(s2)==0:
# return True
#else:
# return False
if s1.find(s2)==0:
return True
else:
return False
def _contains(self,*arguments):
if len(arguments)!=2:
throwError('Function contains expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# return True
#else:
# return False
n=s1.find(s2)
if n<0:
return False
else:
return True
def _substring(self,*arguments):
alen=len(arguments)
if alen<2 or 3<alen:
throwError(u'Function substring expects (string, number, number?)')
(s,n1)=(arguments[0],arguments[1])
s=s.string(self)
n1=n1.number(self)
if alen==2:
n2=len(s)-n1+1
elif alen==3:
n2=arguments[2]
n2=n2.number(self)
if n1=='NaN' or n2=='NaN' or n1=='-Infinity' or n2=='-Infinity' or n1=='Infinity':
return u''
# n1,n2:origin=1 a1,a2:origin=0
n1=int(round(n1))
a1=n1-1
if a1<0: a1=0
if n2=='Infinity':
return s[a1:]
else:
n2=int(round(n2))
a2=n1+n2-1
return s[a1:a2]
def _substringBefore(self,*arguments):
if len(arguments)!=2:
throwError('Function substring-before expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# n=s1.index(s2)
#else:
# return ''
n=s1.find(s2)
if n<0:
return ''
return s1[:n]
def _substringAfter(self,*arguments):
if len(arguments)!=2:
throwError('Function substring-after expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# n=s1.index(s2)
#else:
# return ''
n=s1.find(s2)
if n<0:
return ''
return s1[n+len(s2):]
def _substringLength(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function string-length expects (string?)')
return len(s)
def _normalizeSpace(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function normalize-space expects (string?)')
return re_lastspace.sub('',re_firstspace.sub('',re_seqspace.sub(' ',s)))
def _translate(self,*arguments):
if len(arguments)!=3:
throwError('Function translate expects (string, string, string)')
(s1,s2,s3)=(arguments[0],arguments[1],arguments[2])
s1=s1.string(self)
s2=s2.string(self)
s3=s3.string(self)
_map={}
for i in range(0,len(s2)):
ch=s2[i]
if not _map.get(ch):
#_map[ch]=s3[i] if i<len(s3) else ''
if i<len(s3):
_map[ch]=s3[i]
else:
_map[ch]=''
t=''
for ch in s1:
replace=_map.get(ch)
#t+=replace if replace!=None else ch
if replace!=None:
t+=replace
else:
t+=ch
return t
def _boolean(self,*arguments):
if len(arguments)!=1:
throwError(u'Function boolean expects (object)')
b=arguments[0]
b=b.bool(self)
return b
def _not(self,*arguments):
if len(arguments)!=1:
throwError(u'Function not expects (object)')
b=arguments[0]
b=b.bool(self)
return not b
def _true(self,*arguments):
if len(arguments)!=0:
throwError(u'Function true expects ()')
return True
def _false(self,*arguments):
if len(arguments)!=0:
throwError(u'Function false expects ()')
return False
def _lang(self,*arguments):
# not implemented
return False
def _number(self,*arguments):
alen=len(arguments)
if alen==0:
n=NodeUtil.to('number',self.node)
elif alen==1:
n=arguments[0]
n=n.number(self)
else:
throwError(u'Function number expects (object?)')
if isinstance(n,int):
return n
elif isinstance(n,float):
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
else:
return 'NaN'
def _sum(self,*arguments):
if len(arguments)!=1:
throwError(u'Function sum expects (nodeset)')
nodeset=arguments[0]
nodeset=nodeset.evaluate(self)
if not getattr(nodeset,'isNodeSet',None):
throwError(u'Function sum expects (nodeset)')
nodes=nodeset.list()
n=0
for node in nodes:
n+=NodeUtil.to('number',node)
return n
def _floor(self,*arguments):
if len(arguments)!=1:
throwError(u'Function floor expects (number)')
n=arguments[0]
n=n.number(self)
return int(math.floor(n))
def _ceiling(self,*arguments):
if len(arguments)!=1:
throwError(u'Function ceiling expects (number)')
n=arguments[0]
n=n.number(self)
return int(math.ceil(n))
def _round(self,*arguments):
if len(arguments)!=1:
throwError(u'Function round expects (number)')
n=arguments[0]
n=n.number(self)
return int(round(n))
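# Each entry below maps an XPath function name to a 4-item list:
# [implementation, result datatype, needs-context-position flag,
# per-argument context-node info flags] (see __init__ above).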
funcs=ExtDict({
'context-node' :[_contextNode ,'nodeset',False,[True]]
, 'root-node' :[_rootNode ,'nodeset',False,[]]
, 'last' :[_last ,'number' ,True ,[]]
, 'position' :[_position ,'number' ,True ,[]]
, 'count' :[_count ,'number' ,False,[]]
, 'id' :[_id ,'nodeset',False,[]]
, 'local-name' :[_localName ,'string' ,False,[True ,False]]
, 'name' :[_name ,'string' ,False,[True ,False]]
, 'namespace-uri' :[_namespaceUri ,'string' ,False,[True ,False]]
, 'string' :[_string ,'string' ,False,[True ,False]]
, 'concat' :[_concat ,'string' ,False,[]]
, 'starts-with' :[_startsWith ,'boolean',False,[]]
, 'contains' :[_contains ,'boolean',False,[]]
, 'substring' :[_substring ,'string' ,False,[]]
, 'substring-before':[_substringBefore,'string' ,False,[]]
, 'substring-after' :[_substringAfter ,'string' ,False,[]]
, 'string-length' :[_substringLength,'number' ,False,[True ,False]]
, 'normalize-space' :[_normalizeSpace ,'string' ,False,[True ,False]]
, 'translate' :[_translate ,'string' ,False,[]]
, 'boolean' :[_boolean ,'boolean',False,[]]
, 'not' :[_not ,'boolean',False,[]]
, 'true' :[_true ,'boolean',False,[]]
, 'false' :[_false ,'boolean',False,[]]
, 'lang' :[_lang ,'boolean',False,[]]
, 'number' :[_number ,'number' ,False,[True ,False]]
, 'sum' :[_sum ,'number' ,False,[]]
, 'floor' :[_floor ,'number' ,False,[]]
, 'ceiling' :[_ceiling ,'number' ,False,[]]
, 'round' :[_round ,'number' ,False,[]]
})
@classmethod
def parse(cls,lexer):
func=cls(lexer.next())
lexer.next()
while lexer.peek()!=')':
if lexer.empty():
throwError(u'missing function argument list')
expr=BinaryExpr.parse(lexer)
func.arg(expr)
if lexer.peek()!=',':
break
lexer.next()
if lexer.empty():
throwError(u'unclosed function argument list')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
return func
#} // end of FunctionCall
#{ // NodeSet
class NodeSet(object):
def __init__(self):
self.length=0
self.nodes=[]
self.seen={}
self.idIndexMap=None
self.reserveDels=[]
self.isNodeSet=True
self.isSorted=True
self.sortOff=False
self.only=None
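# Note: 'only' holds a single node as a fast path; push()/unshift() fall back to
# the full 'nodes' list (deduplicated via 'seen') once a second node arrives.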
def merge(self,nodeset):
self.isSorted=False
if getattr(nodeset,'only',None):
return self.push(nodeset.only)
if getattr(self,'only',None):
only=self.only
self.only=None
self.push(only)
self.length-=1
map(self._add,nodeset.nodes)
def sort(self):
if getattr(self,'only',None):
return
if getattr(self,'sortOff',None):
return
if getattr(self,'isSorted',None):
return
self.isSorted=True
self.idIndexMap=None
nodes=self.nodes
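# _comp orders two nodes in document order: attribute nodes are replaced by their
# parent elements, depths are equalised, then siblings are compared under the
# nearest common ancestor.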
def _comp(a,b):
if a.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: a=a.parentNode
if b.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: b=b.parentNode
if a==b:
return 0
(node1,node2)=(a,b)
(ancestor1,ancestor2)=(a,b)
(deep1,deep2)=(0,0)
while True:
ancestor1=ancestor1.parentNode
if not ancestor1:
break
deep1+=1
while True:
ancestor2=ancestor2.parentNode
if not ancestor2:
break
deep2+=1
if deep1>deep2:
while deep1!=deep2:
deep1-=1
node1=node1.parentNode
if node1==node2:
return 1
elif deep2>deep1:
while deep2!=deep1:
deep2-=1
node2=node2.parentNode
if node1==node2:
return -1
while True:
ancestor1=node1.parentNode
ancestor2=node2.parentNode
if ancestor1==ancestor2:
break
node1=ancestor1
node2=ancestor2
while True:
node1=node1.nextSibling
if not node1:
break
if node1==node2:
return -1
return 1
def index_comp(a,b):
if a.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: a=a.parentNode
if b.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: b=b.parentNode
return cmp(a._sortindex,b._sortindex)
if USE_NODE_INDEX:
nodes.sort(index_comp)
else:
nodes.sort(_comp)
def reserveDelByNodeID(self,id,offset,reverse):
_map=self.createIdIndexMap()
index=_map.get(id)
if index:
if reverse and index<(self.length-offset-1) or not reverse and offset<index:
self.reserveDels.append(index)
def reserveDelByNode(self,node,offset,reverse=False):
self.reserveDelByNodeID(self.NodeID.get(node),offset,reverse)
def doDel(self):
if len(self.reserveDels)<=0:
return
map(self._del,sorted(self.reserveDels,lambda x,y:cmp(y,x)))
self.reserveDels=[]
self.idIndexMap=None
def createIdIndexMap(self):
if getattr(self,'idIndexMap',None):
return self.idIndexMap
else:
_map=self.idIndexMap={}
nodes=self.nodes
for i in range(0,len(nodes)):
node=nodes[i]
id=self.NodeID.get(node)
_map[id]=i
return _map
def _del(self,index):
self.length-=1
if getattr(self,'only',None):
self.only=None
else:
node=self.nodes[index]
if getattr(self,'_first',None)==node:
self._first=None
self._firstSourceIndex=None
self._firstSubIndex=None
del(self.seen[self.NodeID.get(node)])
del(self.nodes[index])
def delDescendant(self,elm,offset):
if getattr(self,'only',None):
return
nodeType=elm.nodeType
if nodeType!=NodeTypeDOM.ELEMENT_NODE and nodeType!=NodeTypeDOM.DOCUMENT_NODE:
return
nodes=self.nodes
i=offset+1
while i<len(nodes):
if elm.contains(nodes[i]):
self._del(i)
i-=1
i+=1
def _add(self,node,reverse=False):
seen=self.seen
id=self.NodeID.get(node)
if seen.get(id):
return
seen[id]=True
self.length+=1
if reverse:
self.nodes.insert(0,node)
else:
self.nodes.append(node)
def unshift(self,node):
if self.length<=0:
self.length+=1
self.only=node
return
if getattr(self,'only',None):
only=self.only
self.only=None
self.unshift(only)
self.length-=1
return self._add(node,True)
def push(self,node):
if self.length<=0:
self.length+=1
self.only=node
return
if getattr(self,'only',None):
only=self.only
self.only=None
self.push(only)
self.length-=1
return self._add(node)
def first(self):
if getattr(self,'only',None):
return self.only
if 0<len(self.nodes):
self.sort()
return self.nodes[0]
else:
return None
def list(self):
if getattr(self,'only',None):
return [self.only]
self.sort()
return self.nodes
def string(self):
node=self.only or self.first()
#return NodeUtil.to('string',node) if node else ''
if node:
return NodeUtil.to('string',node)
else:
return ''
def bool(self):
return toBoolean(self.length or self.only)
def number(self):
return toNumber(self.string())
def iterator(self,reverse=False):
self.sort()
_info=ExtDict({
'nodeset':self
, 'count':0
})
if not reverse:
calcIndex=(lambda x,y:x)
else:
calcIndex=(lambda x,y:y.length-x-1)
def iter():
nodeset=_info.nodeset
index=calcIndex(_info.count,nodeset)
_info['count']+=1
if getattr(nodeset,'only',None) and index==0:
return nodeset.only
#return nodeset.nodes[index] if 0<=index and index<len(nodeset.nodes) else None
if 0<=index and index<len(nodeset.nodes):
return nodeset.nodes[index]
else:
return None
return iter
class nodeID(object):
def __init__(self):
self.uuid=1
def get(self,node):
id=getattr(node,'__bsxpath_id__',None)
if id:
return id
id=node.__bsxpath_id__=self.uuid
self.uuid+=1
return id
NodeID=nodeID()
#} // end of NodeSet
#{ // XPathEvaluator
class XPathResult(object):
ANY_TYPE =0
NUMBER_TYPE =1
STRING_TYPE =2
BOOLEAN_TYPE =3
UNORDERED_NODE_ITERATOR_TYPE=4
ORDERED_NODE_ITERATOR_TYPE =5
UNORDERED_NODE_SNAPSHOT_TYPE=6
ORDERED_NODE_SNAPSHOT_TYPE =7
ANY_UNORDERED_NODE_TYPE =8
FIRST_ORDERED_NODE_TYPE =9
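# These result-type codes mirror the constants of the DOM XPathResult interface.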
def __init__(self,value,type):
if type==XPathResult.ANY_TYPE:
tov=typeof(value)
if tov=='object' : type=self.UNORDERED_NODE_ITERATOR_TYPE
if tov=='boolean': type=self.BOOLEAN_TYPE
if tov=='string' : type=self.STRING_TYPE
if tov=='number' : type=self.NUMBER_TYPE
if type<self.NUMBER_TYPE or self.FIRST_ORDERED_NODE_TYPE<type:
throwError(u'unknown type: %d' %(type))
self.resultType=type
if type==self.NUMBER_TYPE:
#self.numberValue=value.number() if getattr(value,'isNodeSet',None) else toNumber(value)
if getattr(value,'isNodeSet',None):
self.numberValue=value.number()
else:
self.numberValue=toNumber(value)
elif type==self.STRING_TYPE:
#self.stringValue=value.string() if getattr(value,'isNodeSet',None) else toString(value)
if getattr(value,'isNodeSet',None):
self.stringValue=value.string()
else:
self.stringValue=toString(value)
elif type==self.BOOLEAN_TYPE:
#self.booleanValue=value.bool() if getattr(value,'isNodeSet',None) else toBoolean(value)
if getattr(value,'isNodeSet',None):
self.booleanValue=value.bool()
else:
self.booleanValue=toBoolean(value)
elif type==self.ANY_UNORDERED_NODE_TYPE or type==self.FIRST_ORDERED_NODE_TYPE:
self.singleNodeValue=value.first()
else:
self.nodes=value.list()
self.snapshotLength=value.length
self.index=0
self.invalidIteratorState=False
def iterateNext(self):
node=self.nodes[self.index]
self.index+=1
return node
def snapshotItem(self,i):
return self.nodes[i]
class XPathExpression(object):
def __init__(self,expr,resolver):
if len(expr)<=0:
throwError(u'no expression')
lexer=self.lexer=Lexer(expr)
if lexer.empty():
throwError(u'no expression')
self.expr=BinaryExpr.parse(lexer)
if not lexer.empty():
throwError(u'bad token: %s' % (lexer.next()))
def evaluate(self,node,type):
return XPathResult(self.expr.evaluate(Ctx(node)),type)
class BSXPathEvaluator(BeautifulSoup):
def __init__(self, *args, **kwargs):
BeautifulSoup.__init__(self, *args, **kwargs)
self._string=u'[object HTMLDocument]'
self._fix_table()
self._init_index()
SELF_CLOSING_TAGS=buildTagMap(None,['br','hr','input','img','meta','spacer','frame','base'])
# exclude 'link' for XML
def _init_index(self):
idx=self._sortindex=1
self._cachemap=None
for node in NodeUtilBS.it_deepNodes(self):
idx=node._sortindex=idx+1
for node in self.findAll():
node.attrMap=dict(node.attrs)
def _fix_table(self):
tables=self.findAll('table')
for table in tables:
parent=table.parent
contents=getattr(table,'contents',[])
if len(contents)<=0: continue
(tbody,tr)=(None,None)
node=table.contents[0]
while node:
_next=node.nextSibling
name=getattr(node,'name',None)
if name in ('thead','tbody','tfoot',):
(tbody,tr)=(None,None)
elif name in ('tr',):
tr=None
if not tbody:
tbody=Tag(self,'tbody')
table.insert(table.contents.index(node),tbody)
tbody.append(node)
elif name in ('th','td',):
if not tbody:
tbody=Tag(self,'tbody')
table.insert(table.contents.index(node),tbody)
if not tr:
tr=Tag(self,'tr')
tbody.append(tr)
tr.append(node)
else:
parent.insert(parent.contents.index(table),node)
node=_next
def __str__(self,encoding=DEFAULT_OUTPUT_ENCODING):
return self._string
def __unicode__(self):
return self._string
def decode(self):
return self._string
def createExpression(self,expr,resolver):
return XPathExpression(expr,resolver)
def createNSResolver(self,nodeResolver):
# not implemented
pass
def evaluate(self,expr,context,resolver,type,result):
if not context:
context=self
if isinstance(context,list):
context=context[0]
return self.createExpression(expr,resolver).evaluate(context,type)
def getItemList(self,expr,context=None):
elms=[]
result=self.evaluate(expr,context,None,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE,None)
for i in range(0,result.snapshotLength):
elms.append(result.snapshotItem(i))
return elms
def getFirstItem(self,expr,context=None):
elm=self.evaluate(expr,context,None,XPathResult.FIRST_ORDERED_NODE_TYPE,None).singleNodeValue
return elm
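# Illustrative use of the evaluator API defined above (the HTML string and the
# expressions are examples only):
#   document = BSXPathEvaluator(html_text)
#   first_title = document.getFirstItem('//title')
#   paragraphs = document.getItemList('//p')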
def applyXPath(self,context,expr):
start_t=datetime.datetime.now()
expression=self.createExpression(expr,None)
result=expression.evaluate(context,XPathResult.ANY_TYPE)
time=datetime.datetime.now()-start_t
resultType=result.resultType
if XPathResult.BOOLEAN_TYPE<resultType:
result=expression.evaluate(context,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE)
array=[]
for i in range(0,result.snapshotLength):
array.append(result.snapshotItem(i))
resultItems=array
else:
if resultType==XPathResult.NUMBER_TYPE:
resultItems=result.numberValue
elif resultType==XPathResult.STRING_TYPE:
resultItems=result.stringValue
elif resultType==XPathResult.BOOLEAN_TYPE:
resultItems=result.booleanValue
else:
resultItems=None
return (resultItems,time,resultType)
#} // end of XPathEvaluator
if __name__ == '__main__':
import sys
import optparse
import pdb
options=None
def prn(obj):
def prn_sub(obj,indent):
indent+=u' '
if isinstance(obj,list):
for i in range(0,len(obj)):
print u'[%d]' % (i)
prn_sub(obj[i],indent)
elif isinstance(obj,dict):
for mem in obj:
print u'[%s]' % (mem)
prn_sub(obj[mem],indent)
elif getattr(obj,'nodeType',None) or isinstance(obj,basestring):
str=indent+re.sub(r'(\r\n|\r|\n)',r'\1'+indent,unicode(obj))
print str
else:
print obj
prn_sub(obj,u'')
def test():
global options
if options.expr:
if options.html:
document=BSXPathEvaluator(options.html)
elif options.file:
fp=open(options.file)
document=BSXPathEvaluator(fp.read())
fp.close()
else:
document=BSXPathEvaluator(sys.stdin.read())
(result,time,resultType)=document.applyXPath(document,options.expr)
prn(result)
else:
optparser.print_help()
optparser=optparse.OptionParser()
optparser.add_option(
'-e','--expr'
, action='store'
, metavar='<expression>'
, help=u'expression: XPATH expression'
, dest='expr'
)
optparser.add_option(
'-t','--html'
, action='store'
, metavar='<text>'
, help=u'text: HTML text'
, dest='html'
)
optparser.add_option(
'-f','--file'
, action='store'
, metavar='<filename>'
, help=u'filename: HTML filename'
, dest='file'
)
optparser.add_option(
'-d','--debug'
, action='store_true'
, help=u'use pdb'
, dest='debug'
)
(options,args)=optparser.parse_args()
if options.debug:
pdb.run('test()')
else:
test()
#[History]
#
# 0.01e: 2009-04-12
# - exclude 'link' tag from SELF_CLOSING_TAGS (for XML)
# - add __str__() and __unicode__() to AttributeWrapper class
#
# 0.01d: 2009-03-28
# - performance improvement: node searching(make attrMap in advance)
#
# 0.01c: 2009-03-28
# - performance improvement: node sorting(indexing) and node search(caching)
#
# 0.01b: 2009-03-27
# - fixed 'singleNodeValue' bug
# result = document.evaluate('//title[1]',document,None,XPathResult.FIRST_ORDERED_NODE_TYPE,None).singleNodeValue
# # returned 'None', even though first-value exists
#
# 0.01a: 2009-03-27
# - fixed string() bug
# BSXPath.py -e "boolean(//p[contains(string(),\"br\")])" -t "<html><head></head><body><p>text before<br />text after</p></body></html>"
# # returned 'True', even though 'False' is right
# - cope with <table> problems on malformed HTML
# may convert '<table><th></th><td></td></table>' to '<table><tbody><tr><th></th><td></td></tr></tbody></table>' automatically
#
# 0.01 : 2009-03-25
# first release
#
#■ End of BSXPath.py
| apache-2.0 |
RRZE-HPC/pycachesim | setup.py | 1 | 4792 | #!/usr/bin/env python
from __future__ import absolute_import
from setuptools.extension import Extension
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
import os
import io
import re
#import numpy
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Stolen from pip
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
# Stolen from pip
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
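# For example, find_version('cachesim', '__init__.py') expects cachesim/__init__.py
# to contain a line such as __version__ = '0.2.5' (the version shown is illustrative).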
setup(
name='pycachesim',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=find_version('cachesim', '__init__.py'),
description='Python Cache Hierarchy Simulator',
long_description=long_description,
long_description_content_type='text/x-rst',
# The project's main homepage.
url='https://github.com/RRZE-HPC/pycachesim',
# Author details
author='Julian Hammer',
author_email='[email protected]',
# Choose your license
license='AGPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# What does your project relate to?
keywords='hpc performance benchmark analysis',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
ext_modules=[
Extension(
'cachesim.backend',
sources=['cachesim/backend.c'],
extra_compile_args=['-std=c99'],
#include_dirs=[numpy.get_include()]
)
],
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files, see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'cachesim': ['*.h']
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'cachesim=cachesim.frontend:main',
],
},
)
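# With the console_scripts entry point above, installing the package (for example
# with `pip install .`) exposes a `cachesim` command that dispatches to
# cachesim.frontend:main.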
| agpl-3.0 |
martinwicke/tensorflow | tensorflow/python/client/device_lib.py | 149 | 1308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices():
"""List the available devices available in the local process.
Returns:
A list of `DeviceAttribute` protocol buffers.
"""
def _convert(pb_str):
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
return [_convert(s) for s in pywrap_tensorflow.list_devices()]
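# Minimal usage sketch (assumes a TensorFlow installation; variable names are
# illustrative):
#   from tensorflow.python.client import device_lib
#   gpu_names = [d.name for d in device_lib.list_local_devices()
#                if d.device_type == 'GPU']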
| apache-2.0 |
wxthon/googletest | scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| bsd-3-clause |
peterm-itr/edx-platform | lms/djangoapps/mobile_api/tests.py | 8 | 1648 | """
Tests for mobile API utilities
"""
import ddt
from rest_framework.test import APITestCase
from courseware.tests.factories import UserFactory
from student import auth
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from .utils import mobile_available_when_enrolled
ROLE_CASES = (
(auth.CourseBetaTesterRole, True),
(auth.CourseStaffRole, True),
(auth.CourseInstructorRole, True),
(None, False)
)
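# Each (role, should_have_access) pair above is fed to test_mobile_role_access
# below via @ddt.data(*ROLE_CASES) and unpacked by @ddt.unpack.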
@ddt.ddt
class TestMobileApiUtils(ModuleStoreTestCase, APITestCase):
"""
Tests for mobile API utilities
"""
def setUp(self):
self.user = UserFactory.create()
@ddt.data(*ROLE_CASES)
@ddt.unpack
def test_mobile_role_access(self, role, should_have_access):
"""
Verifies that our mobile access function properly handles using roles to grant access
"""
course = CourseFactory.create(mobile_available=False)
if role:
role(course.id).add_users(self.user)
self.assertEqual(should_have_access, mobile_available_when_enrolled(course, self.user))
def test_mobile_explicit_access(self):
"""
Verifies that our mobile access function listens to the mobile_available flag as it should
"""
course = CourseFactory.create(mobile_available=True)
self.assertTrue(mobile_available_when_enrolled(course, self.user))
def test_missing_course(self):
"""
Verifies that we handle the case where a course doesn't exist
"""
self.assertFalse(mobile_available_when_enrolled(None, self.user))
| agpl-3.0 |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/src/gdata/contentforshopping/client.py | 29 | 31884 | #!/usr/bin/python
#
# Copyright (C) 2010-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extend the gdata client for the Content API for Shopping."""
__author__ = 'afshar (Ali Afshar), dhermes (Daniel Hermes)'
import urllib
import atom.data
import gdata.client
from gdata.contentforshopping.data import ClientAccount
from gdata.contentforshopping.data import ClientAccountFeed
from gdata.contentforshopping.data import DatafeedEntry
from gdata.contentforshopping.data import DatafeedFeed
from gdata.contentforshopping.data import DataQualityEntry
from gdata.contentforshopping.data import DataQualityFeed
from gdata.contentforshopping.data import InventoryFeed
from gdata.contentforshopping.data import ProductEntry
from gdata.contentforshopping.data import ProductFeed
from gdata.contentforshopping.data import UsersEntry
from gdata.contentforshopping.data import UsersFeed
CFS_VERSION = 'v1'
CFS_HOST = 'content.googleapis.com'
CFS_URI = 'https://%s/content' % CFS_HOST
CFS_PROJECTION = 'schema'
class ContentForShoppingClient(gdata.client.GDClient):
"""Client for Content for Shopping API.
:param account_id: Merchant account ID. This value will be used by default
for all requests, but may be overridden on a
request-by-request basis.
:param api_version: The version of the API to target. Default value: 'v1'.
:param **kwargs: Pass all additional keywords to the GDClient constructor.
"""
api_version = '1.0'
def __init__(self, account_id=None, api_version=CFS_VERSION,
cfs_uri=CFS_URI, **kwargs):
self.cfs_account_id = account_id
self.cfs_api_version = api_version
self.cfs_uri = cfs_uri
gdata.client.GDClient.__init__(self, **kwargs)
def _create_uri(self, account_id, resource, path=(), use_projection=True,
dry_run=False, warnings=False, max_results=None,
start_token=None, start_index=None,
performance_start=None, performance_end=None):
"""Create a request uri from the given arguments.
If arguments are None, use the default client attributes.
"""
account_id = account_id or self.cfs_account_id
if account_id is None:
raise ValueError('No Account ID set. '
'Either set for the client, or per request')
segments = [self.cfs_uri, self.cfs_api_version, account_id, resource]
if use_projection:
segments.append(CFS_PROJECTION)
segments.extend(urllib.quote(value) for value in path)
result = '/'.join(segments)
request_params = []
if dry_run:
request_params.append('dry-run')
if warnings:
request_params.append('warnings')
if max_results is not None:
request_params.append('max-results=%s' % max_results)
if start_token is not None:
request_params.append('start-token=%s' % start_token)
if start_index is not None:
request_params.append('start-index=%s' % start_index)
if performance_start is not None:
request_params.append('performance.start=%s' % performance_start)
if performance_end is not None:
request_params.append('performance.end=%s' % performance_end)
request_params = '&'.join(request_params)
if request_params:
result = '%s?%s' % (result, request_params)
return result
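# Example (illustrative account and product IDs, default client settings):
#   _create_uri('123', 'items/products', path=['online:en:US:SKU1'], max_results=10)
# returns 'https://content.googleapis.com/content/v1/123/items/products/schema/
# online%3Aen%3AUS%3ASKU1?max-results=10' (URI shown wrapped across two lines).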
def _create_product_id(self, id, country, language, channel='online'):
return '%s:%s:%s:%s' % (channel, language, country, id)
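# For example, _create_product_id('SKU123', 'US', 'en') returns
# 'online:en:US:SKU123' (the SKU is illustrative).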
def _create_batch_feed(self, entries, operation, feed=None,
feed_class=ProductFeed):
if feed is None:
feed = feed_class()
for entry in entries:
entry.batch_operation = gdata.data.BatchOperation(type=operation)
feed.entry.append(entry)
return feed
# Operations on a single product
def get_product(self, id, country, language, account_id=None,
auth_token=None):
"""Get a product by id, country and language.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language)
uri = self._create_uri(account_id, 'items/products', path=[pid])
return self.get_entry(uri, desired_class=ProductEntry,
auth_token=auth_token)
GetProduct = get_product
def insert_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Create a new product, by posting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products',
dry_run=dry_run, warnings=warnings)
return self.post(product, uri=uri, auth_token=auth_token)
InsertProduct = insert_product
def update_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update a product, by putting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False
by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateProduct = update_product
def delete_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete a product
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteProduct = delete_product
# Operations on multiple products
def get_products(self, max_results=None, start_token=None, start_index=None,
performance_start=None, performance_end=None,
account_id=None, auth_token=None):
"""Get a feed of products for the account.
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_token: The start token of the feed provided by the API.
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param performance_start: The start date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param performance_end: The end date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'items/products',
max_results=max_results,
start_token=start_token,
start_index=start_index,
performance_start=performance_start,
performance_end=performance_end)
return self.get_feed(uri, auth_token=auth_token,
desired_class=ProductFeed)
GetProducts = get_products
def batch(self, feed, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Send a batch request.
:param feed: The feed of batch entries to send.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products', path=['batch'],
dry_run=dry_run, warnings=warnings)
return self.post(feed, uri=uri, auth_token=auth_token,
desired_class=ProductFeed)
Batch = batch
def insert_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
feed = self._create_batch_feed(products, 'insert')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
InsertProducts = insert_products
def update_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'update')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
UpdateProducts = update_products
def delete_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete the products using a batch request.
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'delete')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
DeleteProducts = delete_products
# Operations on datafeeds
def get_datafeeds(self, account_id=None):
"""Get the feed of datafeeds.
:param account_id: The Sub-Account ID. If omitted the default
Account ID will be used for this client.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False)
return self.get_feed(uri, desired_class=DatafeedFeed)
GetDatafeeds = get_datafeeds
# Operations on a single datafeed
def get_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Get the feed of a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DatafeedEntry)
GetDatafeed = get_datafeed
def insert_datafeed(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a datafeed.
:param entry: XML Content of post request required for registering a
datafeed.
:param account_id: The Sub-Account ID. If omitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertDatafeed = insert_datafeed
def update_datafeed(self, entry, feed_id, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the feed of a single datafeed.
:param entry: XML Content of put request required for updating a
datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.update(entry, auth_token=auth_token, uri=uri)
UpdateDatafeed = update_datafeed
def delete_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Delete a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteDatafeed = delete_datafeed
# Operations on client accounts
def get_client_accounts(self, max_results=None, start_index=None,
account_id=None, auth_token=None):
"""Get the feed of managed accounts
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
max_results=max_results, start_index=start_index,
use_projection=False)
return self.get_feed(uri, desired_class=ClientAccountFeed,
auth_token=auth_token)
GetClientAccounts = get_client_accounts
def get_client_account(self, client_account_id,
account_id=None, auth_token=None):
"""Get a managed account.
:param client_account_id: The Account ID of the subaccount being retrieved.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False)
return self.get_entry(uri, desired_class=ClientAccount,
auth_token=auth_token)
GetClientAccount = get_client_account
def insert_client_account(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a client account entry
:param entry: An entry of type ClientAccount
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertClientAccount = insert_client_account
def update_client_account(self, entry, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Update a client account
:param entry: An entry of type ClientAccount to update to
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateClientAccount = update_client_account
def delete_client_account(self, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Delete a client account
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteClientAccount = delete_client_account
def get_users_feed(self, account_id=None, auth_token=None):
"""Get the users feed for an account.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.get_feed(uri, auth_token=auth_token, desired_class=UsersFeed)
GetUsersFeed = get_users_feed
def get_users_entry(self, user_email, account_id=None, auth_token=None):
"""Get a users feed entry for an account.
:param user_email: Email of the user entry to be retrieved.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.get_entry(uri, auth_token=auth_token, desired_class=UsersEntry)
GetUsersEntry = get_users_entry
def insert_users_entry(self, entry, account_id=None, auth_token=None):
"""Insert a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertUsersEntry = insert_users_entry
def update_users_entry(self, entry, account_id=None, auth_token=None):
"""Update a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateUsersEntry = update_users_entry
def delete_users_entry(self, entry, account_id=None, auth_token=None):
"""Delete a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteUsersEntry = delete_users_entry
def get_data_quality_feed(self, account_id=None, auth_token=None,
max_results=None, start_index=None):
"""Get the data quality feed for an account.
:param max_results: The maximum number of results to return (default 25,
max 100).
:param start_index: The starting index of the feed to return.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'dataquality', use_projection=False,
max_results=max_results, start_index=start_index)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DataQualityFeed)
GetDataQualityFeed = get_data_quality_feed
def get_data_quality_entry(self, secondary_account_id=None,
account_id=None, auth_token=None):
"""Get the data quality feed entry for an account.
:param secondary_account_id: The Account ID of the secondary account. If
omitted the value of account_id is used.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
if secondary_account_id is None:
secondary_account_id = account_id or self.cfs_account_id
uri = self._create_uri(account_id, 'dataquality',
path=[secondary_account_id],
use_projection=False)
return self.get_entry(uri, auth_token=auth_token,
desired_class=DataQualityEntry)
GetDataQualityEntry = get_data_quality_entry
def update_inventory_entry(self, product, id, country, language, store_code,
account_id=None, auth_token=None):
"""Make a local product update, by putting the inventory entry.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
:param account_id: The Merchant Center Account ID. If omitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateInventoryEntry = update_inventory_entry
def add_local_id(self, product, id, country, language,
store_code, account_id=None):
"""Add an atom id to a local product with a local store specific URI.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
    :param account_id: The Merchant Center Account ID. If omitted the default
      Account ID will be used for this client.
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
product.id = atom.data.Id(uri)
return product
AddLocalId = add_local_id
def update_inventory_feed(self, products, account_id=None, auth_token=None):
"""Update a batch of local products, by putting the product entry feed.
:param products: A list containing entries of
:class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data
    :param account_id: The Merchant Center Account ID. If omitted the default
      Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
.. note:: Entries must have the atom:id element set. You can use
add_local_id to set this attribute using the store_code, product
id, country and language.
"""
feed = self._create_batch_feed(products, 'update',
feed_class=InventoryFeed)
uri = self._create_uri(account_id, 'inventory', path=['batch'],
use_projection=False)
return self.post(feed, uri=uri, auth_token=auth_token)
UpdateInventoryFeed = update_inventory_feed
| gpl-3.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/tests/__init__.py | 44 | 1749 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from test_views import *
from test_store import *
from test_auth import *
from test_admin import *
def suite():
suite = unittest.TestSuite()
for name in ['test_auth', 'test_store', 'test_views', 'test_admin']:
mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite'])
suite.addTest(mod.suite())
return suite
| apache-2.0 |
40223136/w11-2 | static/Brython3.1.0-20150301-090019/Lib/browser/object_storage.py | 627 | 1315 | import pickle
class __UnProvided():
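    """Sentinel type used by ObjectStorage.pop to detect that no default was given."""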
pass
class ObjectStorage():
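    """Dict-like wrapper around a storage object; keys and values are pickled."""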
def __init__(self, storage):
self.storage = storage
def __delitem__(self, key):
del self.storage[pickle.dumps(key)]
def __getitem__(self, key):
return pickle.loads(self.storage[pickle.dumps(key)])
def __setitem__(self, key, value):
self.storage[pickle.dumps(key)] = pickle.dumps(value)
def __contains__(self, key):
return pickle.dumps(key) in self.storage
def get(self, key, default=None):
if pickle.dumps(key) in self.storage:
            return pickle.loads(self.storage[pickle.dumps(key)])
return default
def pop(self, key, default=__UnProvided()):
if type(default) is __UnProvided or pickle.dumps(key) in self.storage:
return pickle.loads(self.storage.pop(pickle.dumps(key)))
return default
def __iter__(self):
keys = self.keys()
return keys.__iter__()
def keys(self):
return [pickle.loads(key) for key in self.storage.keys()]
def values(self):
return [pickle.loads(val) for val in self.storage.values()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
self.storage.clear()
def __len__(self):
return len(self.storage)
| gpl-3.0 |
andrewyoung1991/abjad | abjad/tools/layouttools/test/test_layouttools_set_line_breaks_by_line_duration_ge.py | 2 | 2235 | # -*- encoding: utf-8 -*-
from abjad import *
def test_layouttools_set_line_breaks_by_line_duration_ge_01():
r'''Iterate classes in expr and accumulate duration.
    Add line break after every total duration greater than or equal to line duration.
'''
staff = Staff()
staff.append(Measure((2, 8), "c'8 d'8"))
staff.append(Measure((2, 8), "e'8 f'8"))
staff.append(Measure((2, 8), "g'8 a'8"))
staff.append(Measure((2, 8), "b'8 c''8"))
layouttools.set_line_breaks_by_line_duration_ge(
staff,
Duration(4, 8),
)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
d'8
}
{
e'8
f'8
\break
}
{
g'8
a'8
}
{
b'8
c''8
\break
}
}
'''
)
assert inspect_(staff).is_well_formed()
def test_layouttools_set_line_breaks_by_line_duration_ge_02():
r'''Iterate classes in expr and accumulate duration.
    Add line break after every total duration greater than or equal to line duration.
'''
staff = Staff()
staff.append(Measure((2, 8), "c'8 d'8"))
staff.append(Measure((2, 8), "e'8 f'8"))
staff.append(Measure((2, 8), "g'8 a'8"))
staff.append(Measure((2, 8), "b'8 c''8"))
layouttools.set_line_breaks_by_line_duration_ge(
staff,
Duration(1, 8),
line_break_class=scoretools.Leaf,
)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
\break
d'8
\break
}
{
e'8
\break
f'8
\break
}
{
g'8
\break
a'8
\break
}
{
b'8
\break
c''8
\break
}
}
'''
)
assert inspect_(staff).is_well_formed() | gpl-3.0 |
Maspear/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yorvic/.vim | bundle/python-mode/pylibs/pylama/checkers/pylint/logilab/common/modutils.py | 1 | 21802 | # -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extension
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names has key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import splitext, join, abspath, isdir, dirname, exists, basename
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from . import STD_BLACKLIST, _handle_blacklist
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=1)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
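    """Proxy that delays importing `module` and fetching `obj` until first access."""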
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError, ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package splitted on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
module = load_module(curname, mp_file, mp_filename, mp_desc)
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
    """Load a Python module from its path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle package splitted in multiple
directories using __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding splitted module's name
"""
base = splitext(abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = abspath(path_)
if path and base[:len(path)] == path:
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = abspath(path)
if path and base.startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered latter
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's a already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
for modname, module in sys.modules.items():
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
del sys.modules[modname]
break
def is_python_source(filename):
"""
rtype: bool
return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered has standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError, ex:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return 0
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return 1
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return 0
for path in std_path:
if filename.startswith(abspath(path)):
return 1
return False
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
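    """search the zipimporter cache `pic` for modpath and return
    (ZIPFILE, path inside the archive, archive path)
    """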
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
                        '.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
imported = []
while modpath:
try:
_, mp_filename, mp_desc = find_module(modpath[0], path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
| gpl-3.0 |
ImpalaToGo/ImpalaToGo | thirdparty/gtest-1.6.0/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version information for use in the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| apache-2.0 |
blacklin/kbengine | kbe/res/scripts/common/Lib/ensurepip/__init__.py | 67 | 6388 | import os
import os.path
import pkgutil
import sys
import tempfile
__all__ = ["version", "bootstrap"]
_SETUPTOOLS_VERSION = "2.1"
_PIP_VERSION = "1.5.6"
# pip currently requires ssl support, so we try to provide a nicer
# error message when that is missing (http://bugs.python.org/issue19744)
_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION))
try:
import ssl
except ImportError:
ssl = None
def _require_ssl_for_pip():
raise RuntimeError(_MISSING_SSL_MESSAGE)
else:
def _require_ssl_for_pip():
pass
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
("pip", _PIP_VERSION),
]
def _run_pip(args, additional_paths=None):
# Add our bundled software to the sys.path so we can import it
if additional_paths is not None:
sys.path = additional_paths + sys.path
# Install the bundled software
import pip
pip.main(args)
def version():
"""
Returns a string specifying the bundled version of pip.
"""
return _PIP_VERSION
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory).
Note that calling this function will alter both sys.path and os.environ.
"""
if altinstall and default_pip:
raise ValueError("Cannot use altinstall and default_pip together")
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# By default, installing pip and setuptools installs all of the
# following scripts (X.Y == running Python version):
#
# pip, pipX, pipX.Y, easy_install, easy_install-X.Y
#
# pip 1.5+ allows ensurepip to request that some of those be left out
if altinstall:
# omit pip, pipX and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
elif not default_pip:
# omit pip and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "install"
with tempfile.TemporaryDirectory() as tmpdir:
# Put our bundled wheels into a temporary directory and construct the
# additional paths that need added to sys.path
additional_paths = []
for project, version in _PROJECTS:
wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
whl = pkgutil.get_data(
"ensurepip",
"_bundled/{}".format(wheel_name),
)
with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
fp.write(whl)
additional_paths.append(os.path.join(tmpdir, wheel_name))
# Construct the arguments to be passed to the pip command
args = ["install", "--no-index", "--find-links", tmpdir]
if root:
args += ["--root", root]
if upgrade:
args += ["--upgrade"]
if user:
args += ["--user"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in _PROJECTS], additional_paths)
def _uninstall_helper(*, verbosity=0):
"""Helper to support a clean default uninstall process on Windows
Note that calling this function may alter os.environ.
"""
# Nothing to do if pip was never installed, or has been removed
try:
import pip
except ImportError:
return
# If the pip version doesn't match the bundled one, leave it alone
if pip.__version__ != _PIP_VERSION:
msg = ("ensurepip will only uninstall a matching version "
"({!r} installed, {!r} bundled)")
print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr)
return
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# Construct the arguments to be passed to the pip command
args = ["uninstall", "-y"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in reversed(_PROJECTS)])
def _main(argv=None):
if ssl is None:
print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE),
file=sys.stderr)
return
import argparse
parser = argparse.ArgumentParser(prog="python -m ensurepip")
parser.add_argument(
"--version",
action="version",
version="pip {}".format(version()),
help="Show the version of pip that is bundled with this Python.",
)
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
dest="verbosity",
help=("Give more output. Option is additive, and can be used up to 3 "
"times."),
)
parser.add_argument(
"-U", "--upgrade",
action="store_true",
default=False,
help="Upgrade pip and dependencies, even if already installed.",
)
parser.add_argument(
"--user",
action="store_true",
default=False,
help="Install using the user scheme.",
)
parser.add_argument(
"--root",
default=None,
help="Install everything relative to this alternate root directory.",
)
parser.add_argument(
"--altinstall",
action="store_true",
default=False,
help=("Make an alternate install, installing only the X.Y versioned"
"scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=False,
help=("Make a default pip install, installing the unqualified pip "
"and easy_install in addition to the versioned scripts"),
)
args = parser.parse_args(argv)
bootstrap(
root=args.root,
upgrade=args.upgrade,
user=args.user,
verbosity=args.verbosity,
altinstall=args.altinstall,
default_pip=args.default_pip,
)
| lgpl-3.0 |
roqu3/Kernel-2.6.35.14-P500 | tools/perf/scripts/python/syscall-counts-by-pid.py | 944 | 1744 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n"
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38d %10d\n" % (id, val),
| gpl-2.0 |
zhoffice/minos | client/package.py | 5 | 8146 | import argparse
import glob
import hashlib
import os
import pprint
import subprocess
import yaml
import deploy_config
from log import Log
from tank_client import TankClient
# Fallback revision used by generate_package_revision() when the package
# directory is neither a svn nor a git working copy (placeholder value).
FAKE_SVN_VERSION = 12345
def check_directory(path):
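  '''
  Log a critical error unless path is an existing directory that can be
  entered.
  '''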
if not os.path.exists(path):
Log.print_critical(
      "Directory doesn't exist: " + path)
if not os.path.isdir(path):
Log.print_critical(
'NOT a directory: ' + path)
if not os.access(path, os.X_OK):
Log.print_critical(
      "Can't cd to: " + path)
def check_file(path):
if not os.path.exists(path):
Log.print_critical(
      "File doesn't exist: " + path)
if not os.path.isfile(path):
Log.print_critical(
'NOT a file: ' + path)
if not os.access(path, os.R_OK):
Log.print_critical(
      "Can't read file: " + path)
def get_package_config_dir():
return deploy_config.get_deploy_config().get_config_dir() + '/package'
def get_package_config_file(package):
return '%s/%s.yaml' % (get_package_config_dir(), package)
def get_pacakge_config(package):
return yaml.load(open(get_package_config_file(package)))
def get_tank_client():
'''
A factory method to construct a tank(package server) client object.
'''
tank_config = deploy_config.get_deploy_config().get_tank_config()
return TankClient(tank_config.get('server_host'),
tank_config.get('server_port'))
def get_revision_number(cmd, output_prefix, work_space_dir):
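  '''
  Run cmd inside work_space_dir and return the text that follows
  output_prefix in the command output.
  '''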
env = os.environ
# Enforce English locale.
env["LC_ALL"] = "C"
current_work_dir = os.getcwd()
os.chdir(work_space_dir)
content = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
os.chdir(current_work_dir)
for line in content.splitlines():
if line.startswith(output_prefix):
return line[len(output_prefix):]
def generate_package_revision(root):
'''
Get the revision of the package. Currently, svn revision and git commit are
supported. If the package directory is neither a svn working directory nor
a git working directory, a fake revision will be returned.
@param root the local package root directory
@return string the revision of the package
'''
if os.path.islink(root):
real_path = os.readlink(root)
if not real_path.startswith('/'):
abs_path = "%s/%s" % (os.path.dirname(root), real_path)
else:
abs_path = real_path
else:
abs_path = root
try:
try:
cmd = ["svn", "info"]
revision_prefix = "Revision: "
return "r%s" % get_revision_number(cmd, revision_prefix, abs_path)
except:
cmd = ["git", "show"]
commit_prefix = "commit "
return get_revision_number(cmd, commit_prefix, abs_path)
except:
# We cannot get the version No., just return a fake one
return "r%s" % FAKE_SVN_VERSION
def generate_checksum(path):
'''
Generate the SHA-1 digest of specified file.
@param path the path of the file
@return string the SHA-1 digest
'''
fd = open(path, "r")
sha1 = hashlib.sha1()
while True:
buffer = fd.read(4096)
if not buffer: break
sha1.update(buffer)
fd.close()
return sha1.hexdigest()
def upload_package(artifact, package_tarball, package_source):
'''
  Upload the specified package to the package server (Tank). Note that
  if a file with the same checksum is already uploaded, this uploading
  will be skipped.
  @param artifact the artifact of the package
  @param package_tarball the local path of the package tarball to upload
  @param package_source the package source directory, used to derive the revision
  @return dict the package information returned by the package server
'''
  Log.print_info("Uploading package: %s" % package_tarball)
revision = generate_package_revision(package_source)
Log.print_success("Revision is: %s" % revision)
Log.print_info("Generating checksum of package: %s" % package_tarball)
checksum = generate_checksum(package_tarball)
Log.print_success("Checksum is: %s" % checksum)
tank_client = get_tank_client()
package_info = tank_client.check_package(artifact, checksum)
if not package_info:
if 200 == tank_client.upload(package_tarball, artifact, revision):
Log.print_success("Upload package %s success" % package_tarball)
package_info = tank_client.check_package(artifact, checksum)
return eval(package_info)
else:
Log.print_warning("Package %s has already uploaded, skip uploading" %
package_tarball)
return eval(package_info)
return None
def parse_command_line():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Manage Minos packages.')
parser.add_argument('--version', action='version',
version='%(prog)s 1.0.0-beta')
parser.add_argument('-v', '--verbosity', default=0, type=int,
help='The verbosity level of log, higher value, more details.')
subparsers = parser.add_subparsers(
title='commands',
help='Type \'%(prog)s command -h\' to get more help for individual '
'command.')
sub_parser = subparsers.add_parser(
'list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='List packages, locally or remotely.')
sub_parser.add_argument('--remote', action='store_true',
help='List remote packages.')
sub_parser.set_defaults(handler=process_command_list)
sub_parser = subparsers.add_parser(
'build',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='Build local package.')
sub_parser.add_argument('package',
help='The package name.')
sub_parser.set_defaults(handler=process_command_build)
sub_parser = subparsers.add_parser(
'install',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='Install binary packages from local to remote package server.')
sub_parser.add_argument('--make_current', action='store_false',
help='Make the installed pacakge as current version.')
sub_parser.add_argument('package',
help='The package name.')
sub_parser.set_defaults(handler=process_command_install)
args = parser.parse_args()
Log.verbosity = args.verbosity
return args
def process_command_list(args):
if not args.remote:
# list local packages.
Log.print_info('All local packages:')
print '[package]: [artifact] [version]'
for path in glob.glob(get_package_config_file('*')):
basename = os.path.basename(path)
package = basename[:-len('.yaml')]
package_config = get_pacakge_config(package)
print '%s: %s %s' % (
package, package_config['artifact'], package_config['version'])
else:
# list local packages.
Log.print_critical('Not implemented yet!')
def process_command_build(args):
package_file = get_package_config_file(args.package)
package_config = get_pacakge_config(args.package)
package_dir = os.path.dirname(package_file)
package_source = os.path.abspath(
os.path.join(package_dir, package_config['source']))
check_directory(package_source)
subprocess.check_call(
'cd %s; %s' % (package_source, package_config['build']), shell=True)
def process_command_install(args):
package_file = get_package_config_file(args.package)
package_config = get_pacakge_config(args.package)
package_dir = os.path.dirname(package_file)
package_source = os.path.abspath(
os.path.join(package_dir, package_config['source']))
package_tarball = os.path.abspath(
os.path.join(package_source, package_config['package']['tarball']))
# the abspath would remove the trailing slash, so we have to check the
# original config.
if package_config['package']['tarball'][-1] == '/':
package_tarball += '/%s-%s.tar.gz' % (
package_config['artifact'], package_config['version'])
check_file(package_tarball)
Log.print_info("Installing %s to package server" % package_config['artifact'])
package_info = upload_package(
package_config['artifact'], package_tarball, package_source)
if package_info:
Log.print_success("Install %s to package server success" %
package_config['artifact'])
pprint.pprint(package_info)
else:
Log.print_critical("Install %s to package server fail" %
package_config['artifact'])
def main():
args = parse_command_line()
return args.handler(args)
if __name__ == '__main__':
main()
| apache-2.0 |
kennedyshead/home-assistant | tests/components/nut/test_config_flow.py | 2 | 11140 | """Test the Network UPS Tools (NUT) config flow."""
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.nut.const import DOMAIN
from homeassistant.const import CONF_RESOURCES, CONF_SCAN_INTERVAL
from .util import _get_mock_pynutclient
from tests.common import MockConfigEntry
VALID_CONFIG = {
"host": "localhost",
"port": 123,
"name": "name",
"resources": ["battery.charge"],
}
async def test_form_zeroconf(hass):
"""Test we can setup from zeroconf."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={"host": "192.168.1.5", "port": 1234},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": "test-username", "password": "test-password"},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "192.168.1.5:1234"
assert result3["data"] == {
"host": "192.168.1.5",
"password": "test-password",
"port": 1234,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert result3["result"].unique_id is None
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_one_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "1.1.1.1:2222"
assert result3["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"port": 2222,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_multiple_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "2.2.2.2", "port": 123, "resources": ["battery.charge"]},
options={CONF_RESOURCES: ["battery.charge"]},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"},
list_ups={"ups1": "UPS 1", "ups2": "UPS2"},
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "ups"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"alias": "ups2"},
)
assert result3["step_id"] == "resources"
assert result3["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
{"resources": ["battery.voltage"]},
)
await hass.async_block_till_done()
assert result4["type"] == "create_entry"
assert result4["title"] == "[email protected]:2222"
assert result4["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"alias": "ups2",
"port": 2222,
"resources": ["battery.voltage"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_user_one_ups_with_ignored_entry(hass):
"""Test we can setup a new one when there is an ignored one."""
ignored_entry = MockConfigEntry(
domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
)
ignored_entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "1.1.1.1:2222"
assert result3["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"port": 2222,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_pynut = _get_mock_pynutclient()
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data=VALID_CONFIG,
options={CONF_RESOURCES: ["battery.charge"]},
)
config_entry.add_to_hass(hass)
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_RESOURCES: ["battery.voltage"]}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_RESOURCES: ["battery.voltage"],
CONF_SCAN_INTERVAL: 60,
}
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result2 = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result2["flow_id"],
user_input={CONF_RESOURCES: ["battery.voltage"], CONF_SCAN_INTERVAL: 12},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_RESOURCES: ["battery.voltage"],
CONF_SCAN_INTERVAL: 12,
}
| apache-2.0 |
moreati/pandashells | pandashells/lib/arg_lib.py | 7 | 6681 | from pandashells.lib import config_lib
def _check_for_recognized_args(*args):
"""
Raise an error if unrecognized argset is specified
"""
allowed_arg_set = set([
'io_in',
'io_out',
'example',
'xy_plotting',
'decorating',
])
in_arg_set = set(args)
unrecognized_set = in_arg_set - allowed_arg_set
if unrecognized_set:
msg = '{} not in allowed set {}'.format(unrecognized_set,
allowed_arg_set)
raise ValueError(msg)
def _io_in_adder(parser, config_dict, *args):
"""
Add input options to the parser
"""
in_arg_set = set(args)
if 'io_in' in in_arg_set:
group = parser.add_argument_group('Input Options')
# define the valid components
io_opt_list = ['csv', 'table', 'header', 'noheader']
# allow the option of supplying input column names
msg = 'Overwrite input column names with this list'
group.add_argument(
'--names', nargs='+', type=str, dest='names',
metavar="name", help=msg)
default_for_input = [
config_dict['io_input_type'],
config_dict['io_input_header']
]
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-i', '--input_options', nargs='+', type=str, dest='input_options',
metavar='option', default=default_for_input, choices=io_opt_list,
help=msg)
def _io_out_adder(parser, config_dict, *args):
"""
Add output options to the parser
"""
in_arg_set = set(args)
if 'io_out' in in_arg_set:
group = parser.add_argument_group('Output Options')
# define the valid components
io_opt_list = [
'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',
]
# define the current defaults
default_for_output = [
config_dict['io_output_type'],
config_dict['io_output_header'],
config_dict['io_output_index']
]
# show the current defaults in the arg parser
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-o', '--output_options', nargs='+',
type=str, dest='output_options', metavar='option',
default=default_for_output, help=msg)
msg = (
'Replace NaNs with this string. '
'A string containing \'nan\' will set na_rep to numpy NaN. '
'Current default is {}'
).format(repr(str(config_dict['io_output_na_rep'])))
group.add_argument(
'--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',
help=msg)
def _decorating_adder(parser, *args):
in_arg_set = set(args)
if 'decorating' in in_arg_set:
# get a list of valid plot styling info
context_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_context'][0][1]
theme_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_theme'][0][1]
palette_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_palette'][0][1]
group = parser.add_argument_group('Plot specific Options')
msg = "Set the x-limits for the plot"
group.add_argument(
'--xlim', nargs=2, type=float, dest='xlim',
metavar=('XMIN', 'XMAX'), help=msg)
msg = "Set the y-limits for the plot"
group.add_argument(
'--ylim', nargs=2, type=float, dest='ylim',
metavar=('YMIN', 'YMAX'), help=msg)
msg = "Draw x axis with log scale"
group.add_argument(
'--xlog', action='store_true', dest='xlog', default=False,
help=msg)
msg = "Draw y axis with log scale"
group.add_argument(
'--ylog', action='store_true', dest='ylog', default=False,
help=msg)
msg = "Set the x-label for the plot"
group.add_argument(
'--xlabel', nargs=1, type=str, dest='xlabel', help=msg)
msg = "Set the y-label for the plot"
group.add_argument(
'--ylabel', nargs=1, type=str, dest='ylabel', help=msg)
msg = "Set the title for the plot"
group.add_argument(
'--title', nargs=1, type=str, dest='title', help=msg)
msg = "Specify legend location"
group.add_argument(
'--legend', nargs=1, type=str, dest='legend',
choices=['1', '2', '3', '4', 'best'], help=msg)
msg = "Specify whether hide the grid or not"
group.add_argument(
'--nogrid', action='store_true', dest='no_grid', default=False,
help=msg)
msg = "Specify plot context. Default = '{}' ".format(context_list[0])
group.add_argument(
'--context', nargs=1, type=str, dest='plot_context',
default=[context_list[0]], choices=context_list, help=msg)
msg = "Specify plot theme. Default = '{}' ".format(theme_list[0])
group.add_argument(
'--theme', nargs=1, type=str, dest='plot_theme',
default=[theme_list[0]], choices=theme_list, help=msg)
msg = "Specify plot palette. Default = '{}' ".format(palette_list[0])
group.add_argument(
'--palette', nargs=1, type=str, dest='plot_palette',
default=[palette_list[0]], choices=palette_list, help=msg)
msg = "Save the figure to this file"
group.add_argument('--savefig', nargs=1, type=str, help=msg)
def _xy_adder(parser, *args):
in_arg_set = set(args)
if 'xy_plotting' in in_arg_set:
msg = 'Column to plot on x-axis'
parser.add_argument(
'-x', nargs=1, type=str, dest='x', metavar='col', help=msg)
msg = 'List of columns to plot on y-axis'
parser.add_argument(
'-y', nargs='+', type=str, dest='y', metavar='col', help=msg)
msg = "Plot style(s) defaults to .-"
parser.add_argument(
'-s', '--style', nargs='+', type=str, dest='style', default=['.-'],
help=msg, metavar='style')
def add_args(parser, *args):
"""Adds argument blocks to the arg parser
:type parser: argparse instance
    :param parser: The argparse instance to use in adding arguments
    Additional arguments are the names of argument blocks to add
"""
config_dict = config_lib.get_config()
_check_for_recognized_args(*args)
_io_in_adder(parser, config_dict, *args)
_io_out_adder(parser, config_dict, *args)
_decorating_adder(parser, *args)
_xy_adder(parser, *args)
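# Illustrative usage (a sketch, not part of the original module): a typical
# pandashells tool builds an argparse parser and hands it to add_args() with
# the argument blocks it needs.  The block names come from the allowed set in
# _check_for_recognized_args(); the sample command line below is invented.
if __name__ == '__main__':  # pragma: no cover
    import argparse
    example_parser = argparse.ArgumentParser(description='plot a csv from stdin')
    add_args(example_parser, 'io_in', 'xy_plotting', 'decorating')
    example_args = example_parser.parse_args(['-x', 'time', '-y', 'value', '--xlog'])
    print(example_args.x, example_args.y, example_args.xlog)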
| bsd-2-clause |
andybak/hendrix | hendrix/test/testproject/settings.py | 4 | 1047 |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
SECRET_KEY = 'NOTREALt@k0)scq$uuph3gjpbhjhd%ipe)04f5d^^1%)%my(%b6&pus_2NOTREAL'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'hendrix.test.testproject.urls'
WSGI_APPLICATION = 'hendrix.test.testproject.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
| mit |
mm112287/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_case.py | 738 | 51689 | import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
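# The LoggingTestCase above appends each setUp/test/tearDown call to the
# events list it was given; the call-order tests below compare that list,
# interleaved with the LoggingResult notifications, against an expected
# sequence.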
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
    # will be a string (either 8-bit or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
        used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance but since it's a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogeneous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
# no fair testing ourself with ourself, and assertEqual is used for strings
# so can't use assertEqual either. Just use assertTrue.
self.assertTrue(sample_text_error == error)
    def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
        # Believe it or not, it is preferable to duplicate all tests above,
# to make sure the __warningregistry__ $@ is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
| gpl-3.0 |
avedaee/DIRAC | Interfaces/scripts/dirac-wms-job-parameters.py | 8 | 1186 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-parameters
# Author : Stuart Paterson
########################################################################
"""
Retrieve the parameters associated with the given DIRAC job(s)
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
for job in args:
result = dirac.parameters( job, printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
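# Example invocation (illustrative; the job IDs are made up):
#
#   dirac-wms-job-parameters 123 456
#
# This prints the parameter dictionary of each job and exits with status 2 if
# any of the lookups fail.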
| gpl-3.0 |
sdague/home-assistant | homeassistant/components/aprs/device_tracker.py | 17 | 5575 | """Support for APRS device tracking."""
import logging
import threading
import aprslib
from aprslib import ConnectionError as AprsConnectionError, LoginError
import geopy.distance
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_HOST,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
DOMAIN = "aprs"
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_COURSE = "course"
ATTR_COMMENT = "comment"
ATTR_FROM = "from"
ATTR_FORMAT = "format"
ATTR_POS_AMBIGUITY = "posambiguity"
ATTR_SPEED = "speed"
CONF_CALLSIGNS = "callsigns"
DEFAULT_HOST = "rotate.aprs2.net"
DEFAULT_PASSWORD = "-1"
DEFAULT_TIMEOUT = 30.0
FILTER_PORT = 14580
MSG_FORMATS = ["compressed", "uncompressed", "mic-e"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CALLSIGNS): cv.ensure_list,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(float),
}
)
def make_filter(callsigns: list) -> str:
"""Make a server-side filter from a list of callsigns."""
return " ".join(f"b/{sign.upper()}" for sign in callsigns)
def gps_accuracy(gps, posambiguity: int) -> int:
"""Calculate the GPS accuracy based on APRS posambiguity."""
pos_a_map = {0: 0, 1: 1 / 600, 2: 1 / 60, 3: 1 / 6, 4: 1}
if posambiguity in pos_a_map:
degrees = pos_a_map[posambiguity]
gps2 = (gps[0], gps[1] + degrees)
dist_m = geopy.distance.distance(gps, gps2).m
accuracy = round(dist_m)
else:
message = f"APRS position ambiguity must be 0-4, not '{posambiguity}'."
raise ValueError(message)
return accuracy
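# For example, gps_accuracy((0.0, 0.0), 2) comes out to roughly 1850 m: a
# posambiguity of 2 maps to 1/60 of a degree, and one minute of longitude at
# the equator is about one nautical mile (the exact figure is whatever
# geopy.distance.distance() reports).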
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the APRS tracker."""
callsigns = config.get(CONF_CALLSIGNS)
server_filter = make_filter(callsigns)
callsign = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
timeout = config.get(CONF_TIMEOUT)
aprs_listener = AprsListenerThread(callsign, password, host, server_filter, see)
def aprs_disconnect(event):
"""Stop the APRS connection."""
aprs_listener.stop()
aprs_listener.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, aprs_disconnect)
if not aprs_listener.start_event.wait(timeout):
_LOGGER.error("Timeout waiting for APRS to connect")
return
if not aprs_listener.start_success:
_LOGGER.error(aprs_listener.start_message)
return
_LOGGER.debug(aprs_listener.start_message)
return True
class AprsListenerThread(threading.Thread):
"""APRS message listener."""
def __init__(
self, callsign: str, password: str, host: str, server_filter: str, see
):
"""Initialize the class."""
super().__init__()
self.callsign = callsign
self.host = host
self.start_event = threading.Event()
self.see = see
self.server_filter = server_filter
self.start_message = ""
self.start_success = False
self.ais = aprslib.IS(
self.callsign, passwd=password, host=self.host, port=FILTER_PORT
)
def start_complete(self, success: bool, message: str):
"""Complete startup process."""
self.start_message = message
self.start_success = success
self.start_event.set()
def run(self):
"""Connect to APRS and listen for data."""
self.ais.set_filter(self.server_filter)
try:
_LOGGER.info(
"Opening connection to %s with callsign %s", self.host, self.callsign
)
self.ais.connect()
self.start_complete(
True, f"Connected to {self.host} with callsign {self.callsign}."
)
self.ais.consumer(callback=self.rx_msg, immortal=True)
except (AprsConnectionError, LoginError) as err:
self.start_complete(False, str(err))
except OSError:
_LOGGER.info(
"Closing connection to %s with callsign %s", self.host, self.callsign
)
def stop(self):
"""Close the connection to the APRS network."""
self.ais.close()
def rx_msg(self, msg: dict):
"""Receive message and process if position."""
_LOGGER.debug("APRS message received: %s", str(msg))
if msg[ATTR_FORMAT] in MSG_FORMATS:
dev_id = slugify(msg[ATTR_FROM])
lat = msg[ATTR_LATITUDE]
lon = msg[ATTR_LONGITUDE]
attrs = {}
if ATTR_POS_AMBIGUITY in msg:
pos_amb = msg[ATTR_POS_AMBIGUITY]
try:
attrs[ATTR_GPS_ACCURACY] = gps_accuracy((lat, lon), pos_amb)
except ValueError:
_LOGGER.warning(
"APRS message contained invalid posambiguity: %s", str(pos_amb)
)
for attr in [ATTR_ALTITUDE, ATTR_COMMENT, ATTR_COURSE, ATTR_SPEED]:
if attr in msg:
attrs[attr] = msg[attr]
self.see(dev_id=dev_id, gps=(lat, lon), attributes=attrs)
| apache-2.0 |
tanglei528/glance | glance/tests/unit/common/test_property_utils.py | 1 | 23549 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import xrange
from glance.api import policy
from glance.common import exception
from glance.common import property_utils
import glance.context
from glance.tests.unit import base
CONFIG_SECTIONS = [
'^x_owner_.*',
'spl_create_prop',
'spl_read_prop',
'spl_read_only_prop',
'spl_update_prop',
'spl_update_only_prop',
'spl_delete_prop',
'^x_all_permitted.*',
'^x_none_permitted.*',
'x_none_read',
'x_none_update',
'x_none_delete',
'x_foo_matcher',
'x_foo_*',
'.*'
]
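# The order-dependent tests below (test_property_config_loaded_in_order and
# test_property_rules_loaded_in_order) assert that the sections loaded from
# the property protection config file match this list exactly, in order.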
def create_context(policy, roles=None):
if roles is None:
roles = []
return glance.context.RequestContext(roles=roles,
policy_enforcer=policy)
class TestPropertyRulesWithRoles(base.IsolatedUnitTest):
def setUp(self):
super(TestPropertyRulesWithRoles, self).setUp()
self.set_property_protections()
self.policy = policy.Enforcer()
def tearDown(self):
super(TestPropertyRulesWithRoles, self).tearDown()
def test_is_property_protections_enabled_true(self):
self.config(property_protection_file="property-protections.conf")
self.assertTrue(property_utils.is_property_protection_enabled())
def test_is_property_protections_enabled_false(self):
self.config(property_protection_file=None)
self.assertFalse(property_utils.is_property_protection_enabled())
def test_property_protection_file_doesnt_exist(self):
self.config(property_protection_file='fake-file.conf')
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_mutually_exclusive_rule(self):
exclusive_rules = {'.*': {'create': ['@', '!'],
'read': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(exclusive_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_malformed_rule(self):
malformed_rules = {'^[0-9)': {'create': ['fake-role'],
'read': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_missing_operation(self):
rules_with_missing_operation = {'^[0-9]': {'create': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(rules_with_missing_operation)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_misspelt_operation(self):
rules_with_misspelt_operation = {'^[0-9]': {'create': ['fake-role'],
'rade': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(rules_with_misspelt_operation)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_whitespace(self):
rules_whitespace = {
'^test_prop.*': {
'create': ['member ,fake-role'],
'read': ['fake-role, member'],
'update': ['fake-role, member'],
'delete': ['fake-role, member']
}
}
self.set_property_protection_rules(rules_whitespace)
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
'read', create_context(self.policy, ['member'])))
self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
'read', create_context(self.policy, ['fake-role'])))
def test_check_property_rules_invalid_action(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'hall', create_context(self.policy, ['admin'])))
def test_check_property_rules_read_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'read', create_context(self.policy, ['admin'])))
def test_check_property_rules_read_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'read',
create_context(self.policy, ['member'])))
def test_check_property_rules_read_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'read', create_context(self.policy, ['member'])))
def test_check_property_rules_create_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'create', create_context(self.policy, ['admin'])))
def test_check_property_rules_create_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'create',
create_context(self.policy, ['member'])))
def test_check_property_rules_create_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'create', create_context(self.policy, ['member'])))
def test_check_property_rules_update_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'update', create_context(self.policy, ['admin'])))
def test_check_property_rules_update_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'update',
create_context(self.policy, ['member'])))
def test_check_property_rules_update_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'update', create_context(self.policy, ['member'])))
def test_check_property_rules_delete_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'delete', create_context(self.policy, ['admin'])))
def test_check_property_rules_delete_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'delete',
create_context(self.policy, ['member'])))
def test_check_property_rules_delete_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'delete', create_context(self.policy, ['member'])))
def test_property_config_loaded_in_order(self):
"""
Verify the order of loaded config sections matches that from the
configuration file
"""
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertEqual(property_utils.CONFIG.sections(), CONFIG_SECTIONS)
def test_property_rules_loaded_in_order(self):
"""
Verify rules are iterable in the same order as read from the config
file
"""
self.rules_checker = property_utils.PropertyRules(self.policy)
for i in xrange(len(property_utils.CONFIG.sections())):
self.assertEqual(property_utils.CONFIG.sections()[i],
self.rules_checker.rules[i][0].pattern)
def test_check_property_rules_create_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_create_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_read_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_read', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'delete',
create_context(self.policy, [''])))
def test_check_property_rules_update_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_update', 'update',
create_context(self.policy, [''])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'delete',
create_context(self.policy, ['admin', 'member'])))
def test_check_property_rules_delete_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'update',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_delete', 'delete',
create_context(self.policy, [''])))
def test_check_return_first_match(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'create',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'delete',
create_context(self.policy, [''])))
class TestPropertyRulesWithPolicies(base.IsolatedUnitTest):
def setUp(self):
super(TestPropertyRulesWithPolicies, self).setUp()
self.set_property_protections(use_policies=True)
self.policy = policy.Enforcer()
self.rules_checker = property_utils.PropertyRules(self.policy)
def tearDown(self):
super(TestPropertyRulesWithPolicies, self).tearDown()
def test_check_property_rules_create_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'create',
create_context(self.policy, ['spl_role'])))
def test_check_property_rules_create_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'create',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_read_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'read',
create_context(self.policy, ['spl_role'])))
def test_check_property_rules_read_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'read',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_update_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'update',
create_context(self.policy, ['admin'])))
def test_check_property_rules_update_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'update',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_delete_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'delete',
create_context(self.policy, ['admin'])))
def test_check_property_rules_delete_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'delete',
create_context(self.policy, ['fake-role'])))
def test_property_protection_with_malformed_rule(self):
malformed_rules = {'^[0-9)': {'create': ['fake-policy'],
'read': ['fake-policy'],
'update': ['fake-policy'],
'delete': ['fake-policy']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_multiple_policies(self):
malformed_rules = {'^x_.*': {'create': ['fake-policy, another_pol'],
'read': ['fake-policy'],
'update': ['fake-policy'],
'delete': ['fake-policy']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_check_property_rules_create_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_create_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_read_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_read', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'delete',
create_context(self.policy, [''])))
def test_check_property_rules_update_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_update', 'update',
create_context(self.policy, [''])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'delete',
create_context(self.policy, ['admin', 'member'])))
def test_check_property_rules_delete_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'update',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_delete', 'delete',
create_context(self.policy, [''])))
def test_check_return_first_match(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'create',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'delete',
create_context(self.policy, [''])))
| apache-2.0 |
ppasq/geonode | geonode/base/management/commands/delete_orphaned_thumbs.py | 18 | 1091 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.base.utils import delete_orphaned_thumbs
class Command(BaseCommand):
help = ("Delete orphaned thumbnails.")
def handle(self, *args, **options):
delete_orphaned_thumbs()
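# Usage sketch (assumption, not stated in this file): as a Django management
# command, this is normally invoked by module name from the project root:
#   python manage.py delete_orphaned_thumbs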
| gpl-3.0 |
mykoz/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
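# Illustrative sketch, not part of the original module: round-tripping between
# probability and odds with the helpers above; kept as an unused function so
# importing the module has no side effects.
def _example_odds_roundtrip():
    o = Odds(0.75)             # 3.0, i.e. 3:1 in favor
    p = Probability(o)         # back to 0.75
    p2 = Probability2(3, 1)    # 0.75 from explicit for/against counts
    return o, p, p2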
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
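# Illustrative sketch, not part of the original module: Pmf arithmetic for two
# fair six-sided dice, using only methods defined on Pmf above.
def _example_two_dice():
    d6 = Pmf(range(1, 7))           # uniform Pmf over 1..6
    total = d6 + d6                 # AddPmf: distribution of the sum
    return total.Mean(), total[7]   # 7.0 and 1/6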
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
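# Illustrative sketch, not part of the original module: MakeMixture takes a
# meta-Pmf that maps component Pmfs to their weights.
def _example_mixture():
    low = MakeUniformPmf(0, 1, 11)
    high = MakeUniformPmf(1, 2, 11)
    metapmf = Pmf({low: 0.7, high: 0.3})
    return MakeMixture(metapmf, label='mix')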
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
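# Illustrative sketch, not part of the original module: percentile queries and
# random sampling from a Cdf built directly from raw values.
def _example_cdf_queries():
    cdf = Cdf([1, 2, 2, 3, 5])
    median = cdf.Percentile(50)      # 2
    rank = cdf.PercentileRank(3)     # 80.0
    sample = cdf.Sample(1000)        # NumPy array of resampled values
    return median, rank, sample.mean()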
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
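# Illustrative sketch, not part of the original module: Suite is meant to be
# subclassed with a Likelihood method. This toy subclass infers which die
# (4, 6, 8, or 12 sided) produced an observed roll.
class _ExampleDiceSuite(Suite):
    def Likelihood(self, data, hypo):
        # hypo is the number of sides; data is the observed roll
        return 0 if data > hypo else 1.0 / hypo
def _example_dice_update():
    suite = _ExampleDiceSuite([4, 6, 8, 12])
    suite.Update(6)                  # rules out the 4-sided die
    return dict(suite.Items())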
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
        in that case the density is evaluated at n locations between
low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
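# Illustrative sketch, not part of the original module: a KDE-based density
# estimate discretized into a Pmf over an explicit range.
def _example_estimated_pdf():
    sample = np.random.normal(0, 1, 500)
    pdf = EstimatedPdf(sample)
    pmf = pdf.MakePmf(low=-4.0, high=4.0, n=101)
    return pmf.Mean()                # roughly the sample mean, near 0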
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
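# Illustrative sketch, not part of the original module: a discrete Poisson
# approximation and the probability of at most 2 events when lam=1.5.
def _example_poisson():
    pmf = MakePoissonPmf(lam=1.5, high=20)
    p_at_most_2 = sum(pmf[k] for k in range(3))   # about 0.81
    return pmf.Mean(), p_at_most_2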
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
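# Illustrative sketch, not part of the original module: the normal CDF and its
# inverse are consistent with each other.
def _example_normal_cdf_roundtrip():
    p = EvalNormalCdf(1.0, mu=0, sigma=1)          # about 0.841
    x = EvalNormalCdfInverse(p, mu=0, sigma=1)     # back to about 1.0
    return p, x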
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
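# Illustrative sketch, not part of the original module: Bayesian update of a
# uniform Beta prior with 140 heads and 110 tails, then the posterior mean.
def _example_beta_update():
    beta = Beta(1, 1)
    beta.Update((140, 110))
    return beta.Mean()               # 141/252, about 0.56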
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
    n = int(p * len(t))
    # use an explicit upper bound so that n == 0 keeps the whole list
    # (sorted(t)[n:-n] would be empty in that case)
    t = sorted(t)[n:len(t) - n]
    return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
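# Illustrative regression workflow using the helpers above (editor's sketch,
# not part of the original module; xs and ys are assumed sequences):
#
#     inter, slope = LeastSquares(xs, ys)
#     fit_xs, fit_ys = FitLine(xs, inter, slope)
#     res = Residuals(xs, ys, inter, slope)
#     r2 = CoefDetermination(ys, res)   # fraction of variance explained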
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
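    # With x' = rho * x + N(0, 1 - rho**2), the stationary variance stays 1,
    # so successive values have correlation rho (an AR(1) process).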
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
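# Illustrative usage (editor's sketch; the file names are hypothetical):
#
#     dct = ReadStataDct('survey.dct')
#     df = dct.ReadFixedWidth('survey.dat.gz', compression='gzip')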
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
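# Illustrative usage (editor's sketch): given a list of simulated sequences,
# extract a central 90% band for plotting.
#
#     low, high = PercentileRows(ys_seq, [5, 95])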
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
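# Minimal example subclass (editor's sketch, not part of the original module):
# a permutation test for the difference in group means.
#
#     class DiffMeansPermute(HypothesisTest):
#         def TestStatistic(self, data):
#             group1, group2 = data
#             return abs(group1.mean() - group2.mean())
#
#         def MakeModel(self):
#             group1, group2 = self.data
#             self.n = len(group1)
#             self.pool = np.hstack((group1, group2))
#
#         def RunModel(self):
#             np.random.shuffle(self.pool)
#             return self.pool[:self.n], self.pool[self.n:]
#
#     pvalue = DiffMeansPermute((group1, group2)).PValue()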
def main():
pass
if __name__ == '__main__':
main()
| gpl-3.0 |
paulorauber/nn | examples/rnn.py | 1 | 2389 | import numpy as np
from sklearn.utils import check_random_state
from nn.model.recurrent import RecurrentNetwork
random_state = check_random_state(None)
def nback(n, k, length):
"""Random n-back targets given n, number of digits k and sequence length"""
Xi = random_state.randint(k, size=length)
yi = np.zeros(length, dtype=int)
for t in range(n, length):
yi[t] = (Xi[t - n] == Xi[t])
return Xi, yi
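# For example (editor's note), with n=2 the target at position t is 1 exactly
# when the digit equals the digit two steps earlier:
#     X = [3, 1, 3, 3, 1]  ->  y = [0, 0, 1, 0, 0]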
def one_of_k(Xi_, k):
Xi = np.zeros((len(Xi_), k))
for t, Xit in np.ndenumerate(Xi_):
Xi[t, Xit] = 1
return Xi
def nback_dataset(n_sequences, mean_length, std_length, n, k):
X, y = [], []
for _ in range(n_sequences):
length = random_state.normal(loc=mean_length, scale=std_length)
length = int(max(n + 1, length))
Xi_, yi = nback(n, k, length)
Xi = one_of_k(Xi_, k)
X.append(Xi)
y.append(yi)
return X, y
def nback_example():
# Input dimension
k = 4
# n-back
n = 3
n_sequences = 100
mean_length = 20
std_length = 5
# Training
Xtrain, ytrain = nback_dataset(n_sequences, mean_length, std_length, n, k)
rnn = RecurrentNetwork(64, learning_rate=2.0, n_epochs=30,
lmbda=0.0, mu=0.2, output_activation='softmax',
random_state=None, verbose=1)
rnn.fit(Xtrain, ytrain)
# Evaluating
Xtest, ytest = nback_dataset(5*n_sequences, 5*mean_length, 5*std_length, n, k)
print('Average accuracy: {0:.3f}'.format(rnn.score(Xtest, ytest)))
acc_zeros = 0.0
for yi in ytest:
acc_zeros += float((yi == 0).sum()) / len(yi)
acc_zeros /= len(ytest)
print('Negative guess accuracy: {0:.3f}'.format(acc_zeros))
# Example
Xi_ = [3, 2, 1, 3, 2, 1, 3, 2, 2, 1, 2, 3, 1, 2, 0, 0, 2, 0]
print('\nExample sequence: {0}'.format(Xi_))
yi = np.zeros(len(Xi_), dtype=int)
for t in range(n, len(Xi_)):
yi[t] = (Xi_[t - n] == Xi_[t])
Xi = one_of_k(Xi_, k)
yipred = rnn.predict([Xi])[0]
print('Correct: \t{0}'.format(yi))
print('Predicted: \t{0}'.format(yipred))
print('Accuracy: {0:.3f}'.format(float((yi == yipred).sum())/len(yi)))
def main():
nback_example()
if __name__ == "__main__":
main() | mit |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/ndlstm/python/lstm1d_test.py | 94 | 4122 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 1D LSTM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.ndlstm.python import lstm1d as lstm1d_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
lstm1d = lstm1d_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class Lstm1DTest(test.TestCase):
def testSequenceToSequenceDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 1, 5))
outputs = lstm1d.ndlstm_base(inputs, 8)
variables.global_variables_initializer().run()
names = [v.name for v in variables.trainable_variables()]
self.assertEqual(len(names), 2)
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 1, 8))
def testSequenceToSequenceGradient(self):
with self.test_session():
size = (17, 1, 15)
output_size = (17, 1, 8)
inputs = constant_op.constant(_rand(*size))
outputs = lstm1d.ndlstm_base(inputs, 8, dynamic=False)
variables.global_variables_initializer().run()
gradients = gradients_impl.gradients(outputs, inputs)
if 1: # pylint: disable=using-constant-test
gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
self.assertEqual(gradients.shape, size)
else:
# TODO(tmb) tf.test.compute_gradient error is currently broken
# with dynamic_rnn. Enable this test case eventually.
err = gradient_checker.compute_gradient_error(
inputs, size, outputs, output_size, delta=1e-4)
self.assert_(not np.isnan(err))
self.assert_(err < 0.1)
def testSequenceToSequenceGradientReverse(self):
with self.test_session():
size = (17, 1, 15)
output_size = (17, 1, 8)
inputs = constant_op.constant(_rand(*size))
outputs = lstm1d.ndlstm_base(inputs, 8, reverse=1, dynamic=False)
variables.global_variables_initializer().run()
if 1: # pylint: disable=using-constant-test
gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
self.assertEqual(gradients.shape, size)
else:
# TODO(tmb) tf.test.compute_gradient error is currently broken
# with dynamic_rnn. Enable this test case eventually.
err = gradient_checker.compute_gradient_error(
inputs, size, outputs, output_size, delta=1e-4)
self.assert_(not np.isnan(err))
self.assert_(err < 0.1)
def testSequenceToFinalDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 6, 5))
outputs = lstm1d.sequence_to_final(inputs, 8)
variables.global_variables_initializer().run()
names = [v.name for v in variables.trainable_variables()]
self.assertEqual(len(names), 2)
result = outputs.eval()
self.assertEqual(tuple(result.shape), (6, 8))
def testSequenceSoftmaxDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 1, 5))
outputs = lstm1d.sequence_softmax(inputs, 8)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 1, 8))
if __name__ == "__main__":
test.main()
| apache-2.0 |
cldershem/osf.io | website/mailchimp_utils.py | 16 | 2747 | # -*- coding: utf-8 -*-
import mailchimp
from framework import sentry
from framework.tasks import app
from framework.auth.core import User
from framework.tasks.handlers import queued_task
from framework.auth.signals import user_confirmed
from framework.transactions.context import transaction
from website import settings
def get_mailchimp_api():
if not settings.MAILCHIMP_API_KEY:
raise RuntimeError("An API key is required to connect to Mailchimp.")
return mailchimp.Mailchimp(settings.MAILCHIMP_API_KEY)
def get_list_id_from_name(list_name):
m = get_mailchimp_api()
mailing_list = m.lists.list(filters={'list_name': list_name})
return mailing_list['data'][0]['id']
def get_list_name_from_id(list_id):
m = get_mailchimp_api()
mailing_list = m.lists.list(filters={'list_id': list_id})
return mailing_list['data'][0]['name']
@queued_task
@app.task
@transaction()
def subscribe_mailchimp(list_name, user_id):
user = User.load(user_id)
m = get_mailchimp_api()
list_id = get_list_id_from_name(list_name=list_name)
if user.mailing_lists is None:
user.mailing_lists = {}
try:
m.lists.subscribe(
id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True,
)
except mailchimp.ValidationError as error:
sentry.log_exception()
sentry.log_message(error.message)
user.mailing_lists[list_name] = False
else:
user.mailing_lists[list_name] = True
finally:
user.save()
@queued_task
@app.task
@transaction()
def unsubscribe_mailchimp(list_name, user_id, username=None):
"""Unsubscribe a user from a mailchimp mailing list given its name.
:param str list_name: mailchimp mailing list name
:param str user_id: current user's id
:param str username: current user's email (required for merged users)
:raises: ListNotSubscribed if user not already subscribed
"""
user = User.load(user_id)
m = get_mailchimp_api()
list_id = get_list_id_from_name(list_name=list_name)
m.lists.unsubscribe(id=list_id, email={'email': username or user.username})
# Update mailing_list user field
if user.mailing_lists is None:
user.mailing_lists = {}
user.save()
user.mailing_lists[list_name] = False
user.save()
@user_confirmed.connect
def subscribe_on_confirm(user):
# Subscribe user to general OSF mailing list upon account confirmation
if settings.ENABLE_EMAIL_SUBSCRIPTIONS:
subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)
| apache-2.0 |
k4cg/Rezeptionistin | plugins/temperature.py | 1 | 1851 | # coding: utf8
import socket
import urllib2
import json
from plugin import Plugin
class Temperature(Plugin):
def __init__(self, config=None):
try:
self.wunderground = config.get('Temperature', 'wunderground')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
print "Temperature was not properly configured in your config.ini"
super(Temperature, self).__init__()
def help_text(self, bot):
return bot.translate("temp_help")
def get_indoor_temp(self, bot):
msg = bot.get_spacestatus_data()
if msg is None:
return None
else:
return str(msg['temperature'])
def get_outdoor_temp(self, bot):
f = urllib2.urlopen(self.wunderground)
json_string = f.read()
parsed_json = json.loads(json_string)
temp_outdoor = parsed_json['current_observation']['temp_c']
f.close()
return temp_outdoor
def on_msg(self, bot, user_nick, host, channel, message):
if message.lower().startswith('!kt'):
temp = self.get_indoor_temp(bot)
temp_outdoor = self.get_outdoor_temp(bot)
if temp is not None:
bot.send_message(channel, bot.translate("temp_str1").format(temp=temp) + " " + bot.translate("temp_str2").format(temp=temp_outdoor), user_nick)
else:
bot.send_message(channel, bot.translate("temp_str3").format(temp=temp_outdoor), user_nick)
def on_privmsg(self, bot, user_nick, host, message):
if message.lower().startswith('!kt'):
temp = self.get_indoor_temp(bot)
temp_outdoor = self.get_outdoor_temp(bot)
if temp is not None:
bot.send_message(user_nick, bot.translate("temp_str1").format(temp=temp) + " " + bot.translate("temp_str2").format(temp=temp_outdoor), user_nick)
else:
bot.send_message(user_nick, bot.translate("temp_str3").format(temp=temp_outdoor), user_nick)
| mit |
blockc/fabric | bddtests/peer/configuration_pb2.py | 17 | 4136 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/configuration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/configuration.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x18peer/configuration.proto\x12\x06protos\"7\n\x0b\x41nchorPeers\x12(\n\x0c\x61nchor_peers\x18\x01 \x03(\x0b\x32\x12.protos.AnchorPeer\"(\n\nAnchorPeer\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ANCHORPEERS = _descriptor.Descriptor(
name='AnchorPeers',
full_name='protos.AnchorPeers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anchor_peers', full_name='protos.AnchorPeers.anchor_peers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=91,
)
_ANCHORPEER = _descriptor.Descriptor(
name='AnchorPeer',
full_name='protos.AnchorPeer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='protos.AnchorPeer.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='port', full_name='protos.AnchorPeer.port', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=133,
)
_ANCHORPEERS.fields_by_name['anchor_peers'].message_type = _ANCHORPEER
DESCRIPTOR.message_types_by_name['AnchorPeers'] = _ANCHORPEERS
DESCRIPTOR.message_types_by_name['AnchorPeer'] = _ANCHORPEER
AnchorPeers = _reflection.GeneratedProtocolMessageType('AnchorPeers', (_message.Message,), dict(
DESCRIPTOR = _ANCHORPEERS,
__module__ = 'peer.configuration_pb2'
# @@protoc_insertion_point(class_scope:protos.AnchorPeers)
))
_sym_db.RegisterMessage(AnchorPeers)
AnchorPeer = _reflection.GeneratedProtocolMessageType('AnchorPeer', (_message.Message,), dict(
DESCRIPTOR = _ANCHORPEER,
__module__ = 'peer.configuration_pb2'
# @@protoc_insertion_point(class_scope:protos.AnchorPeer)
))
_sym_db.RegisterMessage(AnchorPeer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
sbidoul/pip | tests/unit/test_locations.py | 4 | 5324 | """
locations.py tests
"""
import getpass
import os
import shutil
import sys
import tempfile
from unittest.mock import Mock
import pytest
from pip._internal.locations import SCHEME_KEYS, get_scheme
if sys.platform == 'win32':
pwd = Mock()
else:
import pwd
def _get_scheme_dict(*args, **kwargs):
scheme = get_scheme(*args, **kwargs)
return {k: getattr(scheme, k) for k in SCHEME_KEYS}
class TestLocations:
def setup(self):
self.tempdir = tempfile.mkdtemp()
self.st_uid = 9999
self.username = "example"
self.patch()
def teardown(self):
self.revert_patch()
shutil.rmtree(self.tempdir, ignore_errors=True)
def patch(self):
""" first store and then patch python methods pythons """
self.tempfile_gettempdir = tempfile.gettempdir
self.old_os_fstat = os.fstat
if sys.platform != 'win32':
# os.geteuid and pwd.getpwuid are not implemented on windows
self.old_os_geteuid = os.geteuid
self.old_pwd_getpwuid = pwd.getpwuid
self.old_getpass_getuser = getpass.getuser
# now patch
tempfile.gettempdir = lambda: self.tempdir
getpass.getuser = lambda: self.username
os.geteuid = lambda: self.st_uid
os.fstat = lambda fd: self.get_mock_fstat(fd)
if sys.platform != 'win32':
pwd.getpwuid = lambda uid: self.get_mock_getpwuid(uid)
def revert_patch(self):
""" revert the patches to python methods """
tempfile.gettempdir = self.tempfile_gettempdir
getpass.getuser = self.old_getpass_getuser
if sys.platform != 'win32':
# os.geteuid and pwd.getpwuid are not implemented on windows
os.geteuid = self.old_os_geteuid
pwd.getpwuid = self.old_pwd_getpwuid
os.fstat = self.old_os_fstat
def get_mock_fstat(self, fd):
""" returns a basic mock fstat call result.
Currently only the st_uid attribute has been set.
"""
result = Mock()
result.st_uid = self.st_uid
return result
def get_mock_getpwuid(self, uid):
""" returns a basic mock pwd.getpwuid call result.
Currently only the pw_name attribute has been set.
"""
result = Mock()
result.pw_name = self.username
return result
class TestDistutilsScheme:
def test_root_modifies_appropriately(self, monkeypatch):
# This deals with nt/posix path differences
# root is c:\somewhere\else or /somewhere/else
root = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
norm_scheme = _get_scheme_dict("example")
root_scheme = _get_scheme_dict("example", root=root)
for key, value in norm_scheme.items():
drive, path = os.path.splitdrive(os.path.abspath(value))
expected = os.path.join(root, path[1:])
assert os.path.abspath(root_scheme[key]) == expected
@pytest.mark.incompatible_with_venv
def test_distutils_config_file_read(self, tmpdir, monkeypatch):
# This deals with nt/posix path differences
install_scripts = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
f = tmpdir / "config" / "setup.cfg"
f.parent.mkdir()
f.write_text("[install]\ninstall-scripts=" + install_scripts)
from distutils.dist import Distribution
# patch the function that returns what config files are present
monkeypatch.setattr(
Distribution,
'find_config_files',
lambda self: [f],
)
scheme = _get_scheme_dict('example')
assert scheme['scripts'] == install_scripts
@pytest.mark.incompatible_with_venv
# when we request install-lib, we should install everything (.py &
# .so) into that path; i.e. ensure platlib & purelib are set to
# this path
def test_install_lib_takes_precedence(self, tmpdir, monkeypatch):
# This deals with nt/posix path differences
install_lib = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
f = tmpdir / "config" / "setup.cfg"
f.parent.mkdir()
f.write_text("[install]\ninstall-lib=" + install_lib)
from distutils.dist import Distribution
# patch the function that returns what config files are present
monkeypatch.setattr(
Distribution,
'find_config_files',
lambda self: [f],
)
scheme = _get_scheme_dict('example')
assert scheme['platlib'] == install_lib + os.path.sep
assert scheme['purelib'] == install_lib + os.path.sep
def test_prefix_modifies_appropriately(self):
prefix = os.path.abspath(os.path.join('somewhere', 'else'))
normal_scheme = _get_scheme_dict("example")
prefix_scheme = _get_scheme_dict("example", prefix=prefix)
def _calculate_expected(value):
path = os.path.join(prefix, os.path.relpath(value, sys.prefix))
return os.path.normpath(path)
expected = {
k: _calculate_expected(v)
for k, v in normal_scheme.items()
}
assert prefix_scheme == expected
| mit |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/django/contrib/messages/storage/base.py | 399 | 6134 | from django.conf import settings
from django.utils.encoding import force_unicode, StrAndUnicode
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message(StrAndUnicode):
"""
Represents an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepares the message for serialization by forcing the ``message``
and ``extra_tags`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_unicode`` implementation for details).
"""
self.message = force_unicode(self.message, strings_only=True)
self.extra_tags = force_unicode(self.extra_tags, strings_only=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and \
self.message == other.message
def __unicode__(self):
return force_unicode(self.message)
def _get_tags(self):
label_tag = force_unicode(LEVEL_TAGS.get(self.level, ''),
strings_only=True)
extra_tags = force_unicode(self.extra_tags, strings_only=True)
if extra_tags and label_tag:
return u' '.join([extra_tags, label_tag])
elif extra_tags:
return extra_tags
elif label_tag:
return label_tag
return ''
tags = property(_get_tags)
class BaseStorage(object):
"""
This is the base backend for temporary message storage.
This is not a complete class; to be a usable storage backend, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super(BaseStorage, self).__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Returns a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieves a list of stored messages. Returns a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the backend was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError()
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages, returning a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError()
def _prepare_messages(self, messages):
"""
Prepares a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Stores all unread messages.
If the backend has yet to be iterated, previously stored messages will
be stored again. Otherwise, only messages added after the last
iteration will be stored.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Returns the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Sets a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
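# Minimal sketch of a conforming backend (editor's note, not part of Django):
# a subclass only has to implement _get() and _store().
#
#     class MemoryStorage(BaseStorage):
#         _cache = {}
#
#         def _get(self, *args, **kwargs):
#             return self._cache.get(self.request), True
#
#         def _store(self, messages, response, *args, **kwargs):
#             self._cache[self.request] = messages
#             return []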
| apache-2.0 |
Drvanon/Game | venv/lib/python3.3/site-packages/werkzeug/serving.py | 309 | 27668 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get an
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import signal
import subprocess
try:
import thread
except ImportError:
import _thread as thread
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError, BadRequest
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown':
shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is bytes, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
from OpenSSL import crypto
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'w') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'w') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
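# Illustrative use (editor's sketch): generate a development certificate once
# and pass it to run_simple as a (cert_file, pkey_file) tuple, e.g.
#
#     cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
#     run_simple('localhost', 8443, app, ssl_context=(cert_file, pkey_file))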
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from OpenSSL import SSL
cert, pkey = generate_adhoc_ssl_pair()
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def load_ssl_context(cert_file, pkey_file):
"""Loads an SSL context from a certificate and private key file."""
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(cert_file)
ctx.use_privatekey_file(pkey_file)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
def shutdown(self, arg=None):
try:
self._con.shutdown()
except Exception:
pass
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
##try:
## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
## socket.SOCK_STREAM, 0,
## socket.AI_PASSIVE)
## if info:
## return info[0][0]
##except socket.gaierror:
## pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def _iter_module_files():
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _reloader_stat_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
from itertools import chain
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
# Mutated by inotify loop when changes occur.
changed = [False]
# Setup inotify watches
from pyinotify import WatchManager, Notifier
# this API changed at one point, support both
try:
from pyinotify import EventsCodes as ec
ec.IN_ATTRIB
except (ImportError, AttributeError):
import pyinotify as ec
wm = WatchManager()
mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
def signal_changed(event):
if changed[0]:
return
_log('info', ' * Detected change in %r, reloading' % event.path)
changed[:] = [True]
for fname in extra_files or ():
wm.add_watch(fname, mask, signal_changed)
# ... And now we wait...
notif = Notifier(wm)
try:
while not changed[0]:
# always reiterate through sys.modules, adding them
for fname in _iter_module_files():
wm.add_watch(fname, mask, signal_changed)
notif.process_events()
if notif.check_events(timeout=interval):
notif.read_events()
# TODO Set timeout to something small and check parent liveliness
finally:
notif.stop()
sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly. Also
# it's quite buggy and the API is a mess.
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
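        # exit code 3 is the private "restart requested" signal used by
        # reloader_loop (which calls sys.exit(3) when a watched file changes);
        # any other exit code ends the outer loop.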
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an OpenSSL
context, a tuple in the form ``(cert_file, pkey_file)``,
the string ``'adhoc'`` if the server should
automatically create one, or `None` to disable SSL
(which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
_log('info', ' * Running on %s://%s:%d/', ssl_context is None
and 'http' or 'https', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        sys.stdout.write('No application supplied, or too many. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
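# Illustrative only (not part of the original module): a minimal sketch of
# calling run_simple() directly with the SSL behaviour described in the
# docstring above; ``my_app`` is a hypothetical WSGI application.
#
#   from werkzeug.serving import run_simple
#   run_simple('localhost', 8443, my_app, use_reloader=True,
#              ssl_context='adhoc')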
| apache-2.0 |
shootstar/novatest | nova/api/openstack/compute/plugins/v3/rescue.py | 4 | 3531 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
from oslo.config import cfg
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import utils
ALIAS = "os-rescue"
CONF = cfg.CONF
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class RescueController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(RescueController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_id):
try:
return self.compute_api.get(context, instance_id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(msg)
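    # Illustrative request body for the action below, inferred from the
    # handler code itself (the password value is hypothetical); "adminPass"
    # is optional and a random password is generated when it is omitted:
    #
    #   {"rescue": {"adminPass": "MySecret123"}}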
@wsgi.action('rescue')
def _rescue(self, req, id, body):
"""Rescue an instance."""
context = req.environ["nova.context"]
authorize(context)
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
password = utils.generate_password()
instance = self._get_instance(context, id)
try:
self.compute_api.rescue(context, instance,
rescue_password=password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rescue')
except exception.InvalidVolume as volume_error:
raise exc.HTTPConflict(explanation=volume_error.format_message())
except exception.InstanceNotRescuable as non_rescuable:
raise exc.HTTPBadRequest(
explanation=non_rescuable.format_message())
return {'adminPass': password}
@wsgi.action('unrescue')
def _unrescue(self, req, id, body):
"""Unrescue an instance."""
context = req.environ["nova.context"]
authorize(context)
instance = self._get_instance(context, id)
try:
self.compute_api.unrescue(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
return webob.Response(status_int=202)
class Rescue(extensions.V3APIExtensionBase):
"""Instance rescue mode."""
name = "Rescue"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/rescue/api/v3"
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = RescueController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudapis/container/v1/container_v1_messages.py | 4 | 19560 | """Generated message classes for container version v1.
The Google Container Engine API is used for building and managing container
based applications, powered by the open source Kubernetes technology.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from protorpc import messages as _messages
from googlecloudapis.apitools.base.py import encoding
package = 'container'
class Cluster(_messages.Message):
"""A Google Container Engine cluster.
Enums:
StatusValueValuesEnum: [Output only] The current status of this cluster.
Fields:
clusterIpv4Cidr: The IP address range of the container pods in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8` or
`172.16.0.0/12`.
createTime: [Output only] The time the cluster was created, in
      [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
currentMasterVersion: [Output only] The current software version of the
master endpoint.
currentNodeVersion: [Output only] The current version of the node software
components. If they are currently at different versions because they're
in the process of being upgraded, this reflects the minimum version of
any of them.
description: An optional description of this cluster.
endpoint: [Output only] The IP address of this cluster's Kubernetes master
endpoint. The endpoint can be accessed from the internet at
`https://username:password@endpoint/`. See the `masterAuth` property of
this resource for username and password information.
initialClusterVersion: [Output only] The software version of Kubernetes
master and kubelets used in the cluster when it was first created. The
version can be upgraded over time.
initialNodeCount: The number of nodes to create in this cluster. You must
ensure that your Compute Engine [resource quota](/compute/docs/resource-
quotas) is sufficient for this number of instances. You must also have
available firewall and routes quota.
instanceGroupUrls: [Output only] The resource URLs of [instance
groups](/compute/docs/instance-groups/) associated with this cluster.
loggingService: The logging service that the cluster should write logs to.
Currently available options: * "logging.googleapis.com" - the Google
Cloud Logging service * "none" - no logs will be exported from the
cluster * "" - default value; the default is "logging.googleapis.com"
masterAuth: The authentication information for accessing the master.
monitoringService: The monitoring service that the cluster should write
metrics to. Currently available options: * "monitoring.googleapis.com"
- the Google Cloud Monitoring service * "none" - no metrics will be
exported from the cluster * "" - default value; the default is
"monitoring.googleapis.com"
name: The name of this cluster. The name must be unique within this
project and zone, and can be up to 40 characters with the following
restrictions: * Lowercase letters, numbers, and hyphens only. * Must
start with a letter. * Must end with a number or a letter.
network: The name of the Google Compute Engine
[network](/compute/docs/networking#networks_1) to which the cluster is
connected. If left unspecified, the "default" network will be used.
nodeConfig: Parameters used in creating the cluster's nodes. See the
descriptions of the child properties of `nodeConfig`.
nodeIpv4CidrSize: [Output only] The size of the address space on each node
for hosting containers. This is provisioned from within the
container_ipv4_cidr range.
selfLink: [Output only] Server-defined URL for the resource.
servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
services in this cluster, in [CIDR](http://en.wikipedia.org/wiki
/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service
addresses are typically put in the last /16 from the container CIDR.
status: [Output only] The current status of this cluster.
statusMessage: [Output only] Additional information about the current
status of this cluster, if available.
zone: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
class StatusValueValuesEnum(_messages.Enum):
"""[Output only] The current status of this cluster.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the cluster is being
created.
RUNNING: The RUNNING state indicates the cluster has been created and is
fully usable.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the cluster, such as upgrading the master or node
software. Details can be found in the statusMessage field.
STOPPING: The STOPPING state indicates the cluster is being deleted.
ERROR: The ERROR state indicates the cluster may be unusable. Details
can be found in the statusMessage field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RECONCILING = 3
STOPPING = 4
ERROR = 5
clusterIpv4Cidr = _messages.StringField(1)
createTime = _messages.StringField(2)
currentMasterVersion = _messages.StringField(3)
currentNodeVersion = _messages.StringField(4)
description = _messages.StringField(5)
endpoint = _messages.StringField(6)
initialClusterVersion = _messages.StringField(7)
initialNodeCount = _messages.IntegerField(8, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(9, repeated=True)
loggingService = _messages.StringField(10)
masterAuth = _messages.MessageField('MasterAuth', 11)
monitoringService = _messages.StringField(12)
name = _messages.StringField(13)
network = _messages.StringField(14)
nodeConfig = _messages.MessageField('NodeConfig', 15)
nodeIpv4CidrSize = _messages.IntegerField(16, variant=_messages.Variant.INT32)
selfLink = _messages.StringField(17)
servicesIpv4Cidr = _messages.StringField(18)
status = _messages.EnumField('StatusValueValuesEnum', 19)
statusMessage = _messages.StringField(20)
zone = _messages.StringField(21)
class ClusterUpdate(_messages.Message):
"""ClusterUpdate describes an update to the cluster.
Fields:
desiredMasterVersion: The Kubernetes version to change the master to
(typically an upgrade). Use "-" to upgrade to the latest version
supported by the server.
desiredNodeVersion: The Kubernetes version to change the nodes to
(typically an upgrade). Use "-" to upgrade to the latest version
supported by the server.
"""
desiredMasterVersion = _messages.StringField(1)
desiredNodeVersion = _messages.StringField(2)
class ContainerProjectsZonesClustersCreateRequest(_messages.Message):
"""A ContainerProjectsZonesClustersCreateRequest object.
Fields:
createClusterRequest: A CreateClusterRequest resource to be passed as the
request body.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
createClusterRequest = _messages.MessageField('CreateClusterRequest', 1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersDeleteRequest(_messages.Message):
"""A ContainerProjectsZonesClustersDeleteRequest object.
Fields:
clusterId: The name of the cluster to delete.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersGetRequest(_messages.Message):
"""A ContainerProjectsZonesClustersGetRequest object.
Fields:
clusterId: The name of the cluster to retrieve.
    projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersListRequest(_messages.Message):
"""A ContainerProjectsZonesClustersListRequest object.
Fields:
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones.
"""
projectId = _messages.StringField(1, required=True)
zone = _messages.StringField(2, required=True)
class ContainerProjectsZonesClustersUpdateRequest(_messages.Message):
"""A ContainerProjectsZonesClustersUpdateRequest object.
Fields:
clusterId: The name of the cluster to upgrade.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
updateClusterRequest: A UpdateClusterRequest resource to be passed as the
request body.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
updateClusterRequest = _messages.MessageField('UpdateClusterRequest', 3)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesOperationsGetRequest(_messages.Message):
"""A ContainerProjectsZonesOperationsGetRequest object.
Fields:
operationId: The server-assigned `name` of the operation.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
operationId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesOperationsListRequest(_messages.Message):
"""A ContainerProjectsZonesOperationsListRequest object.
Fields:
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or "-"
for all zones.
"""
projectId = _messages.StringField(1, required=True)
zone = _messages.StringField(2, required=True)
class CreateClusterRequest(_messages.Message):
"""CreateClusterRequest creates a cluster.
Fields:
cluster: A [cluster resource](/container-
engine/reference/rest/v1/projects.zones.clusters)
"""
cluster = _messages.MessageField('Cluster', 1)
class ListClustersResponse(_messages.Message):
"""ListClustersResponse is the result of ListClustersRequest.
Fields:
clusters: A list of clusters in the project in the specified zone, or
      across all zones.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
class ListOperationsResponse(_messages.Message):
"""ListOperationsResponse is the result of ListOperationsRequest.
Fields:
operations: A list of operations in the project in the specified zone.
"""
operations = _messages.MessageField('Operation', 1, repeated=True)
class MasterAuth(_messages.Message):
"""The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Fields:
clientCertificate: [Output only] Base64 encoded public certificate used by
clients to authenticate to the cluster endpoint.
clientKey: [Output only] Base64 encoded private key used by clients to
authenticate to the cluster endpoint.
clusterCaCertificate: [Output only] Base64 encoded public certificate that
is the root of trust for the cluster.
password: The password to use for HTTP basic authentication when accessing
the Kubernetes master endpoint. Because the master endpoint is open to
the internet, you should create a strong password.
username: The username to use for HTTP basic authentication when accessing
the Kubernetes master endpoint.
"""
clientCertificate = _messages.StringField(1)
clientKey = _messages.StringField(2)
clusterCaCertificate = _messages.StringField(3)
password = _messages.StringField(4)
username = _messages.StringField(5)
class NodeConfig(_messages.Message):
"""Per-node parameters.
Fields:
diskSizeGb: Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB, and the default is 100GB.
machineType: The name of a Google Compute Engine [machine
type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
unspecified, the default machine type is `n1-standard-1`.
oauthScopes: The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. Currently, the
following scopes are necessary to ensure the correct functioning of the
cluster: * "https://www.googleapis.com/auth/compute" *
"https://www.googleapis.com/auth/devstorage.read_only"
"""
diskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
machineType = _messages.StringField(2)
oauthScopes = _messages.StringField(3, repeated=True)
class Operation(_messages.Message):
"""Defines the operation resource. All fields are output only.
Enums:
OperationTypeValueValuesEnum: The operation type.
StatusValueValuesEnum: The current status of the operation.
Fields:
name: The server-assigned ID for the operation.
operationType: The operation type.
selfLink: Server-defined URL for the resource.
status: The current status of the operation.
statusMessage: If an error has occurred, a textual description of the
error.
targetLink: Server-defined URL for the target of the operation.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation is taking
place.
"""
class OperationTypeValueValuesEnum(_messages.Enum):
"""The operation type.
Values:
TYPE_UNSPECIFIED: Not set.
CREATE_CLUSTER: Cluster create.
DELETE_CLUSTER: Cluster delete.
UPGRADE_MASTER: A master upgrade.
UPGRADE_NODES: A node upgrade.
REPAIR_CLUSTER: Cluster repair.
"""
TYPE_UNSPECIFIED = 0
CREATE_CLUSTER = 1
DELETE_CLUSTER = 2
UPGRADE_MASTER = 3
UPGRADE_NODES = 4
REPAIR_CLUSTER = 5
class StatusValueValuesEnum(_messages.Enum):
"""The current status of the operation.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
name = _messages.StringField(1)
operationType = _messages.EnumField('OperationTypeValueValuesEnum', 2)
selfLink = _messages.StringField(3)
status = _messages.EnumField('StatusValueValuesEnum', 4)
statusMessage = _messages.StringField(5)
targetLink = _messages.StringField(6)
zone = _messages.StringField(7)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
include in api requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class UpdateClusterRequest(_messages.Message):
"""UpdateClusterRequest updates a cluster.
Fields:
update: A description of the update.
"""
update = _messages.MessageField('ClusterUpdate', 1)
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'container')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'container')
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'container')
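# Illustrative only (this file is autogenerated; the example below is an
# editorial sketch, not generated code): protorpc message classes are
# instantiated with keyword arguments matching their declared fields, e.g.
#
#   req = ContainerProjectsZonesClustersListRequest(
#       projectId='my-project', zone='-')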
| apache-2.0 |
t-wissmann/qutebrowser | qutebrowser/misc/savemanager.py | 1 | 8427 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Saving things to disk periodically."""
import os.path
import collections
import typing
from PyQt5.QtCore import pyqtSlot, QObject, QTimer
from qutebrowser.config import config
from qutebrowser.api import cmdutils
from qutebrowser.utils import utils, log, message, usertypes, error
from qutebrowser.misc import objects
class Saveable:
"""A single thing which can be saved.
Attributes:
_name: The name of the thing to be saved.
_dirty: Whether the saveable was changed since the last save.
_save_handler: The function to call to save this Saveable.
_save_on_exit: Whether to always save this saveable on exit.
_config_opt: A config option which decides whether to auto-save or not.
None if no such option exists.
_filename: The filename of the underlying file.
"""
def __init__(self, name, save_handler, changed=None, config_opt=None,
filename=None):
self._name = name
self._dirty = False
self._save_handler = save_handler
self._config_opt = config_opt
if changed is not None:
changed.connect(self.mark_dirty)
self._save_on_exit = False
else:
self._save_on_exit = True
self._filename = filename
if filename is not None and not os.path.exists(filename):
self._dirty = True
self.save()
def __repr__(self):
return utils.get_repr(self, name=self._name, dirty=self._dirty,
save_handler=self._save_handler,
config_opt=self._config_opt,
save_on_exit=self._save_on_exit,
filename=self._filename)
def mark_dirty(self):
"""Mark this saveable as dirty (having changes)."""
log.save.debug("Marking {} as dirty.".format(self._name))
self._dirty = True
def save(self, is_exit=False, explicit=False, silent=False, force=False):
"""Save this saveable.
Args:
is_exit: Whether we're currently exiting qutebrowser.
explicit: Whether the user explicitly requested this save.
silent: Don't write information to log.
force: Force saving, no matter what.
"""
if (self._config_opt is not None and
(not config.instance.get(self._config_opt)) and
(not explicit) and (not force)):
if not silent:
log.save.debug("Not saving {name} because autosaving has been "
"disabled by {cfg[0]} -> {cfg[1]}.".format(
name=self._name, cfg=self._config_opt))
return
do_save = self._dirty or (self._save_on_exit and is_exit) or force
if not silent:
log.save.debug("Save of {} requested - dirty {}, save_on_exit {}, "
"is_exit {}, force {} -> {}".format(
self._name, self._dirty, self._save_on_exit,
is_exit, force, do_save))
if do_save:
self._save_handler()
self._dirty = False
class SaveManager(QObject):
"""Responsible to save 'saveables' periodically and on exit.
Attributes:
saveables: A dict mapping names to Saveable instances.
_save_timer: The Timer used to periodically auto-save things.
"""
def __init__(self, parent=None):
super().__init__(parent)
self.saveables = collections.OrderedDict(
) # type: typing.MutableMapping[str, Saveable]
self._save_timer = usertypes.Timer(self, name='save-timer')
self._save_timer.timeout.connect(self.autosave)
self._set_autosave_interval()
config.instance.changed.connect(self._set_autosave_interval)
def __repr__(self):
return utils.get_repr(self, saveables=self.saveables)
@config.change_filter('auto_save.interval')
def _set_autosave_interval(self):
"""Set the auto-save interval."""
interval = config.val.auto_save.interval
if interval == 0:
self._save_timer.stop()
else:
self._save_timer.setInterval(interval)
self._save_timer.start()
def add_saveable(self, name, save, changed=None, config_opt=None,
filename=None, dirty=False):
"""Add a new saveable.
Args:
name: The name to use.
save: The function to call to save this saveable.
changed: The signal emitted when this saveable changed.
config_opt: An option deciding whether to auto-save or not.
filename: The filename of the underlying file, so we can force
saving if it doesn't exist.
dirty: Whether the saveable is already dirty.
"""
if name in self.saveables:
raise ValueError("Saveable {} already registered!".format(name))
saveable = Saveable(name, save, changed, config_opt, filename)
self.saveables[name] = saveable
if dirty:
saveable.mark_dirty()
QTimer.singleShot(0, saveable.save)
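    # Illustrative usage sketch (object names and the config option are
    # hypothetical): register a saveable that is re-saved whenever its
    # `changed` signal fires, gated by an auto-save option.
    #
    #   save_manager.add_saveable('state-config', state_config.save,
    #                             changed=state_config.changed,
    #                             config_opt='auto_save.session')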
def save(self, name, is_exit=False, explicit=False, silent=False,
force=False):
"""Save a saveable by name.
Args:
is_exit: Whether we're currently exiting qutebrowser.
explicit: Whether this save operation was triggered explicitly.
silent: Don't write information to log. Used to reduce log spam
when autosaving.
force: Force saving, no matter what.
"""
self.saveables[name].save(is_exit=is_exit, explicit=explicit,
silent=silent, force=force)
def save_all(self, *args, **kwargs):
"""Save all saveables."""
for saveable in self.saveables:
self.save(saveable, *args, **kwargs)
@pyqtSlot()
def autosave(self):
"""Slot used when the configs are auto-saved."""
for (key, saveable) in self.saveables.items():
try:
saveable.save(silent=True)
except OSError as e:
message.error("Failed to auto-save {}: {}".format(key, e))
@cmdutils.register(instance='save-manager', name='save',
star_args_optional=True)
def save_command(self, *what):
"""Save configs and state.
Args:
*what: What to save (`config`/`key-config`/`cookies`/...).
If not given, everything is saved.
"""
if what:
explicit = True
else:
what = tuple(self.saveables)
explicit = False
for key in what:
if key not in self.saveables:
message.error("{} is nothing which can be saved".format(key))
else:
try:
self.save(key, explicit=explicit, force=True)
except OSError as e:
message.error("Could not save {}: {}".format(key, e))
log.save.debug(":save saved {}".format(', '.join(what)))
@pyqtSlot()
def shutdown(self):
"""Save all saveables when shutting down."""
for key in self.saveables:
try:
self.save(key, is_exit=True)
except OSError as e:
error.handle_fatal_exc(
e, "Error while saving!",
pre_text="Error while saving {}".format(key),
no_err_windows=objects.args.no_err_windows)
| gpl-3.0 |
les69/calvin-base | calvin/actorstore/systemactors/net/UDPListener.py | 2 | 3328 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class UDPListener(Actor):
"""
Listen for UDP messages on a given port.
    The control_in port takes control commands of the form (uri only
    applicable for the "listen" command):
    {
        "command" : "listen"/"stop",
        "uri": "udp://<ipv4 address>:<port>"
    }
Input:
control_in : JSON containing host & port to listen to.
Output:
data_out : Data received on the UDP port will be sent as tokens.
"""
@manage(['host', 'port'])
def init(self):
self.host = None
self.port = None
self.listener = None
self.setup()
def listen(self):
self.listener = self['server'].start(self.host, self.port, "udp")
def did_migrate(self):
self.setup()
if self.port is not None:
self.listen()
def setup(self):
self.use('calvinsys.network.serverhandler', shorthand='server')
self.use('calvinsys.native.python-re', shorthand='regexp')
@condition(action_output=['data_out'])
@guard(lambda self: self.listener and self.listener.have_data())
def receive(self):
data = self.listener.data_get()
return ActionResult(production=(data,))
# URI parsing - 0: protocol, 1: host, 2: port
URI_REGEXP = r'([^:]+)://([^/:]*):([0-9]+)'
def parse_uri(self, uri):
status = False
try:
parsed_uri = self['regexp'].findall(self.URI_REGEXP, uri)[0]
protocol = parsed_uri[0]
if protocol != 'udp':
_log.warn("Protocol '%s' not supported, assuming udp" % (protocol,))
self.host = parsed_uri[1]
self.port = int(parsed_uri[2])
status = True
except:
_log.warn("malformed or erroneous control uri '%s'" % (uri,))
self.host = None
self.port = None
return status
@condition(action_input=['control_in'])
@guard(lambda self, control: control.get('command', '') == 'listen' and not self.listener)
def new_port(self, control):
if self.parse_uri(control.get('uri', '')):
self.listen()
return ActionResult()
@condition(action_input=['control_in'])
@guard(lambda self, control: control.get('command', '') == 'stop' and self.listener)
def close_port(self, control):
self.listener.stop()
del self.listener
self.listener = None
return ActionResult(production=())
action_priority = (new_port, close_port, receive)
requires = ['calvinsys.network.serverhandler', 'calvinsys.native.python-re']
| apache-2.0 |
guozhangwang/kafka | tests/kafkatest/services/log_compaction_tester.py | 3 | 4139 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin, CORE_LIBS_JAR_NAME, CORE_DEPENDANT_TEST_LIBS_JAR_NAME
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH
class LogCompactionTester(KafkaPathResolverMixin, BackgroundThreadService):
OUTPUT_DIR = "/mnt/logcompaction_tester"
LOG_PATH = os.path.join(OUTPUT_DIR, "logcompaction_tester_stdout.log")
VERIFICATION_STRING = "Data verification is completed"
logs = {
"tool_logs": {
"path": LOG_PATH,
"collect_default": True}
}
def __init__(self, context, kafka, security_protocol="PLAINTEXT", stop_timeout_sec=30, tls_version=None):
super(LogCompactionTester, self).__init__(context, 1)
self.kafka = kafka
self.security_protocol = security_protocol
self.tls_version = tls_version
self.security_config = SecurityConfig(self.context, security_protocol, tls_version=tls_version)
self.stop_timeout_sec = stop_timeout_sec
self.log_compaction_completed = False
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % LogCompactionTester.OUTPUT_DIR)
cmd = self.start_cmd(node)
self.logger.info("LogCompactionTester %d command: %s" % (idx, cmd))
self.security_config.setup_node(node)
for line in node.account.ssh_capture(cmd):
self.logger.debug("Checking line:{}".format(line))
if line.startswith(LogCompactionTester.VERIFICATION_STRING):
self.log_compaction_completed = True
def start_cmd(self, node):
core_libs_jar = self.path.jar(CORE_LIBS_JAR_NAME, DEV_BRANCH)
core_dependant_test_libs_jar = self.path.jar(CORE_DEPENDANT_TEST_LIBS_JAR_NAME, DEV_BRANCH)
cmd = "for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_libs_jar
cmd += " for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_dependant_test_libs_jar
cmd += " export CLASSPATH;"
cmd += self.path.script("kafka-run-class.sh", node)
cmd += " %s" % self.java_class_name()
cmd += " --bootstrap-server %s --messages 1000000 --sleep 20 --duplicates 10 --percent-deletes 10" % (self.kafka.bootstrap_servers(self.security_protocol))
cmd += " 2>> %s | tee -a %s &" % (self.logs["tool_logs"]["path"], self.logs["tool_logs"]["path"])
return cmd
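    # Roughly, the assembled command looks like this (paths, host and log
    # locations are illustrative):
    #
    #   for file in .../core*.jar; do CLASSPATH=$CLASSPATH:$file; done; \
    #   export CLASSPATH; .../kafka-run-class.sh kafka.tools.LogCompactionTester \
    #       --bootstrap-server worker1:9092 --messages 1000000 --sleep 20 \
    #       --duplicates 10 --percent-deletes 10 2>> <tool_log> | tee -a <tool_log> &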
def stop_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=True,
allow_fail=True)
stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
(str(node.account), str(self.stop_timeout_sec))
def clean_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=False,
allow_fail=True)
node.account.ssh("rm -rf %s" % LogCompactionTester.OUTPUT_DIR, allow_fail=False)
def java_class_name(self):
return "kafka.tools.LogCompactionTester"
@property
def is_done(self):
return self.log_compaction_completed
| apache-2.0 |
agopalak/football_pred | pre_proc/proc_data.py | 1 | 4667 |
import sys
import yaml
import re
import datetime as DT
import logging
from rainbow_logging_handler import RainbowLoggingHandler
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn_pandas import DataFrameMapper
# Capturing current module. Needed to call getattr on this module
this_module = sys.modules[__name__]
# Setup logging module
# TODO: Figure out a standard way to install/handle logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(filename)s:%(lineno)4s - %(funcName)15s()] %(levelname)8s: %(message)s')
# Setup RainbowLoggingHandler
handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
# Convert booleans to strings during YAML load.
# Done to work around quirks in how PyYAML handles boolean values.
def bool_constructor(self, node):
value = self.construct_yaml_bool(node)
if value == False:
return 'False'
else:
return 'True'
yaml.Loader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
# Load data from CSV, configuration file
# Process data and provide input/output data frames
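# Illustrative datacfg.yaml entry (the shape is inferred from the parsing code
# below; the real configuration file may differ). Note that booleans are
# loaded as the strings 'True'/'False' by the constructor above.
#
#   - column: 3
#     include: True
#     transform:
#       - conv2num: True
#       - scale: True
#   - column: 5
#     include: True
#     transform:
#       - conv2timedelta: True, %Y-%m-%d, now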
def load_data(data_csv, data_cfg):
# Load Data YAML configuration file
with open(data_cfg, 'r') as yf:
data = yaml.load(yf)
# Read CSV into data frame
df = pd.read_csv(data_csv)
# Filling holes with zeros
df.fillna(0, inplace=True)
# Process Columns
for item in data:
        # Note: the constructor above loads YAML booleans as the strings
        # 'True'/'False', so the comparison must be against the string.
        if item['include'] == 'False':
continue
else:
colnum = item['column']
logger.info('Processing Column %s', colnum)
# Create a column data frame
col_df = df.iloc[:, [colnum-1]].copy()
logger.debug(col_df.columns)
logger.debug('Preprocess Column Input\n%s', col_df.head())
# Apply transformations
col_df = do_transform(col_df, item['transform'])
logger.debug('Preprocess Column Output\n%s', col_df.head())
# Perform Data Transformations
def do_transform(df, tf):
for func in tf:
        # list() keeps this working on both Python 2 and Python 3 dict views
        funckey, funcval = list(func.items())[0]
# Getting transformation call name
transform = getattr(this_module, funckey, None)
# Splitting funcval to individual function arguments
# First argument is True/False to indicate if transform is called
try:
            pattern = re.compile(r'\s*,\s*')
funcvals = pattern.split(funcval)
logger.debug('Funcvals --> %s', funcvals)
except AttributeError:
funcvals = [funcval]
# Calling transformation
if funcvals[0] == 'True':
try:
logger.debug('Funckey --> %s', funckey)
df = transform(df, funcvals[1:])
except AttributeError:
logger.error('Function %s has not been implemented!', funckey)
return df
# Performs feature scaling on data frame
# TODO: scale - Add implementation to handle val
def scale(df, val):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
mms = preprocessing.MinMaxScaler()
return pd.DataFrame(mms.fit_transform(df.values.ravel().reshape(-1, 1)), columns=df.columns)
# conv2num: Converts column data to ordered integers
# TODO: conv2num - Add implementation to handle args
def conv2num(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelEncoder()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2bin: Converts column data to binary
# TODO: conv2bin - Add implementation to handle args
def conv2bin(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelBinarizer()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2timedelta: Converts column data to age
# TODO: conv2timedelta - Current returns in years. May need to make it more scalable
def conv2timedelta(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
if args[1] == 'now':
refdate = pd.Timestamp(DT.datetime.now())
else:
refdate = pd.Timestamp(DT.datetime.strptime(args[1], args[0]))
logger.debug('Reference date is: %s', refdate)
df = pd.DataFrame((refdate - pd.to_datetime(df.values.ravel())), columns=df.columns)
return df.apply(lambda x: (x/np.timedelta64(1, 'Y')).astype(int))
# Main Program
if __name__ == '__main__':
load_data('nflData.csv', 'datacfg.yaml')
| mit |
Kongsea/tensorflow | tensorflow/python/eager/graph_only_ops.py | 69 | 2363 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph-only versions of a few op functions, for internal use only."""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def graph_zeros_like(tensor):
"""Graph-only version of tf.zeros_like(), for internal use only."""
g = ops._get_graph_from_inputs([tensor]) # pylint: disable=protected-access
with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
dtype = tensor.dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
attrs={"T": dtype_value}, name=name)
result, = op.outputs
return result
def graph_placeholder(dtype, shape, name=None):
"""Graph-only version of tf.placeholder(), for internal use only."""
dtype = dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
assert isinstance(shape, tensor_shape.TensorShape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
g = ops.get_default_graph()
with ops.name_scope(name, "placeholder", []) as name:
op = g.create_op("Placeholder", [], [dtype], input_types=[],
attrs={"dtype": dtype_value, "shape": shape}, name=name)
result, = op.outputs
return result
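# Illustrative only (not part of the original file): a minimal graph-mode
# usage sketch of the two helpers above.
#
#   from tensorflow.python.framework import dtypes
#   x = graph_placeholder(dtypes.float32, (2, 2))
#   z = graph_zeros_like(x)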
| apache-2.0 |
petrutlucian94/nova | nova/tests/unit/virt/xenapi/test_volumeops.py | 65 | 24052 | # Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VolumeOpsTestBase, self).setUp()
self._setup_mock_volumeops()
def _setup_mock_volumeops(self):
self.session = stubs.FakeSessionForVolumeTests('fake_uri')
self.ops = volumeops.VolumeOps(self.session)
class VolumeDetachTestCase(VolumeOpsTestBase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.volume_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEqual(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = "vbd_ref"
self.ops.detach_volume({}, "name", "/dev/xvdd")
mock_vm.assert_called_once_with(self.session, "name")
mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = None
self.ops.detach_volume({}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_raises(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops.detach_volume, {}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = False
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
mock_shutdown.assert_called_once_with(self.session, "vm_ref")
mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
mock_destroy.assert_called_once_with(self.session, "vbd_ref")
mock_purge.assert_called_once_with(self.session, "sr_ref")
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = True
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
expected = [mock.call(self.session, "vbd_ref_1"),
mock.call(self.session, "vbd_ref_2")]
self.assertEqual(expected, mock_destroy.call_args_list)
mock_purge.assert_called_with(self.session, "sr_ref")
self.assertFalse(mock_unplug.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = []
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = ["1"]
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_detach.assert_called_once_with("vm_ref", ["1"])
def test_get_all_volume_vbd_refs_no_vbds(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = []
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
self.assertFalse(mock_conf.called)
def test_get_all_volume_vbd_refs_no_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1"]
mock_conf.return_value = {}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
mock_conf.assert_called_once_with("1")
def test_get_all_volume_vbd_refs_with_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1", "2"]
mock_conf.return_value = {"osvol": True}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual(["1", "2"], list(result))
mock_get.assert_called_once_with("vm_ref")
class AttachVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda")
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
True)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
False)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
self.ops.connect_volume({})
mock_attach.assert_called_once_with({})
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
self.assertFalse(mock_attach.called)
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.return_value = "vdi_ref"
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info, "vm_ref",
"name", 2, True)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, "name")
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
True)
@mock.patch.object(volume_utils, "forget_sr")
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver, mock_forget):
connection_info = {"data": {}}
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops._attach_volume, connection_info)
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_forget.assert_called_once_with(self.session, "sr_ref")
self.assertFalse(mock_attach.called)
def test_check_is_supported_driver_type_pass_iscsi(self):
conn_info = {"driver_volume_type": "iscsi"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_xensm(self):
conn_info = {"driver_volume_type": "xensm"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_bad(self):
conn_info = {"driver_volume_type": "bad"}
self.assertRaises(exception.VolumeDriverNotFound,
self.ops._check_is_supported_driver_type, conn_info)
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = None
mock_introduce_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
mock_introduce_sr.assert_called_once_with(self.session, "uuid",
"label", "params")
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
self.assertFalse(mock_introduce_sr.called)
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_regular(self, mock_intro):
mock_intro.return_value = "vdi"
result = self.ops._connect_hypervisor_to_volume("sr", {})
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"vdi_uuid": "id"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
vdi_uuid="id")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_lun(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"target_lun": "lun"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
target_lun="lun")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = False
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
mock_plug.assert_called_once_with("vbd", "vm")
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = True
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
self.assertFalse(mock_shutdown.called)
class FindBadVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_vbds(self, mock_get_all):
mock_get_all.return_value = []
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["1", "2"]
mock_find_sr.return_value = "sr_ref"
with mock.patch.object(self.session.SR, "scan") as mock_scan:
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
expected_find = [mock.call(self.session, "1"),
mock.call(self.session, "2")]
self.assertEqual(expected_find, mock_find_sr.call_args_list)
expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
self.assertEqual(expected_scan, mock_scan.call_args_list)
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
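        # SR_BACKEND_FAILURE_40 is the XenAPI failure code that find_bad_volumes is
        # expected to treat as a bad/unreachable volume; other failure codes should be
        # re-raised (contrast with test_find_bad_volumes_raises below).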
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_scan.assert_called_once_with("sr_ref")
mock_get.assert_called_once_with("vbd_ref")
self.assertEqual(["/dev/xvdb"], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['foo', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
self.assertRaises(FakeException,
self.ops.find_bad_volumes, "vm_ref")
mock_scan.assert_called_once_with("sr_ref")
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs):
find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
in vdi_refs]
find_sr_from_vdi.assert_has_calls(find_sr_calls)
purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
in sr_refs]
purge_sr.assert_has_calls(purge_sr_calls)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi',
side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref2']
find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
sr_refs[0]]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
        purge_sr.side_effect = [test.TestingException, None]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
| apache-2.0 |
amir-qayyum-khan/edx-platform | common/lib/xmodule/xmodule/assetstore/assetmgr.py | 148 | 2171 | """
Asset Manager
Interface allowing course asset saving/retrieving.
Handles:
- saving asset in the BlobStore -and- saving asset metadata in course modulestore.
- retrieving asset metadata from course modulestore -and- returning URL to asset -or- asset bytes.
Phase 1: Checks to see if an asset's metadata can be found in the course's modulestore.
If not found, fails over to access the asset from the contentstore.
At first, the asset metadata will never be found, since saving isn't implemented yet.
Note: Hotfix (PLAT-734) No asset calls find_asset_metadata, and directly accesses from contentstore.
"""
from contracts import contract, new_contract
from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.django import contentstore
new_contract('AssetKey', AssetKey)
class AssetException(Exception):
"""
Base exception class for all exceptions related to assets.
"""
pass
class AssetMetadataNotFound(AssetException):
"""
Thrown when no asset metadata is present in the course modulestore for the particular asset requested.
"""
pass
class AssetMetadataFoundTemporary(AssetException):
"""
TEMPORARY: Thrown if asset metadata is actually found in the course modulestore.
"""
pass
class AssetManager(object):
"""
Manager for saving/loading course assets.
"""
@staticmethod
@contract(asset_key='AssetKey', throw_on_not_found='bool', as_stream='bool')
def find(asset_key, throw_on_not_found=True, as_stream=False):
"""
Finds course asset in the deprecated contentstore.
This method was previously searching for the course asset in the assetstore first, then in the deprecated
contentstore. However, the asset was never found in the assetstore since an asset's metadata is
        not yet stored there. (Removed calls to modulestore().find_asset_metadata(asset_key).)
The assetstore search was removed due to performance issues caused by each call unpickling the pickled and
compressed course structure from the structure cache.
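        Example (hypothetical asset_key; AssetKey construction depends on the course):
            AssetManager.find(asset_key, throw_on_not_found=False, as_stream=True)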
"""
return contentstore().find(asset_key, throw_on_not_found, as_stream)
| agpl-3.0 |
terhorst/psmcpp | test/unit/scrm_sim.py | 2 | 4001 | #!/usr/bin/env python2.7
from __future__ import print_function, division
from subprocess import check_output
from math import log
import random
import numpy as np
import itertools
import os
import json
from collections import Counter, namedtuple, defaultdict
from phylogenies import leaves, newick2tree, parent_of
from sum_product import NodeState
SCRM_PATH = os.environ['SCRM_PATH']
def build_command_line(root, L, lineages_per_taxon):
'''Given a tree, build a scrm command line which will simulate from it.'''
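    # Illustrative shape of the generated command line (actual values depend on the tree):
    #   <SCRM_PATH> <total_samples> 1 -t 10 -I <npop> <n1> ... -ej <age> <i> <j> -en <age> <i> <N/N0>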
ejopts = []
Iopts = []
enopts = []
N0 = root.N
# tfac = 1.0 / 4.0 / N0 / 25.0
# Times are already scaled in Jack's implementation
tfac = 1.0
# rho = 1e-9 * 4 * N0 * (L - 1)
theta = 10.
lineages = []
lineage_map = {}
for i, leaf_node in list(enumerate(leaves(root), 1)):
nsamp = lineages_per_taxon
Iopts.append(nsamp)
lineage_map[leaf_node] = i
lineages += [leaf_node.leaf_name] * nsamp
age = leaf_node.edge_length * tfac
enopts.append((age, i, leaf_node.N / N0))
p = parent_of(root, leaf_node)
while True:
if p not in lineage_map:
lineage_map[p] = i
if p.edge_length == float("inf"):
break
age += p.edge_length * tfac
old_p = p
p = parent_of(root, p)
enopts.append((age, i, p.N / N0))
else:
# We have a join-on time
ejopts.append((age, i, lineage_map[p]))
break
cmdline = ["-I %d %s" % (len(Iopts), " ".join(map(str, Iopts)))]
for ej in ejopts:
cmdline.append("-ej %g %d %d" % ej)
for en in enopts:
cmdline.append("-en %g %d %g" % en)
cmdline = ["%s %d 1 -t %g" % (SCRM_PATH, sum(Iopts), theta)] + cmdline
print(cmdline)
return lineages, " ".join(cmdline)
def run_simulation(tree, L, lineages_per_taxon):
lineages, cmd = build_command_line(tree, L, lineages_per_taxon)
species = list(set(lineages))
n_lineages = Counter(lineages)
N0 = tree.N
print(cmd)
output = [l.strip() for l in check_output(cmd, shell=True).split("\n")]
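    # scrm prints a line containing "//" before each replicate; f() is used as an
    # itertools.groupby key that increments once per "//" so each replicate's lines
    # form their own group (group 0 is the preamble, skipped below).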
def f(x):
if x == "//":
f.i += 1
return f.i
f.i = 0
for k, lines in itertools.groupby(output, f):
if k == 0:
continue
# Skip preamble
next(lines)
# segsites
segsites = int(next(lines).split(" ")[1])
# positions
next(lines)
# at haplotypes
lin_counts = defaultdict(lambda: np.zeros(segsites, dtype=int))
for hap, lin in zip(lines, lineages):
hap = list(map(int, hap))
lin_counts[lin] += hap
return [{lin: NodeState(n_derived=lin_counts[lin][i],
n_ancestral=n_lineages[lin] - lin_counts[lin][i])
for lin in lineages}
for i in range(segsites)]
def build_splits(lineages, seqs_path, outgroup):
splits = Counter()
with open(seqs_path, "rt") as f:
next(f)
seqdata = [(lineages[int(spec) - 1], seq) for spec, seq in
(line.strip().split() for line in f)]
specs = [s[0] for s in seqdata]
c = Counter()
nt = namedtuple("Spectrum", sorted({ell for ell in lineages if ell != outgroup}))
for col in zip(*[s[1] for s in seqdata]):
# This is not a true dict (potentially multiple of same key) but there should
# be only one outgroup lineage
dbase = dict(zip(specs, col))
abase = dbase[outgroup]
d = {}
for spec, base in zip(specs, col):
d.setdefault(spec, [0, 0])[int(base != abase)] += 1
d = {k: tuple(v) for k, v in d.items() if k != outgroup}
if not any(all(d[k][i] == 0 for k in specs if k != outgroup) for i in [0]):
c[nt(**d)] += 1
return c
if __name__ == "__main__":
test_scrm_sim(mktree(10.0), "outgroup")
| gpl-3.0 |
level12/blazeweb | tests/apps/blazewebtestapp/components/tests/views.py | 2 | 5636 | from blazeweb.globals import rg
from blazeweb.content import getcontent
from blazeweb.utils import redirect
from blazeweb.views import View, forward, jsonify
from werkzeug.exceptions import ServiceUnavailable
from formencode.validators import UnicodeString, Int
class Rvb(View):
def default(self):
# this view is used as a error doc handler, so we need to set the
# status code appropriately
if rg.respctx.error_doc_code:
self.status_code = rg.respctx.error_doc_code
self.retval = 'Hello World!'
class RvbWithSnippet(View):
def default(self):
self.retval = getcontent('tests:HwSnippet').primary
class Get(View):
def get(self):
self.retval = 'Hello World!'
class Post(View):
def post(self):
return 'Hello World!'
class Prep(View):
def init(self):
self.retval = 'Hello World!'
def default(self):
pass
class NoActionMethod(View):
def init(self):
self.retval = 'Hello World!'
class DoForward(View):
def default(self):
forward('tests:ForwardTo')
class ForwardTo(View):
def default(self):
return 'forward to me'
class RaiseExc(View):
def default(self):
raise ValueError('exception for testing')
class Text(View):
def default(self):
self.render_template(default_ext='txt')
class TextWithSnippet(View):
def default(self):
self.assign('output', getcontent('tests:text_snippet.txt'))
self.render_template(default_ext='txt')
class TextWithSnippet2(View):
def default(self):
self.render_template(default_ext='txt')
class Html(View):
def default(self):
self.render_template()
class Redirect(View):
def default(self):
redirect('/some/other/page')
class PermRedirect(View):
def default(self):
redirect('/some/other/page', permanent=True)
class CustRedirect(View):
def default(self):
redirect('/some/other/page', code=303)
class HttpExceptionRaise(View):
def default(self):
raise ServiceUnavailable()
class ForwardLoop(View):
def default(self):
forward('tests:ForwardLoop')
class UrlArguments(View):
def default(self, towho='World', anum=None):
if anum is None:
return 'Hello %s!' % towho
else:
return 'Give me a name!'
class GetArguments(View):
def init(self):
self.add_processor('towho', UnicodeString())
def default(self, greeting='Hello', towho='World', anum=None):
if anum is None:
return '%s %s!' % (greeting, towho)
else:
return 'Give me a name!'
class GetArguments2(View):
def init(self):
self.add_processor('towho', UnicodeString())
self.add_processor('num', Int())
def default(self, towho='World', num=None):
if num:
return 'Hello %s, %d!' % (towho, num)
else:
return 'Hello %s!' % towho
class GetArguments3(View):
def init(self):
self.add_processor('towho', UnicodeString())
self.add_processor('num', Int(), show_msg=True)
self.add_processor('num2', Int(), custom_msg='num: must be an integer')
self.strict_args = True
def default(self, towho='World', num=None, num2=None):
if num:
return 'Hello %s, %d!' % (towho, num)
else:
return 'Hello %s!' % towho
class RequiredGetArguments(View):
def init(self):
self.add_processor('towho', UnicodeString(), show_msg=True)
self.add_processor('num', Int, required=True, show_msg=True)
self.add_processor('num2', Int, strict=True, show_msg=True)
self.add_processor('num3', Int, show_msg=True)
def default(self, towho='World', num=None, num2=10, num3=10):
if num:
return 'Hello %s, %d %d %d!' % (towho, num, num2, num3)
class ListGetArguments(View):
def init(self):
self.add_processor('nums', Int(), show_msg=True, takes_list=True)
def default(self, nums=[]):
return str(nums)
class CustomValidator(View):
def init(self):
self.add_processor('num', self.validate_num)
def default(self, num=10):
return str(num)
def validate_num(self, value):
return int(value)
class BadValidator(View):
def init(self):
self.add_processor('num', 'notavalidator')
def default(self, num=10):
return num
class HtmlTemplateFileArg(View):
def default(self):
self.render_template('filearg.html')
class TemplateInheritance(View):
def default(self):
self.render_template()
class ParentTemplate(View):
def default(self):
self.render_template()
class ParentTemplateInheritance(View):
def default(self):
self.render_template()
class ModLevelPriority(View):
def default(self):
self.render_template()
class HtmlSnippetWithCss(View):
def default(self):
self.render_template()
class HtmlSnippetWithCssParent(View):
def default(self):
self.retval = getview('tests:HtmlSnippetWithCss') # noqa
self.render_template()
class UserMessages(View):
def default(self):
if rg.respctx.error_doc_code:
self.status_code = rg.respctx.error_doc_code
self.render_template()
class TemplateChooser(View):
def default(self, rtype):
if rtype == 'endpoint':
self.render_endpoint('app_level.html')
if rtype == 'content':
self.render_endpoint('tests:HwSnippet')
class JsonifyException(View):
@jsonify
def default(self):
foo # noqa
| bsd-3-clause |
daviddoria/PointGraphsPhase1 | Utilities/vtkTclTest2Py/mccases.py | 10 | 1065 | """This is python equivalent of Wrapping/Tcl/vtktesting/mccases.tcl.
Used for setting vertex values for clipping, cutting, and contouring tests.
This script is used while running python tests translated from Tcl."""
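# Example (hypothetical objects): mark vertex 0 as inside and the rest outside before
# contouring, and update the on-screen label accordingly:
#   case1(scalars, IN=1, OUT=0, caseLabel=caseLabelActor)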
def case1 ( scalars, IN, OUT, caseLabel ):
scalars.InsertValue(0,IN )
scalars.InsertValue(1,OUT)
scalars.InsertValue(2,OUT)
scalars.InsertValue(3,OUT)
scalars.InsertValue(4,OUT)
scalars.InsertValue(5,OUT)
scalars.InsertValue(6,OUT)
scalars.InsertValue(7,OUT)
if IN == 1:
caseLabel.SetText("Case 1 - 00000001")
else :
caseLabel.SetText("Case 1c - 11111110")
pass
def case2 ( scalars, IN, OUT, caseLabel ):
scalars.InsertValue(0,IN)
scalars.InsertValue(1,IN)
scalars.InsertValue(2,OUT)
scalars.InsertValue(3,OUT)
scalars.InsertValue(4,OUT)
scalars.InsertValue(5,OUT)
scalars.InsertValue(6,OUT)
scalars.InsertValue(7,OUT)
if IN == 1:
caseLabel.SetText("Case 2 - 00000011")
else:
caseLabel.SetText("Case 2c - 11111100")
pass
| bsd-3-clause |
nicklhy/mxnet | tests/nightly/dist_lenet.py | 52 | 1839 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# distributed lenet
import os, sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../../example/image-classification"))
sys.path.append(os.path.join(curr_path, "../../python"))
import mxnet as mx
import argparse
import train_mnist
import logging
if __name__ == '__main__':
args = train_mnist.parse_args()
args.batch_size = 100
data_shape = (1, 28, 28)
loader = train_mnist.get_iterator(data_shape)
kv = mx.kvstore.create(args.kv_store)
(train, val) = loader(args, kv)
net = train_mnist.get_lenet()
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
mx.model.FeedForward.create(
ctx = mx.gpu(kv.rank),
kvstore = kv,
symbol = net,
X = train,
eval_data = val,
num_epoch = args.num_epochs,
learning_rate = args.lr,
momentum = 0.9,
wd = 0.00001)
| apache-2.0 |
jamiefolsom/edx-platform | lms/djangoapps/teams/management/commands/reindex_course_team.py | 34 | 2408 | """ Management command to update course_teams' search index. """
from django.core.management import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from optparse import make_option
from textwrap import dedent
from teams.models import CourseTeam
class Command(BaseCommand):
"""
Command to reindex course_teams (single, multiple or all available).
Examples:
./manage.py reindex_course_team team1 team2 - reindexes course teams with team_ids team1 and team2
./manage.py reindex_course_team --all - reindexes all available course teams
"""
help = dedent(__doc__)
can_import_settings = True
args = "<course_team_id course_team_id ...>"
option_list = BaseCommand.option_list + (
make_option(
'--all',
action='store_true',
dest='all',
default=False,
help='Reindex all course teams'
),
)
def _get_course_team(self, team_id):
""" Returns course_team object from team_id. """
try:
result = CourseTeam.objects.get(team_id=team_id)
except ObjectDoesNotExist:
raise CommandError(u"Argument {0} is not a course_team team_id".format(team_id))
return result
def handle(self, *args, **options):
"""
By convention set by django developers, this method actually executes command's actions.
So, there could be no better docstring than emphasize this once again.
"""
# This is ugly, but there is a really strange circular dependency that doesn't
# happen anywhere else that I can't figure out how to avoid it :(
from teams.search_indexes import CourseTeamIndexer
if len(args) == 0 and not options.get('all', False):
raise CommandError(u"reindex_course_team requires one or more arguments: <course_team_id>")
elif not settings.FEATURES.get('ENABLE_TEAMS', False):
raise CommandError(u"ENABLE_TEAMS must be enabled to use course team indexing")
if options.get('all', False):
course_teams = CourseTeam.objects.all()
else:
course_teams = map(self._get_course_team, args)
for course_team in course_teams:
print "Indexing {id}".format(id=course_team.team_id)
CourseTeamIndexer.index(course_team)
| agpl-3.0 |
Ali-aqrabawi/ezclinic | lib/django/contrib/auth/management/commands/changepassword.py | 65 | 2685 | from __future__ import unicode_literals
import getpass
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
help = "Change a user's password for django.contrib.auth."
requires_migrations_checks = True
requires_system_checks = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=force_str(prompt))
if not p:
raise CommandError("aborted")
return p
def add_arguments(self, parser):
parser.add_argument(
'username', nargs='?',
help='Username to change password for; by default, it\'s the current username.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".',
)
def handle(self, *args, **options):
if options['username']:
username = options['username']
else:
username = getpass.getuser()
UserModel = get_user_model()
try:
u = UserModel._default_manager.using(options['database']).get(**{
UserModel.USERNAME_FIELD: username
})
except UserModel.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
password_validated = False
while (p1 != p2 or not password_validated) and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count += 1
# Don't validate passwords that don't match.
continue
try:
validate_password(p2, u)
except ValidationError as err:
self.stderr.write('\n'.join(err.messages))
count += 1
else:
password_validated = True
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u
| mit |
DaniilLeksin/gc | wx/tools/Editra/src/syntax/_inno.py | 3 | 7749 | ###############################################################################
# Name: inno.py #
# Purpose: Syntax configuration module for Inno Setup Scripts #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: inno.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for Inno Setup Scripts
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: _inno.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc as stc
import re
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
SECTION_KW = (0, "code components custommessages dirs files icons ini "
"installdelete langoptions languages messages registry run "
"setup types tasks uninstalldelete uninstallrun _istool")
KEYWORDS = (1, "allowcancelduringinstall allownoicons allowrootdirectory "
"allowuncpath alwaysrestart alwaysshowcomponentslist "
"alwaysshowdironreadypage alwaysshowgrouponreadypage "
"alwaysusepersonalgroup appcomments appcontact appcopyright "
"appenddefaultdirname appenddefaultgroupname appid appmodifypath "
"appmutex appname apppublisher apppublisherurl appreadmefile "
"appsupporturl appupdatesurl appvername appversion "
"architecturesallowed architecturesinstallin64bitmode backcolor "
"backcolor2 backcolordirection backsolid changesassociations "
"changesenvironment compression copyrightfontname "
"copyrightfontsize createappdir createuninstallregkey "
"defaultdirname defaultgroupname defaultuserinfoname "
"defaultuserinfoorg defaultuserinfoserial dialogfontname "
"dialogfontsize direxistswarning disabledirpage "
"disablefinishedpage disableprogramgrouppage disablereadymemo "
"disablereadypage disablestartupprompt diskclustersize "
"diskslicesize diskspanning enablesdirdoesntexistwarning "
"encryption extradiskspacerequired flatcomponentslist "
"infoafterfile infobeforefile internalcompresslevel "
"languagedetectionmethod languagecodepage languageid languagename "
"licensefile mergeduplicatefiles minversion onlybelowversion "
"outputbasefilename outputdir outputmanifestfile password "
"privilegesrequired reservebytes restartifneededbyrun "
"setupiconfile showcomponentsizes showlanguagedialog "
"showtaskstreelines slicesperdisk solidcompression sourcedir "
"timestamprounding timestampsinutc titlefontname titlefontsize "
"touchdate touchtime uninstallable uninstalldisplayicon "
"uninstalldisplayname uninstallfilesdir uninstalllogmode "
"uninstallrestartcomputer updateuninstalllogappname "
"usepreviousappdir usepreviousgroup useprevioussetuptype "
"useprevioustasks useprevioususerinfo userinfopage usesetupldr "
"versioninfocompany versioninfocopyright versioninfodescription "
"versioninfotextversion versioninfoversion welcomefontname "
"welcomefontsize windowshowcaption windowstartmaximized "
"windowresizable windowvisible wizardimagebackcolor "
"wizardimagefile wizardimagestretch wizardsmallimagefile")
PARAM_KW = (2, "afterinstall attribs beforeinstall check comment components "
"copymode description destdir destname excludes "
"extradiskspacerequired filename flags fontinstall "
"groupdescription hotkey infoafterfile infobeforefile "
"iconfilename iconindex key languages licensefile messagesfile "
"minversion name onlybelowversion parameters permissions root "
"runonceid section source statusmsg string subkey tasks type "
"types valuedata valuename valuetype workingdir")
PREPROC_KW = (3, "append define dim else emit endif endsub error expr file for "
"if ifdef ifexist ifndef ifnexist include insert pragma sub "
"undef")
PASCAL_KW = (4, "begin break case const continue do downto else end except "
"finally for function if of procedure repeat then to try until "
"uses var while with")
USER_DEF = (5, "")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [(stc.STC_INNO_COMMENT, 'comment_style'),
(stc.STC_INNO_COMMENT_PASCAL, 'comment_style'),
(stc.STC_INNO_DEFAULT, 'default_style'),
(stc.STC_INNO_IDENTIFIER, 'default_style'),
(stc.STC_INNO_KEYWORD, 'keyword_style'),
(stc.STC_INNO_KEYWORD_PASCAL, 'keyword4_style'),
(stc.STC_INNO_KEYWORD_USER, 'default_style'),
(stc.STC_INNO_PARAMETER, 'keyword2_style'),
(stc.STC_INNO_PREPROC, 'pre_style'),
(stc.STC_INNO_SECTION, 'scalar_style'),
(stc.STC_INNO_STRING_DOUBLE, 'string_style'),
(stc.STC_INNO_STRING_SINGLE, 'char_style')]
if wx.VERSION >= (2, 9, 0, 0, ''):
SYNTAX_ITEMS.append((stc.STC_INNO_INLINE_EXPANSION, 'default_style')) #TODO
else:
SYNTAX_ITEMS.append((stc.STC_INNO_PREPROC_INLINE, 'pre_style'))
#---- Extra Properties ----#
FOLD = ("fold", "1")
FOLD_COMP = ("fold.compact", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Inno Setup Scripts"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_INNOSETUP)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [SECTION_KW, KEYWORDS, PARAM_KW, PREPROC_KW, PASCAL_KW]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
# Note: Inno can also use pascal comments (i.e {})
return [u';']
#-----------------------------------------------------------------------------#
def AutoIndenter(estc, pos, ichar):
"""Auto indent Inno Setup Scripts.
@param estc: EditraStyledTextCtrl
@param pos: current carat position
@param ichar: Indentation character
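    Behaviour sketch: inserts an EOL plus the current line's indentation, and adds one
    extra indent character when the stripped line is 'begin' or matches 'if ... then'.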
"""
rtxt = u''
line = estc.GetCurrentLine()
text = estc.GetTextRange(estc.PositionFromLine(line), pos)
eolch = estc.GetEOLChar()
indent = estc.GetLineIndentation(line)
if ichar == u"\t":
tabw = estc.GetTabWidth()
else:
tabw = estc.GetIndent()
i_space = indent / tabw
ndent = eolch + ichar * i_space
rtxt = ndent + ((indent - (tabw * i_space)) * u' ')
if_pat = re.compile('if\s+.*\sthen')
text = text.strip()
if text == u'begin' or if_pat.match(text):
rtxt += ichar
# Put text in the buffer
estc.AddText(rtxt)
| apache-2.0 |
mylukin/Creeper | requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
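    # Ordering example:
    #   od = OrderedDict([('a', 1), ('b', 2)]); od['c'] = 3
    #   list(od) == ['a', 'b', 'c']   # keys come back in insertion order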
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| apache-2.0 |
thefinn93/CouchPotatoServer | libs/git/ref_container.py | 122 | 2242 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import exceptions
class RefContainer(object):
def getBranches(self):
raise NotImplementedError()
def getTags(self):
raise NotImplementedError()
########################### Looking for specific refs ##########################
def _getByName(self, func, name):
for ref in func():
if ref.name == name:
return ref
raise exceptions.NonexistentRefException(name)
def getBranchByName(self, name):
return self._getByName(self.getBranches, name)
def hasBranch(self, name):
try:
self.getBranchByName(name)
return True
except exceptions.NonexistentRefException:
return False
| gpl-3.0 |
zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/test_update_vm_cpu_memory2.py | 2 | 1813 | '''
Test changing cpu and memory configuration while the VM is stopped, and verify the change takes effect after the VM starts
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
#import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Test update instance offering')
vm = test_stub.create_basic_vm()
vm.stop()
instance_offering = test_lib.lib_get_instance_offering_by_uuid(vm.get_vm().instanceOfferingUuid)
test_obj_dict.add_vm(vm)
vm_ops.update_vm(vm.get_vm().uuid, instance_offering.cpuNum * 2, None)
vm_ops.update_vm(vm.get_vm().uuid, None, instance_offering.memorySize * 2)
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to change")
vm.start()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to take effect after Vm restart")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to take effect after Vm restart")
vm.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Test update instance cpu memory Pass')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
mdshuai/UATFramework | steps/common.py | 3 | 3982 | '''Common test methods'''
from behave import *
@given(u'"{host}" hosts from dynamic inventory')
def step_impl(context, host):
context.inventory = "dynamic"
context.target_host = host
@given(u'"{host}" hosts from static inventory')
def step_impl(context, host):
context.inventory = "static"
context.target_host = host
@given(u'"{rpm}" is already installed on "{host}"')
def step_impl(context, rpm, host):
'''Install RPM on host but fail if not already installed'''
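    # Example feature line (hypothetical host group):
    #   Given "httpd" is already installed on "webservers"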
r = context.remote_cmd("yum",
host,
remote_user="root",
module_args='name=%s state=present' % rpm)
if r:
for i in r:
assert i['msg'] == '' and i['results'] != []
else:
assert False
@given(u'"{rpm}" is already installed')
def step_impl(context, rpm):
'''Install RPM on host but fail if not already installed'''
context.execute_steps(u"""
given "{package_name}" is already installed on "{host}"
""".format(package_name=rpm,host=context.target_host))
@given(u'"{rpms}" are already installed on "{host}"')
def step_impl(context, rpms, host):
'''Install RPM on host but fail if not already installed'''
r = context.remote_cmd("yum",
host,
remote_user="root",
module_args='name=%s' % rpms)
if r:
for i in r:
assert i['msg'] == '' and i['results'] != []
else:
assert False
@given(u'"{rpms}" are already installed')
def step_impl(context, rpms):
'''Install RPM on host but fail if not already installed'''
context.execute_steps(u"""
"given {package_names}" are already installed on "{host}"
""".format(package_names=rpms,host=context.target_host))
@given(u'"{unit}" is already running on "{host}"')
def step_impl(context, unit, host):
'''Ensure service is running but fail if not'''
r = context.remote_cmd("service",
host,
module_args='name=%s state=running enabled=yes' % unit)
if r:
for i in r:
assert i['changed'] is False
else:
assert False
@then(u'"{unit}" is started and enabled on "{host}"')
def step_impl(context, unit, host):
'''Start service but fail if already running'''
r = context.remote_cmd('service',
host,
module_args='name=%s state=running enabled=yes' % unit)
if r:
for i in r:
assert i['state'] == 'started' and i['enabled'] is True
else:
assert False
@then(u'"{unit}" is restarted on "{host}"')
def step_impl(context, unit, host):
'''Restart service'''
r = context.remote_cmd('service',
host,
module_args='name=%s state=restarted' % unit)
if r:
for i in r:
assert i['state'] == 'started' and i['changed'] is True
else:
assert False
@given(u'"{host}" hosts can be pinged')
@given('"{host}" host')
def step(context, host):
'''Verify we can ping the host
host: a host from the ansible inventory file'''
assert context.remote_cmd('ping', host)
@given('run command "{cmd}" on "{host}"')
@when('run command "{cmd}" on "{host}"')
@then('run command "{cmd}" on "{host}"')
def step(context, cmd, host):
'''Run an Ansible module on a host directly from scenario
cmd: a module name plus arguments
<module> key=value [key=value ...]
or...
<module> <param>
host: a host from the inventory file'''
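    # Example feature lines (hypothetical hosts and arguments):
    #   Then run command "ping" on "all"
    #   Then run command "yum name=httpd state=present" on "webservers"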
module, args = None, None
if ' ' in cmd:
# we only split on the first space to get the module name
# since module_args are also space-delimited
module, args = cmd.split(' ', 1)
else:
module = cmd
assert context.remote_cmd(module,
host,
module_args=args)
| gpl-2.0 |
vabs22/zulip | docs/conf.py | 5 | 9917 | # -*- coding: utf-8 -*-
#
# zulip-contributor-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 16:24:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
if False:
from typing import Any, Dict, List, Optional
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [] # type: List[str]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015-2017, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None # type: Optional[str]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
# type: (Any) -> None
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
} # type: Dict[str, str]
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
| apache-2.0 |
PlayUAV/MissionPlanner | Lib/site-packages/numpy/ma/bench.py | 51 | 6658 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
# encoding: utf-8
import timeit
#import IPython.ipapi
#ip = IPython.ipapi.get()
#from IPython import ipmagic
import numpy
from numpy import ma  # needed: timer()'s setup string does "from __main__ import numpy, ma, ..."
#from numpy.ma import filled
#from numpy.ma.testutils import assert_equal
#####---------------------------------------------------------------------------
#---- --- Global variables ---
#####---------------------------------------------------------------------------
# Small arrays ..................................
xs = numpy.random.uniform(-1,1,6).reshape(2,3)
ys = numpy.random.uniform(-1,1,6).reshape(2,3)
zs = xs + 1j * ys
m1 = [[True, False, False], [False, False, True]]
m2 = [[True, False, True], [False, False, True]]
nmxs = numpy.ma.array(xs, mask=m1)
nmys = numpy.ma.array(ys, mask=m2)
nmzs = numpy.ma.array(zs, mask=m1)
# Big arrays ....................................
xl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
yl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
zl = xl + 1j * yl
maskx = xl > 0.8
masky = yl < -0.8
nmxl = numpy.ma.array(xl, mask=maskx)
nmyl = numpy.ma.array(yl, mask=masky)
nmzl = numpy.ma.array(zl, mask=maskx)
#####---------------------------------------------------------------------------
#---- --- Functions ---
#####---------------------------------------------------------------------------
def timer(s, v='', nloop=500, nrep=3):
units = ["s", "ms", "µs", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
print "%s : %-50s : " % (v,s),
varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
Timer = timeit.Timer(stmt=s, setup=setup)
best = min(Timer.repeat(nrep, nloop)) / nloop
if best > 0.0:
order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
else:
order = 3
print "%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
3,
best * scaling[order],
units[order])
# ip.magic('timeit -n%i %s' % (nloop,s))
def compare_functions_1v(func, nloop=500,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
funcname = func.__name__
print "-"*50
print "%s on small arrays" % funcname
module, data = "numpy.ma","nmxs"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
print "%s on large arrays" % funcname
module, data = "numpy.ma","nmxl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
def compare_methods(methodname, args, vars='x', nloop=500, test=True,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
print "-"*50
print "%s on small arrays" % methodname
data, ver = "nm%ss" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
#
print "%s on large arrays" % methodname
data, ver = "nm%sl" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
return
def compare_functions_2v(func, nloop=500, test=True,
xs=xs, nmxs=nmxs,
ys=ys, nmys=nmys,
xl=xl, nmxl=nmxl,
yl=yl, nmyl=nmyl):
funcname = func.__name__
print "-"*50
print "%s on small arrays" % funcname
module, data = "numpy.ma","nmxs,nmys"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
print "%s on large arrays" % funcname
module, data = "numpy.ma","nmxl,nmyl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
###############################################################################
################################################################################
if __name__ == '__main__':
# # Small arrays ..................................
# xs = numpy.random.uniform(-1,1,6).reshape(2,3)
# ys = numpy.random.uniform(-1,1,6).reshape(2,3)
# zs = xs + 1j * ys
# m1 = [[True, False, False], [False, False, True]]
# m2 = [[True, False, True], [False, False, True]]
# nmxs = numpy.ma.array(xs, mask=m1)
# nmys = numpy.ma.array(ys, mask=m2)
# nmzs = numpy.ma.array(zs, mask=m1)
# mmxs = maskedarray.array(xs, mask=m1)
# mmys = maskedarray.array(ys, mask=m2)
# mmzs = maskedarray.array(zs, mask=m1)
# # Big arrays ....................................
# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
# zl = xl + 1j * yl
# maskx = xl > 0.8
# masky = yl < -0.8
# nmxl = numpy.ma.array(xl, mask=maskx)
# nmyl = numpy.ma.array(yl, mask=masky)
# nmzl = numpy.ma.array(zl, mask=maskx)
# mmxl = maskedarray.array(xl, mask=maskx, shrink=True)
# mmyl = maskedarray.array(yl, mask=masky, shrink=True)
# mmzl = maskedarray.array(zl, mask=maskx, shrink=True)
#
compare_functions_1v(numpy.sin)
compare_functions_1v(numpy.log)
compare_functions_1v(numpy.sqrt)
#....................................................................
compare_functions_2v(numpy.multiply)
compare_functions_2v(numpy.divide)
compare_functions_2v(numpy.power)
#....................................................................
compare_methods('ravel','', nloop=1000)
compare_methods('conjugate','','z', nloop=1000)
compare_methods('transpose','', nloop=1000)
compare_methods('compressed','', nloop=1000)
compare_methods('__getitem__','0', nloop=1000)
compare_methods('__getitem__','(0,0)', nloop=1000)
compare_methods('__getitem__','[0,-1]', nloop=1000)
compare_methods('__setitem__','0, 17', nloop=1000, test=False)
compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False)
#....................................................................
print "-"*50
print "__setitem__ on small arrays"
timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
print "-"*50
print "__setitem__ on large arrays"
timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
#....................................................................
print "-"*50
print "where on small arrays"
timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000)
print "-"*50
print "where on large arrays"
timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100)
| gpl-3.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/metrics/python/metrics/classification.py | 23 | 2583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope('accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
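# Illustrative usage sketch (not part of this module). It assumes
# `import tensorflow as tf` and a TF 1.x-style graph/session in which the
# returned tensor is evaluated; the values shown follow from the math above:
#   predictions = tf.constant([1, 0, 1, 1], dtype=tf.int64)
#   labels = tf.constant([1, 0, 0, 1], dtype=tf.int64)
#   acc = accuracy(predictions, labels)                      # -> 0.75
#   acc_w = accuracy(predictions, labels,
#                    weights=tf.constant([1., 1., 0., 0.]))  # -> 1.0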
| apache-2.0 |
asajeffrey/servo | components/script/dom/bindings/codegen/parser/tests/test_empty_sequence_default_value.py | 15 | 1132 | import WebIDL
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface X {
const sequence<long> foo = [];
};
""")
results = parser.finish()
except Exception as x:
threw = True
harness.ok(threw, "Constant cannot have [] as a default value")
parser = parser.reset()
parser.parse("""
interface X {
void foo(optional sequence<long> arg = []);
};
""")
results = parser.finish();
harness.ok(isinstance(
results[0].members[0].signatures()[0][1][0].defaultValue,
WebIDL.IDLEmptySequenceValue),
"Should have IDLEmptySequenceValue as default value of argument")
parser = parser.reset()
parser.parse("""
dictionary X {
sequence<long> foo = [];
};
""")
results = parser.finish();
harness.ok(isinstance(results[0].members[0].defaultValue,
WebIDL.IDLEmptySequenceValue),
"Should have IDLEmptySequenceValue as default value of "
"dictionary member")
| mpl-2.0 |
Tiotao/morpherpy | env/Lib/site-packages/sqlalchemy/util/_collections.py | 7 | 24509 | # util/_collections.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import itertools
import weakref
import operator
from .compat import threading
EMPTY_SET = frozenset()
class KeyedTuple(tuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
    standard library, however it is architected very differently.
    Unlike ``collections.namedtuple()``, :class:`.KeyedTuple`
    does not rely on creation of custom subtypes in order to represent
    a new series of keys; instead, each :class:`.KeyedTuple` instance
receives its list of keys in place. The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`.Query` object's use case.
.. versionchanged:: 0.8
Compatibility methods with ``collections.namedtuple()`` have been
added including :attr:`.KeyedTuple._fields` and
:meth:`.KeyedTuple._asdict`.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
t._labels = []
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return [l for l in self._labels if l is not None]
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. versionadded:: 0.8
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple(self.keys())
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
.. versionadded:: 0.8
"""
return dict((key, self.__dict__[key]) for key in self.keys())
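# Illustrative sketch (not in the original source) of the namedtuple-style
# accessors described above:
#   >>> k = KeyedTuple([1, 2], labels=["a", "b"])
#   >>> k._fields
#   ('a', 'b')
#   >>> k._asdict() == {'a': 1, 'b': 2}
#   True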
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter([self[key] for key in self._list])
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
    This strategy has edge cases for builtin types - it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.__contains__,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.__contains__,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).union(other))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).difference(other))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).intersection(other))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(
self._working_set(members).symmetric_difference(other))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.itervalues())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
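# Illustrative sketch (not in the original source): membership is decided by
# id(), so equal-but-distinct objects are kept separately:
#   >>> a, b = [1], [1]
#   >>> s = IdentitySet([a, b, a])
#   >>> len(s)
#   2
#   >>> (a in s, [1] in s)
#   (True, False)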
class WeakSequence(object):
def __init__(self, elements):
self._storage = weakref.WeakValueDictionary(
(idx, element) for idx, element in enumerate(elements)
)
def __iter__(self):
return self._storage.itervalues()
def __getitem__(self, index):
try:
return self._storage[index]
except KeyError:
raise IndexError("Index %s out of range" % index)
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
seen = {}
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen.__setitem__(x, True)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen.__setitem__(hashfunc(x), True)]
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
determined by identity (``is a``) not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, basestring) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
"""
def __init__(self, capacity=100, threshold=.5):
self.capacity = capacity
self.threshold = threshold
self._counter = 0
def _inc_counter(self):
self._counter += 1
return self._counter
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
by_counter = sorted(dict.values(self),
key=operator.itemgetter(2),
reverse=True)
for item in by_counter[self.capacity:]:
try:
del self[item[0]]
except KeyError:
                    # if we couldn't find a key, most
# likely some other thread broke in
# on us. loop around and try again
break
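# Illustrative sketch (not in the original source): with capacity=2 and the
# default threshold of .5, pruning triggers once the dict grows past
# capacity * 1.5, keeping the most recently used entries:
#   >>> c = LRUCache(capacity=2, threshold=.5)
#   >>> for k in "abcd": c[k] = k.upper()
#   >>> sorted(c.keys())
#   ['c', 'd']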
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value forthe current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
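# Illustrative sketch (not in the original source): scopefunc() keys the
# registry, so each distinct scope token gets its own object from createfunc:
#   >>> scope = ['a']
#   >>> reg = ScopedRegistry(createfunc=dict, scopefunc=lambda: scope[0])
#   >>> first = reg()
#   >>> reg() is first
#   True
#   >>> scope[0] = 'b'
#   >>> reg() is first
#   False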
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| mit |
amazinger2013/OpenSesame | libqtopensesame/sketchpad_elements/_textline.py | 2 | 2960 | #-*- coding:utf-8 -*-
"""
This file is part of openexp.
openexp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
openexp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with openexp. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from libopensesame.exceptions import osexception
from libqtopensesame.misc import _
from libqtopensesame.sketchpad_elements._base_element import base_element
from libopensesame.sketchpad_elements import textline as textline_runtime
from PyQt4 import QtCore, QtGui
class textline(base_element, textline_runtime):
"""
desc:
A textline element.
See base_element for docstrings and function descriptions.
"""
def show_edit_dialog(self):
"""
desc:
The show-edit dialog for the textline only edits the text, not the
full element script.
"""
text = self.experiment.text_input(_(u'Edit text'),
message=_(u'Please enter a text for the textline'),
content=self.properties[u'text'].replace(u'<br />', u'\n'),
parent=self.sketchpad._edit_widget)
if text == None:
return
self.properties[u'text'] = self.clean_text(text)
self.sketchpad.draw()
@classmethod
def mouse_press(cls, sketchpad, pos):
text = sketchpad.experiment.text_input(title=_(u'New textline'),
message=_(u'Please enter a text for the textline'),
parent=sketchpad._edit_widget)
if text == None:
return None
properties = {
u'x': pos[0],
u'y': pos[1],
u'text': cls.clean_text(text),
u'color': sketchpad.current_color(),
u'center': sketchpad.current_center(),
u'font_family': sketchpad.current_font_family(),
u'font_size': sketchpad.current_font_size(),
u'font_bold': sketchpad.current_font_bold(),
u'font_italic': sketchpad.current_font_italic(),
u'html': sketchpad.current_html(),
u'show_if' : sketchpad.current_show_if()
}
return textline(sketchpad, properties=properties)
@staticmethod
def clean_text(text):
"""
desc:
Cleans text by removing quotes and converting newlines to <br />
tags.
arguments:
text: The text to clean.
type: [str, unicode, QString]
returns:
desc: Clean text.
type: unicode
"""
text = unicode(text)
text = text.replace(os.linesep, u'<br />')
text = text.replace(u'\n', u'<br />')
text = text.replace(u'"', u'')
return text
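	# Illustrative sketch (not in the original source): clean_text() drops
	# double quotes and converts newlines to <br /> tags:
	#   >>> textline.clean_text(u'say "hi"\nbye')
	#   u'say hi<br />bye'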
@staticmethod
def requires_text():
return True
@staticmethod
def requires_color():
return True
@staticmethod
def requires_center():
return True
@staticmethod
def cursor():
return u'cursor-text', -1, -1
| gpl-3.0 |
pubnub/Zopkio | zopkio/reporters/html_reporter.py | 3 | 10389 | # Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Class used to generate the report.
"""
import os
from jinja2 import Environment, FileSystemLoader
import zopkio.constants as constants
import zopkio.runtime as runtime
import zopkio.utils as utils
class _ReportInfo(object):
"""
Holds data shared among all report pages
"""
def __init__(self, output_dir, logs_dir, naarad_dir):
self.output_dir = os.path.abspath(output_dir)
self.resource_dir = os.path.join(output_dir, "resources/")
self.logs_dir = os.path.abspath(logs_dir)
self.naarad_dir = os.path.abspath(naarad_dir)
self.config_to_test_names_map = {}
self.report_file_sfx = "_report.html"
self.home_page = os.path.join(output_dir, "report.html")
self.diff_page = os.path.join(output_dir, "diff.html")
self.log_page = os.path.join(output_dir, "log.html")
self.project_url = "https://github.com/linkedin/Zopkio"
self.results_map = {
"passed": constants.PASSED,
"failed": constants.FAILED,
"skipped": constants.SKIPPED
}
class Reporter(object):
"""
Class that converts the aggregated output into a user-friendly web page.
"""
def __init__(self, report_name, output_dir, logs_dir, naarad_dir):
"""
:param report_name: used in the title of the front-end
:param output_dir: directory where the report will be generated
    :param logs_dir: directory where the logs will be collected
:param naarad_dir: directory containing the naarad reports
"""
self.name = report_name
self.env = Environment(loader=FileSystemLoader(constants.WEB_RESOURCE_DIR)) # used to load html pages for Jinja2
self.data_source = runtime.get_collector()
self.report_info = _ReportInfo(output_dir, logs_dir, naarad_dir)
def get_config_to_test_names_map(self):
config_to_test_names_map = {}
for config_name in self.data_source.get_config_names():
config_to_test_names_map[config_name] = self.data_source.get_test_names(config_name)
return config_to_test_names_map
def get_report_location(self):
"""
Returns the filename of the landing page
"""
return self.report_info.home_page
def generate(self):
"""
Generates the report
"""
self._setup()
header_html = self._generate_header()
footer_html = self._generate_footer()
results_topbar_html = self._generate_topbar("results")
summary_topbar_html = self._generate_topbar("summary")
logs_topbar_html = self._generate_topbar("logs")
diff_topbar_html = self._generate_topbar("diff")
summary_body_html = self._generate_summary_body()
diff_body_html = self._generate_diff_body()
summary_html = header_html + summary_topbar_html + summary_body_html + footer_html
diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html
Reporter._make_file(summary_html, self.report_info.home_page)
Reporter._make_file(diff_html,self.report_info.diff_page)
log_body_html = self._generate_log_body()
log_html = header_html + logs_topbar_html + log_body_html+footer_html
Reporter._make_file(log_html, self.report_info.log_page)
for config_name in self.report_info.config_to_test_names_map.keys():
config_dir = os.path.join(self.report_info.resource_dir, config_name)
utils.makedirs(config_dir)
config_body_html = self._generate_config_body(config_name)
config_html = header_html + results_topbar_html + config_body_html + footer_html
config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx)
Reporter._make_file(config_html, config_file)
for test_name in self.data_source.get_test_names(config_name):
test_body_html = self._generate_test_body(config_name, test_name)
test_html = header_html + results_topbar_html + test_body_html + footer_html
test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx)
Reporter._make_file(test_html, test_file)
def _generate_config_body(self, config_name):
summary_stats = [
self.data_source.count_tests(config_name),
self.data_source.count_tests_with_result(config_name, constants.PASSED),
self.data_source.count_tests_with_result(config_name, constants.FAILED),
self.data_source.count_tests_with_result(config_name, constants.SKIPPED),
self.data_source.get_config_exec_time(config_name),
self.data_source.get_config_start_time(config_name),
self.data_source.get_config_end_time(config_name)
]
config_template = self.env.get_template("config_page.html")
config_body_html = config_template.render(
config_data=self.data_source.get_config_result(config_name),
tests=self.data_source.get_test_results(config_name),
report_info=self.report_info,
summary=summary_stats
)
return config_body_html
def _generate_log_body(self):
log_template = self.env.get_template("logs_page.html")
log_body_html = log_template.render(logs_dir=self.report_info.logs_dir)
return log_body_html
def _generate_footer(self):
footer_template = self.env.get_template("footer.html")
footer_html = footer_template.render()
return footer_html
def _generate_header(self):
CSS_INCLUDES = [
"web_resources/style.css"
]
CSS_INCLUDES[:] = [os.path.join(constants.PROJECT_ROOT_DIR, css_include) for css_include in CSS_INCLUDES]
JS_INCLUDES = [
"web_resources/script.js"
]
JS_INCLUDES[:] = [os.path.join(constants.PROJECT_ROOT_DIR, js_include) for js_include in JS_INCLUDES]
header_template = self.env.get_template("header.html")
header_html = header_template.render(
page_title=self.name,
css_includes=CSS_INCLUDES,
js_includes=JS_INCLUDES
)
return header_html
def _generate_diff_body(self):
diff_body_html = ""
config_tests_dict = {}
config_data_dict = {}
#generate diff page only if multiple configs exist
if (len(self.report_info.config_to_test_names_map.keys()) > 1):
# get list of test names in sorted order
test_names = self.data_source.get_test_results(self.report_info.config_to_test_names_map.keys()[0])
test_names.sort(key=lambda x: x.name)
for config_name in self.report_info.config_to_test_names_map.keys():
config_tests = self.data_source.get_test_results(config_name)
config_tests.sort(key=lambda x: x.name)
config_tests_dict[config_name] = config_tests
config_data_dict[config_name] = self.data_source.get_config_result(config_name)
diff_template = self.env.get_template("diff.html")
diff_body_html = diff_template.render(
test_names = test_names,
report_info = self.report_info,
config_names = self.report_info.config_to_test_names_map.keys(),
config_tests_dict = config_tests_dict,
config_data_dict = config_data_dict
)
return diff_body_html
def _generate_summary_body(self):
summary_stats = [
self.data_source.count_all_tests(),
self.data_source.count_all_tests_with_result(constants.PASSED),
self.data_source.count_all_tests_with_result(constants.FAILED),
self.data_source.count_all_tests_with_result(constants.SKIPPED),
self.data_source.get_total_config_exec_time(),
self.data_source.get_summary_start_time(),
self.data_source.get_summary_end_time()
]
config_failure_map = {}
config_total_tests_map = {}
config_test_failure_map = {}
config_test_skipped_map = {}
config_test_passed_map = {}
for config_name in self.report_info.config_to_test_names_map.keys():
config_total_tests_map[config_name] = self.data_source.count_tests(config_name)
config_failure_map[config_name] = self.data_source.get_config_result(config_name).result
config_test_failure_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.FAILED)
config_test_skipped_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.SKIPPED)
config_test_passed_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.PASSED)
summary_template = self.env.get_template("landing_page.html")
summary_body = summary_template.render(
report_info=self.report_info,
summary=summary_stats,
config_fail=config_failure_map,
config_fail_map=config_test_failure_map,
config_skip_map=config_test_skipped_map,
config_tests_map = config_total_tests_map,
config_pass_map = config_test_passed_map
)
return summary_body
def _generate_topbar(self, active_page):
topbar_template = self.env.get_template("topbar.html")
topbar_html = topbar_template.render(
report_info=self.report_info,
active=active_page,
)
return topbar_html
def _generate_test_body(self, config_name, test_name):
test_template = self.env.get_template("test_page.html")
test_body = test_template.render(
config_name=config_name,
test_data=self.data_source.get_test_result(config_name, test_name),
report_info=self.report_info,
config_data=self.data_source.get_config_result(config_name)
)
return test_body
@staticmethod
def _make_file(html, location):
with open(location, "w") as f:
f.write(html)
def _setup(self):
utils.makedirs(self.report_info.output_dir)
utils.makedirs(self.report_info.resource_dir)
self.report_info.config_to_test_names_map = self.get_config_to_test_names_map()
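# Illustrative usage sketch (not part of the original module); the directory
# names are hypothetical, and the zopkio runtime/collector is assumed to have
# been initialised already, since __init__ calls runtime.get_collector():
#   reporter = Reporter("nightly run", "out/", "out/logs/", "out/naarad/")
#   reporter.generate()
#   landing_page = reporter.get_report_location()   # <output_dir>/report.html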
| apache-2.0 |
jindongh/kombu | kombu/transport/virtual/exchange.py | 33 | 4580 | """
kombu.transport.virtual.exchange
================================
Implementations of the standard exchanges defined
by the AMQ protocol (excluding the `headers` exchange).
"""
from __future__ import absolute_import
from kombu.utils import escape_regex
import re
class ExchangeType(object):
"""Implements the specifics for an exchange type.
:param channel: AMQ Channel
"""
type = None
def __init__(self, channel):
self.channel = channel
def lookup(self, table, exchange, routing_key, default):
"""Lookup all queues matching `routing_key` in `exchange`.
:returns: `default` if no queues matched.
"""
raise NotImplementedError('subclass responsibility')
def prepare_bind(self, queue, exchange, routing_key, arguments):
"""Return tuple of `(routing_key, regex, queue)` to be stored
for bindings to this exchange."""
return routing_key, None, queue
def equivalent(self, prev, exchange, type,
durable, auto_delete, arguments):
"""Return true if `prev` and `exchange` is equivalent."""
return (type == prev['type'] and
durable == prev['durable'] and
auto_delete == prev['auto_delete'] and
(arguments or {}) == (prev['arguments'] or {}))
class DirectExchange(ExchangeType):
"""The `direct` exchange routes based on exact routing keys."""
type = 'direct'
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, _, queue in table
if rkey == routing_key]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
for queue in _lookup(exchange, routing_key):
_put(queue, message, **kwargs)
class TopicExchange(ExchangeType):
"""The `topic` exchange routes messages based on words separated by
dots, using wildcard characters ``*`` (any single word), and ``#``
(one or more words)."""
type = 'topic'
#: map of wildcard to regex conversions
wildcards = {'*': r'.*?[^\.]',
'#': r'.*?'}
#: compiled regex cache
_compiled = {}
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, pattern, queue in table
if self._match(pattern, routing_key)]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
deadletter = self.channel.deadletter_queue
for queue in [q for q in _lookup(exchange, routing_key)
if q and q != deadletter]:
_put(queue, message, **kwargs)
def prepare_bind(self, queue, exchange, routing_key, arguments):
return routing_key, self.key_to_pattern(routing_key), queue
def key_to_pattern(self, rkey):
"""Get the corresponding regex for any routing key."""
return '^%s$' % ('\.'.join(
self.wildcards.get(word, word)
for word in escape_regex(rkey, '.#*').split('.')
))
def _match(self, pattern, string):
"""Same as :func:`re.match`, except the regex is compiled and cached,
then reused on subsequent matches with the same pattern."""
try:
compiled = self._compiled[pattern]
except KeyError:
compiled = self._compiled[pattern] = re.compile(pattern, re.U)
return compiled.match(string)
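# Illustrative sketch (not in the original source): key_to_pattern() turns the
# AMQP-style wildcards into an anchored regex which _match() compiles, caches
# and reuses:
#   >>> t = TopicExchange(channel=None)
#   >>> t.key_to_pattern('stock.#')
#   '^stock\\..*?$'
#   >>> bool(t._match(t.key_to_pattern('stock.#'), 'stock.nyse.ibm'))
#   True
#   >>> bool(t._match(t.key_to_pattern('nasdaq.#'), 'stock.nyse.ibm'))
#   False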
class FanoutExchange(ExchangeType):
"""The `fanout` exchange implements broadcast messaging by delivering
copies of all messages to all queues bound to the exchange.
To support fanout the virtual channel needs to store the table
as shared state. This requires that the `Channel.supports_fanout`
attribute is set to true, and the `Channel._queue_bind` and
`Channel.get_table` methods are implemented. See the redis backend
for an example implementation of these methods.
"""
type = 'fanout'
def lookup(self, table, exchange, routing_key, default):
return [queue for _, _, queue in table]
def deliver(self, message, exchange, routing_key, **kwargs):
if self.channel.supports_fanout:
self.channel._put_fanout(
exchange, message, routing_key, **kwargs)
#: Map of standard exchange types and corresponding classes.
STANDARD_EXCHANGE_TYPES = {'direct': DirectExchange,
'topic': TopicExchange,
'fanout': FanoutExchange}
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
gcd0318/django | django/core/files/images.py | 429 | 2428 | """
Utility functions for handling images.
Requires Pillow as you might imagine.
"""
import struct
import zlib
from django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
def _get_width(self):
return self._get_image_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_image_dimensions()[1]
height = property(_get_height)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Returns the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, 'read'):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
file = open(file_or_path, 'rb')
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
# less bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos)
| bsd-3-clause |
yize/grunt-tps | tasks/lib/python/Lib/python2.7/test/test_whichdb.py | 91 | 1799 | #! /usr/bin/env python
"""Test script for the whichdb module
based on test_anydbm.py
"""
import os
import test.test_support
import unittest
import whichdb
import glob
_fname = test.test_support.TESTFN
# Silence Py3k warning
anydbm = test.test_support.import_module('anydbm', deprecated=True)
def _delete_files():
# we don't know the precise name the underlying database uses
# so we use glob to locate all names
for f in glob.glob(_fname + "*"):
try:
os.unlink(f)
except OSError:
pass
class WhichDBTestCase(unittest.TestCase):
# Actual test methods are added to namespace
# after class definition.
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def tearDown(self):
_delete_files()
def setUp(self):
_delete_files()
for name in anydbm._names:
# we define a new test method for each
# candidate database module.
try:
# Silence Py3k warning
mod = test.test_support.import_module(name, deprecated=True)
except unittest.SkipTest:
continue
def test_whichdb_name(self, name=name, mod=mod):
# Check whether whichdb correctly guesses module name
# for databases opened with module mod.
# Try with empty files first
f = mod.open(_fname, 'c')
f.close()
self.assertEqual(name, whichdb.whichdb(_fname))
# Now add a key
f = mod.open(_fname, 'w')
f["1"] = "1"
f.close()
self.assertEqual(name, whichdb.whichdb(_fname))
setattr(WhichDBTestCase,"test_whichdb_%s" % name, test_whichdb_name)
def test_main():
try:
test.test_support.run_unittest(WhichDBTestCase)
finally:
_delete_files()
if __name__ == "__main__":
test_main()
| mit |
heihei1252/lightblue-0.4 | src/mac/_macutil.py | 68 | 9048 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Mac-specific utility functions and constants.
from Foundation import NSObject, NSDate, NSPoint, NSDefaultRunLoopMode, NSTimer
from AppKit import NSApplication, NSEvent, NSApplicationDefined, NSAnyEventMask
import objc
import time
import _IOBluetooth
import _lightbluecommon
# for mac os 10.5
try:
from Foundation import NSUIntegerMax
NSAnyEventMask = NSUIntegerMax
except:
pass
# values of constants used in _IOBluetooth.framework
kIOReturnSuccess = 0 # defined in <IOKit/IOReturn.h>
kIOBluetoothUserNotificationChannelDirectionIncoming = 1
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kBluetoothHCIErrorPageTimeout = 0x04 # <IOBluetooth/Bluetooth.h>
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kIOBluetoothServiceBrowserControllerOptionsNone = 0L
LIGHTBLUE_NOTIFY_ID = 5444 # any old number
WAIT_MAX_TIMEOUT = 3
# IOBluetoothSDPUUID objects for RFCOMM and OBEX protocol UUIDs
PROTO_UUIDS = {
_lightbluecommon.RFCOMM: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0003),
_lightbluecommon.OBEX: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0008)
}
def formatdevaddr(addr):
"""
Returns address of a device in usual form e.g. "00:00:00:00:00:00"
- addr: address as returned by device.getAddressString() on an
IOBluetoothDevice
"""
# make uppercase cos PyS60 & Linux seem to always return uppercase
# addresses
# can safely encode to ascii cos BT addresses are only in hex (pyobjc
# returns all strings in unicode)
return addr.replace("-", ":").encode('ascii').upper()
def createbtdevaddr(addr):
# in mac 10.5, can use BluetoothDeviceAddress directly
chars = btaddrtochars(addr)
try:
btdevaddr = _IOBluetooth.BluetoothDeviceAddress(chars)
return btdevaddr
except:
return chars
def btaddrtochars(addr):
"""
Takes a bluetooth address and returns a tuple with the corresponding
char values. This can then be used to construct a
IOBluetoothDevice object, providing the signature of the withAddress:
selector has been set (as in _setpyobjcsignatures() in this module).
For example:
>>> chars = btaddrtochars("00:0e:0a:00:a2:00")
>>> chars
(0, 14, 10, 0, 162, 0)
>>> device = _IOBluetooth.IOBluetoothDevice.withAddress_(chars)
>>> type(device)
<objective-c class IOBluetoothDevice at 0xa4024988>
>>> device.getAddressString()
u'00-0e-0a-00-a2-00'
"""
if not _lightbluecommon._isbtaddr(addr):
raise TypeError("address %s not valid bluetooth address" % str(addr))
if addr.find(":") == -1:
addr = addr.replace("-", ":") # consider alternative addr separator
# unhexlify gives binary value like '\x0e', then ord to get the char value.
# unhexlify throws TypeError if value is not a hex pair.
import binascii
chars = [ord(binascii.unhexlify(part)) for part in addr.split(":")]
return tuple(chars)
def looponce():
app = NSApplication.sharedApplication()
# to push the run loops I seem to have to do this twice
# use NSEventTrackingRunLoopMode or NSDefaultRunLoopMode?
for i in range(2):
event = app.nextEventMatchingMask_untilDate_inMode_dequeue_(
NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(0.02),
NSDefaultRunLoopMode, False)
def waituntil(conditionfunc, timeout=None):
"""
Waits until conditionfunc() returns true, or <timeout> seconds have passed.
(If timeout=None, this waits indefinitely until conditionfunc() returns
true.) Returns false if the process timed out, otherwise returns true.
Note!! You must call interruptwait() when you know that conditionfunc()
should be checked (e.g. if you are waiting for data and you know some data
has arrived) so that this can check conditionfunc(); otherwise it will just
continue to wait. (This allows the function to wait for an event that is
sent by interruptwait() instead of polling conditionfunc().)
This allows the caller to wait while the main event loop processes its
events. This must be done for certain situations, e.g. to receive socket
data or to accept client connections on a server socket, since IOBluetooth
requires the presence of an event loop to run these operations.
This function doesn't need to be called if there is something else that is
already processing the main event loop, e.g. if called from within a Cocoa
application.
"""
app = NSApplication.sharedApplication()
starttime = time.time()
if timeout is None:
timeout = NSDate.distantFuture().timeIntervalSinceNow()
if not isinstance(timeout, (int, float)):
raise TypeError("timeout must be int or float, was %s" % \
type(timeout))
endtime = starttime + timeout
while True:
currtime = time.time()
if currtime >= endtime:
return False
# use WAIT_MAX_TIMEOUT, don't wait forever in case of KeyboardInterrupt
e = app.nextEventMatchingMask_untilDate_inMode_dequeue_(NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(min(endtime - currtime, WAIT_MAX_TIMEOUT)), NSDefaultRunLoopMode, True)
if e is not None:
if (e.type() == NSApplicationDefined and e.subtype() == LIGHTBLUE_NOTIFY_ID):
if conditionfunc():
return True
else:
app.postEvent_atStart_(e, True)
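# --- Illustrative sketch added by the editor; not part of the original module. ---
# It shows the intended pairing of waituntil() and interruptwait(): a callback
# stores incoming data and pokes the event loop so the waiting code re-checks
# its condition. The names _example_wait_for_data, inbox and ondata are made up
# for this sketch only.
def _example_wait_for_data(timeout=10.0):
    inbox = []
    def ondata(data):
        # would typically be invoked from an IOBluetooth delegate callback
        inbox.append(data)
        interruptwait()  # wake waituntil() so it re-checks the condition
    # ondata would be handed to whatever object delivers the data; here we
    # simply wait on the condition it affects.
    if waituntil(lambda: len(inbox) > 0, timeout):
        return inbox[0]
    return None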
def interruptwait():
"""
If waituntil() has been called, this will interrupt the waiting process so
it can check whether it should stop waiting.
"""
evt = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(NSApplicationDefined, NSPoint(), NSApplicationDefined, 0, 1, None, LIGHTBLUE_NOTIFY_ID, 0, 0)
NSApplication.sharedApplication().postEvent_atStart_(evt, True)
class BBCocoaSleeper(NSObject):
def init(self):
self = super(BBCocoaSleeper, self).init()
self.timedout = False
return self
def sleep(self, timeout):
NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
timeout, self, "timedOut:", None, False)
self.timedout = False
waituntil(lambda: self.timedout)
def timedOut_(self, timer):
self.timedout = True
interruptwait()
timedOut_ = objc.selector(timedOut_, signature="v@:@")
def waitfor(timeout):
sleeper = BBCocoaSleeper.alloc().init()
sleeper.sleep(timeout)
class BBFileLikeObjectReader(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingInputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be read through a NSInputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectReader, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def readDataWithMaxLength_(self, maxlength):
try:
data = self.__fileobj.read(maxlength)
except Exception:
return None
return buffer(data)
readDataWithMaxLength_ = objc.selector(readDataWithMaxLength_,
signature="@@:I") #"@12@0:4I8" #"@:I"
class BBFileLikeObjectWriter(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingOutputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be written to through a NSOutputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectWriter, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def write_(self, data):
try:
self.__fileobj.write(data)
except Exception:
return -1
return data.length()
write_ = objc.selector(write_, signature="i12@0:4@8") #i12@0:4@8 #i@:@
| gpl-3.0 |
jtrebosc/JTutils | TSpy/zeroim3D.py | 1 | 1091 | # -*- coding: utf-8 -*-
import sys
import os
PYTHONPATH=os.getenv("PYTHONPATH","not_defined")
if "not_defined" in PYTHONPATH:
MSG("cannot acces to PYTHONPATH environment. It's required for accessing to brukerPARIO lib" )
EXIT()
#add the Library path for importing brukerPARIO
sys.path.append(PYTHONPATH)
import brukerPARIO
from os.path import getsize
# from os import system as execute
import subprocess
def get_os_version():
ver = sys.platform.lower()
if ver.startswith('java'):
import java.lang
ver = java.lang.System.getProperty("os.name").lower()
return ver
OS=get_os_version()
dt=CURDATA()
dat=brukerPARIO.dataset(dt)
fn3iii=dat.returnprocpath()+"/3iii"
try :
sz3iii=getsize(fn3iii)
except :
MSG("No file 3iii")
EXIT()
if OS.startswith('mac') or OS.startswith('linux'):
# for Linux with wine
CMD="dd if=/dev/zero of=%s bs=%d count=1" % (fn3iii,sz3iii)
else:
# for windows using topspin cygwin setup
CMD="to be defined"
MSG("not implemented for Windows yet")
EXIT()
# execute(CMD)
subprocess.call(CMD.split())
| bsd-3-clause |
dstockwell/catapult | third_party/Paste/paste/util/threadedprint.py | 50 | 8210 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
threadedprint.py
================
:author: Ian Bicking
:date: 12 Jul 2004
Multi-threaded printing; allows the output produced via print to be
separated according to the thread.
To use this, you must install the catcher, like::
threadedprint.install()
The installation optionally takes one of three parameters:
default
The default destination for print statements (e.g., ``sys.stdout``).
factory
A function that will produce the stream for a thread, given the
thread's name.
paramwriter
Instead of writing to a file-like stream, this function will be
called like ``paramwriter(thread_name, text)`` for every write.
The thread name is the value returned by
``threading.currentThread().getName()``, a string (typically something
like Thread-N).
You can also submit file-like objects for specific threads, which will
override any of these parameters. To do this, call ``register(stream,
[threadName])``. ``threadName`` is optional, and if not provided the
stream will be registered for the current thread.
If no specific stream is registered for a thread, and no default has
been provided, then an error will occur when anything is written to
``sys.stdout`` (or printed).
Note: the stream's ``write`` method will be called in the thread the
text came from, so you should consider thread safety, especially if
multiple threads share the same writer.
Note: if you want access to the original standard out, use
``sys.__stdout__``.
You may also uninstall this, via::
threadedprint.uninstall()
TODO
----
* Something with ``sys.stderr``.
* Some default handlers. Maybe something that hooks into `logging`.
* Possibly cache the results of ``factory`` calls. This would be a
semantic change.
"""
import threading
import sys
from paste.util import filemixin
class PrintCatcher(filemixin.FileMixin):
def __init__(self, default=None, factory=None, paramwriter=None,
leave_stdout=False):
assert len(filter(lambda x: x is not None,
[default, factory, paramwriter])) <= 1, (
"You can only provide one of default, factory, or paramwriter")
if leave_stdout:
assert not default, (
"You cannot pass in both default (%r) and "
"leave_stdout=True" % default)
default = sys.stdout
if default:
self._defaultfunc = self._writedefault
elif factory:
self._defaultfunc = self._writefactory
elif paramwriter:
self._defaultfunc = self._writeparam
else:
self._defaultfunc = self._writeerror
self._default = default
self._factory = factory
self._paramwriter = paramwriter
self._catchers = {}
def write(self, v, currentThread=threading.currentThread):
name = currentThread().getName()
catchers = self._catchers
if not catchers.has_key(name):
self._defaultfunc(name, v)
else:
catcher = catchers[name]
catcher.write(v)
def seek(self, *args):
# Weird, but Google App Engine is seeking on stdout
name = threading.currentThread().getName()
catchers = self._catchers
if not name in catchers:
self._default.seek(*args)
else:
catchers[name].seek(*args)
def read(self, *args):
name = threading.currentThread().getName()
catchers = self._catchers
if not name in catchers:
            return self._default.read(*args)
else:
            return catchers[name].read(*args)
def _writedefault(self, name, v):
self._default.write(v)
def _writefactory(self, name, v):
self._factory(name).write(v)
def _writeparam(self, name, v):
self._paramwriter(name, v)
def _writeerror(self, name, v):
assert False, (
"There is no PrintCatcher output stream for the thread %r"
% name)
def register(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
self._catchers[name] = catcher
def deregister(self, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
assert self._catchers.has_key(name), (
"There is no PrintCatcher catcher for the thread %r" % name)
del self._catchers[name]
_printcatcher = None
_oldstdout = None
def install(**kw):
global _printcatcher, _oldstdout, register, deregister
if (not _printcatcher or sys.stdout is not _printcatcher):
_oldstdout = sys.stdout
_printcatcher = sys.stdout = PrintCatcher(**kw)
register = _printcatcher.register
deregister = _printcatcher.deregister
def uninstall():
global _printcatcher, _oldstdout, register, deregister
if _printcatcher:
sys.stdout = _oldstdout
_printcatcher = _oldstdout = None
register = not_installed_error
deregister = not_installed_error
def not_installed_error(*args, **kw):
assert False, (
"threadedprint has not yet been installed (call "
"threadedprint.install())")
register = deregister = not_installed_error
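# --- Illustrative sketch added by the editor; not part of the original module. ---
# Minimal demonstration of install(factory=...): each worker thread's printed
# output is collected in its own in-memory buffer keyed by thread name. The
# helper name and buffer scheme are assumptions made for this example only.
def _example_per_thread_buffers():
    from cStringIO import StringIO
    buffers = {}
    def factory(thread_name):
        # reuse one buffer per thread name (the factory is called on every write)
        return buffers.setdefault(thread_name, StringIO())
    install(factory=factory)
    try:
        def worker():
            print "hello from", threading.currentThread().getName()
        workers = [threading.Thread(target=worker) for _ in range(2)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
    finally:
        uninstall()
    return dict((name, buf.getvalue()) for name, buf in buffers.items())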
class StdinCatcher(filemixin.FileMixin):
def __init__(self, default=None, factory=None, paramwriter=None):
assert len(filter(lambda x: x is not None,
[default, factory, paramwriter])) <= 1, (
"You can only provide one of default, factory, or paramwriter")
if default:
self._defaultfunc = self._readdefault
elif factory:
self._defaultfunc = self._readfactory
elif paramwriter:
self._defaultfunc = self._readparam
else:
self._defaultfunc = self._readerror
self._default = default
self._factory = factory
self._paramwriter = paramwriter
self._catchers = {}
def read(self, size=None, currentThread=threading.currentThread):
name = currentThread().getName()
catchers = self._catchers
if not catchers.has_key(name):
return self._defaultfunc(name, size)
else:
catcher = catchers[name]
return catcher.read(size)
    def _readdefault(self, name, size):
        return self._default.read(size)
    def _readfactory(self, name, size):
        return self._factory(name).read(size)
    def _readparam(self, name, size):
        # __init__ above stores the callable as self._paramwriter
        return self._paramwriter(name, size)
def _readerror(self, name, size):
assert False, (
"There is no StdinCatcher output stream for the thread %r"
% name)
def register(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
self._catchers[name] = catcher
def deregister(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
assert self._catchers.has_key(name), (
"There is no StdinCatcher catcher for the thread %r" % name)
del self._catchers[name]
_stdincatcher = None
_oldstdin = None
def install_stdin(**kw):
global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
if not _stdincatcher:
_oldstdin = sys.stdin
_stdincatcher = sys.stdin = StdinCatcher(**kw)
register_stdin = _stdincatcher.register
deregister_stdin = _stdincatcher.deregister
def uninstall_stdin():
global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
if _stdincatcher:
sys.stdin = _oldstdin
_stdincatcher = _oldstdin = None
register_stdin = deregister_stdin = not_installed_error_stdin
def not_installed_error_stdin(*args, **kw):
assert False, (
"threadedprint has not yet been installed for stdin (call "
"threadedprint.install_stdin())")
| bsd-3-clause |
RoanokeHobby/Robots | CamJamRobot/9-avoidance.py | 1 | 4715 | # CamJam EduKit 3 - Robotics
# Worksheet 9 – Obstacle Avoidance
# Copyright (c) 2016 CamJam-EduKit
# The MIT License (MIT)
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
# Reversed the pins for a test
pinMotorAForwards = 9
pinMotorABackwards = 10
pinMotorBForwards = 7
pinMotorBBackwards = 8
# Define GPIO pins to use on the Pi
pinTrigger = 17
pinEcho = 18
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent
DutyCycleA = 30
DutyCycleB = 30
# Setting the duty cycle to 0 means the motors will not turn
Stop = 0
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set pins as output and input
GPIO.setup(pinTrigger, GPIO.OUT) # Trigger
GPIO.setup(pinEcho, GPIO.IN) # Echo
# Distance Variables
HowNear = 15.0
ReverseTime = 0.5
TurnTime = 0.75
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
# Turn all motors off
def StopMotors():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors forwards
def Forwards():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors backwards
def Backwards():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Turn left
def Left():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn Right
def Right():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Take a distance measurement
def Measure():
GPIO.output(pinTrigger, True)
time.sleep(0.00001)
GPIO.output(pinTrigger, False)
StartTime = time.time()
StopTime = StartTime
while GPIO.input(pinEcho)==0:
StartTime = time.time()
StopTime = StartTime
while GPIO.input(pinEcho)==1:
StopTime = time.time()
# If the sensor is too close to an object, the Pi cannot
# see the echo quickly enough, so we have to detect that
# problem and say what has happened.
if StopTime-StartTime >= 0.04:
print("Hold on there! You're too close for me to see.")
StopTime = StartTime
break
ElapsedTime = StopTime - StartTime
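    # ElapsedTime is the round-trip time of the pulse: multiply by the speed of
    # sound (roughly 34300 cm/s) and halve it to get the one-way distance in cm.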
Distance = (ElapsedTime * 34300)/2
return Distance
# Return True if the ultrasonic sensor sees an obstacle
def IsNearObstacle(localHowNear):
Distance = Measure()
print("Is Near Obstacle: "+str(Distance))
if Distance < localHowNear:
return True
else:
return False
# Move back a little, then turn right
def AvoidObstacle():
# Back off a little
print("Go Backwards")
Backwards()
time.sleep(ReverseTime)
StopMotors()
# Turn right
print("Turn Right")
Right()
time.sleep(TurnTime)
StopMotors()
# Your code to control the robot goes below this line
try:
# Set trigger to False (Low)
GPIO.output(pinTrigger, False)
# Allow module to settle
# time.sleep(0.25)
print("Loading ...")
print("Waiting for sensor to stablize")
time.sleep(1)
print("Three ...")
time.sleep(1)
print("Two ...")
time.sleep(1)
print("One ...")
print("Ultrasonic Measurement")
#repeat the next indented block forever
while True:
Forwards()
time.sleep(0.05)
if IsNearObstacle(HowNear):
StopMotors()
AvoidObstacle()
# If you press CTRL+C, cleanup and stop
except KeyboardInterrupt:
GPIO.cleanup()
| gpl-3.0 |
noironetworks/neutron | neutron/tests/unit/common/moved_globals_code1.py | 9 | 1026 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used by test cases in test__deprecate.py
"""
from neutron.common import _deprecate
from neutron.tests.unit.common import moved_globals_target
# a has been moved to moved_globals_target.a
b = 'barasingha'
# c has been renamed to d
d = 'capybara'
# e has been moved to moved_globals_target.f
g = 'gelada'
_deprecate._moved_global('c', new_name='d')
_deprecate._moved_global('e', new_name='f', new_module=moved_globals_target)
_deprecate._MovedGlobals(moved_globals_target)
| apache-2.0 |
aburan28/blaze | blaze/tests/test_get_set.py | 7 | 1665 | from __future__ import absolute_import, division, print_function
import blaze
from blaze.datadescriptor import dd_as_py
import numpy as np
import unittest
from blaze.py2help import skip
from blaze.tests.common import MayBeUriTest
class getitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
self.assertEqual(dd_as_py(a[0]._data), 0)
    @skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
print("a:", a, self.caps)
self.assertEqual(dd_as_py(a[0:2]._data), [0,1])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
self.assertEqual(dd_as_py(a[1]._data), [3,4,5])
class getitem_blz(getitem):
caps={'compress': True}
class setitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0] = 1
self.assertEqual(dd_as_py(a[0]._data), 1)
@skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0:2] = 2
self.assertEqual(dd_as_py(a[0:2]._data), [2,2])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
a[1] = 2
self.assertEqual(dd_as_py(a[1]._data), [2,2,2])
# BLZ is going to be read and append only for the time being
# class setitem_blz(setitem):
# caps={'compress': True}
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
chokribr/invenio | invenio/legacy/bibclassify/engine.py | 4 | 27111 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibClassify engine.
This module is the main module of BibClassify. Its two main methods are
output_keywords_for_sources and get_keywords_from_text. The first one outputs
keywords for a list of sources (local files or URLs, PDF or text) while the
second one outputs the keywords for text lines (which are obtained using the
module bibclassify_text_normalizer).
This module also takes care of the different outputs (text, MARCXML or HTML).
But unfortunately there is a confusion between running in a standalone mode
and producing output suitable for printing, and running in a web-based
mode where the webtemplate is used. For the moment the pieces of the representation
code are left in this module.
"""
from __future__ import print_function
import os
import re
from six import iteritems
import config as bconfig
from invenio.legacy.bibclassify import ontology_reader as reader
import text_extractor as extractor
import text_normalizer as normalizer
import keyword_analyzer as keyworder
import acronym_analyzer as acronymer
from invenio.utils.text import encode_for_xml
from invenio.utils.filedownload import download_url
log = bconfig.get_logger("bibclassify.engine")
# ---------------------------------------------------------------------
# API
# ---------------------------------------------------------------------
def output_keywords_for_sources(input_sources, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER, spires=False,
match_mode="full", no_cache=False, with_author_keywords=False,
rebuild_cache=False, only_core_tags=False, extract_acronyms=False,
api=False, **kwargs):
"""Output the keywords for each source in sources."""
from invenio.legacy.refextract.engine import get_plaintext_document_body
# Inner function which does the job and it would be too much work to
# refactor the call (and it must be outside the loop, before it did
# not process multiple files)
def process_lines():
if output_mode == "text":
print("Input file: %s" % source)
line_nb = len(text_lines)
word_nb = 0
for line in text_lines:
word_nb += len(re.findall("\S+", line))
log.info("Remote file has %d lines and %d words." % (line_nb, word_nb))
output = get_keywords_from_text(
text_lines,
taxonomy_name,
output_mode=output_mode,
output_limit=output_limit,
spires=spires,
match_mode=match_mode,
no_cache=no_cache,
with_author_keywords=with_author_keywords,
rebuild_cache=rebuild_cache,
only_core_tags=only_core_tags,
extract_acronyms=extract_acronyms
)
if api:
return output
else:
if isinstance(output, dict):
for i in output:
print(output[i])
# Get the fulltext for each source.
for entry in input_sources:
log.info("Trying to read input file %s." % entry)
text_lines = None
source = ""
if os.path.isdir(entry):
for filename in os.listdir(entry):
if filename.startswith('.'):
continue
filename = os.path.join(entry, filename)
if os.path.isfile(filename):
text_lines, dummy = get_plaintext_document_body(filename)
if text_lines:
source = filename
process_lines()
elif os.path.isfile(entry):
text_lines, dummy = get_plaintext_document_body(entry)
if text_lines:
source = os.path.basename(entry)
process_lines()
else:
# Treat as a URL.
local_file = download_url(entry)
text_lines, dummy = get_plaintext_document_body(local_file)
if text_lines:
source = entry.split("/")[-1]
process_lines()
def get_keywords_from_local_file(local_file, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER, spires=False,
match_mode="full", no_cache=False, with_author_keywords=False,
rebuild_cache=False, only_core_tags=False, extract_acronyms=False, api=False,
**kwargs):
"""Output keywords reading a local file.
Arguments and output are the same as for :see: get_keywords_from_text().
"""
log.info("Analyzing keywords for local file %s." % local_file)
text_lines = extractor.text_lines_from_local_file(local_file)
return get_keywords_from_text(text_lines,
taxonomy_name,
output_mode=output_mode,
output_limit=output_limit,
spires=spires,
match_mode=match_mode,
no_cache=no_cache,
with_author_keywords=with_author_keywords,
rebuild_cache=rebuild_cache,
only_core_tags=only_core_tags,
extract_acronyms=extract_acronyms)
def get_keywords_from_text(text_lines, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER,
spires=False, match_mode="full", no_cache=False,
with_author_keywords=False, rebuild_cache=False,
only_core_tags=False, extract_acronyms=False,
**kwargs):
"""Extract keywords from the list of strings.
:param text_lines: list of strings (will be normalized before being
joined into one string)
:param taxonomy_name: string, name of the taxonomy_name
:param output_mode: string - text|html|marcxml|raw
:param output_limit: int
:param spires: boolean, if True marcxml output reflect spires codes.
:param match_mode: str - partial|full; in partial mode only
beginning of the fulltext is searched.
:param no_cache: boolean, means loaded definitions will not be saved.
:param with_author_keywords: boolean, extract keywords from the pdfs.
:param rebuild_cache: boolean
:param only_core_tags: boolean
:return: if output_mode=raw, it will return
(single_keywords, composite_keywords, author_keywords, acronyms)
for other output modes it returns formatted string
"""
cache = reader.get_cache(taxonomy_name)
if not cache:
reader.set_cache(taxonomy_name,
reader.get_regular_expressions(taxonomy_name,
rebuild=rebuild_cache,
no_cache=no_cache))
cache = reader.get_cache(taxonomy_name)
_skw = cache[0]
_ckw = cache[1]
text_lines = normalizer.cut_references(text_lines)
fulltext = normalizer.normalize_fulltext("\n".join(text_lines))
if match_mode == "partial":
fulltext = _get_partial_text(fulltext)
author_keywords = None
if with_author_keywords:
author_keywords = extract_author_keywords(_skw, _ckw, fulltext)
acronyms = {}
if extract_acronyms:
acronyms = extract_abbreviations(fulltext)
single_keywords = extract_single_keywords(_skw, fulltext)
composite_keywords = extract_composite_keywords(_ckw, fulltext, single_keywords)
if only_core_tags:
        single_keywords = clean_before_output(_filter_core_keywords(single_keywords))
        composite_keywords = _filter_core_keywords(composite_keywords)
else:
# Filter out the "nonstandalone" keywords
single_keywords = clean_before_output(single_keywords)
return get_keywords_output(single_keywords, composite_keywords, taxonomy_name,
author_keywords, acronyms, output_mode, output_limit,
spires, only_core_tags)
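# --- Illustrative sketch added by the editor; not part of the original module. ---
# Minimal call of get_keywords_from_text(). Note that the output mode is
# iterated over, so a list of mode names is passed. The taxonomy name "HEP"
# and the sample text are assumptions made for this example only.
def _example_extract_keywords():
    sample_lines = ["We study black hole pair production in quantum gravity."]
    results = get_keywords_from_text(sample_lines, "HEP",
                                     output_mode=["dict"],
                                     output_limit=10)
    # results is keyed by the requested output modes, e.g. results["dict"]
    return results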
def extract_single_keywords(skw_db, fulltext):
"""Find single keywords in the fulltext.
:var skw_db: list of KeywordToken objects
:var fulltext: string, which will be searched
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], ],
..
}
or empty {}
"""
return keyworder.get_single_keywords(skw_db, fulltext) or {}
def extract_composite_keywords(ckw_db, fulltext, skw_spans):
"""Returns a list of composite keywords bound with the number of
occurrences found in the text string.
    :var ckw_db: list of KeywordToken objects (they are supposed to be composite ones)
:var fulltext: string to search in
:skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
}
or empty {}
"""
return keyworder.get_composite_keywords(ckw_db, fulltext, skw_spans) or {}
def extract_abbreviations(fulltext):
"""Extract acronyms from the fulltext
:var fulltext: utf-8 string
    :return: dictionary of matches in a format {
<keyword object>, [matched skw or ckw object, ....]
}
or empty {}
"""
acronyms = {}
K = reader.KeywordToken
for k, v in acronymer.get_acronyms(fulltext).items():
acronyms[K(k, type='acronym')] = v
return acronyms
def extract_author_keywords(skw_db, ckw_db, fulltext):
"""Finds out human defined keyowrds in a text string. Searches for
the string "Keywords:" and its declinations and matches the
following words.
:var skw_db: list single kw object
:var ckw_db: list of composite kw objects
:var fulltext: utf-8 string
    :return: dictionary of matches in a format {
<keyword object>, [matched skw or ckw object, ....]
}
or empty {}
"""
akw = {}
K = reader.KeywordToken
for k, v in keyworder.get_author_keywords(skw_db, ckw_db, fulltext).items():
akw[K(k, type='author-kw')] = v
return akw
# ---------------------------------------------------------------------
# presentation functions
# ---------------------------------------------------------------------
def get_keywords_output(single_keywords, composite_keywords, taxonomy_name,
author_keywords=None, acronyms=None, style="text", output_limit=0,
spires=False, only_core_tags=False):
"""Returns a formatted string representing the keywords according
to the chosen style. This is the main routing call, this function will
also strip unwanted keywords before output and limits the number
of returned keywords
:var single_keywords: list of single keywords
:var composite_keywords: list of composite keywords
:var taxonomy_name: string, taxonomy name
:keyword author_keywords: dictionary of author keywords extracted from fulltext
:keyword acronyms: dictionary of extracted acronyms
:keyword style: text|html|marc
:keyword output_limit: int, number of maximum keywords printed (it applies
to single and composite keywords separately)
    :keyword spires: boolean meaning spires output style
:keyword only_core_tags: boolean
"""
categories = {}
# sort the keywords, but don't limit them (that will be done later)
single_keywords_p = _sort_kw_matches(single_keywords)
composite_keywords_p = _sort_kw_matches(composite_keywords)
for w in single_keywords_p:
categories[w[0].concept] = w[0].type
    for w in composite_keywords_p:
        categories[w[0].concept] = w[0].type
complete_output = _output_complete(single_keywords_p, composite_keywords_p,
author_keywords, acronyms, spires,
only_core_tags, limit=output_limit)
functions = {"text": _output_text, "marcxml": _output_marc, "html":
_output_html, "dict": _output_dict}
my_styles = {}
for s in style:
if s != "raw":
my_styles[s] = functions[s](complete_output, categories)
else:
if output_limit > 0:
my_styles["raw"] = (_kw(_sort_kw_matches(single_keywords, output_limit)),
_kw(_sort_kw_matches(composite_keywords, output_limit)),
author_keywords, # this we don't limit (?)
_kw(_sort_kw_matches(acronyms, output_limit)))
else:
my_styles["raw"] = (single_keywords_p, composite_keywords_p, author_keywords, acronyms)
return my_styles
def build_marc(recid, single_keywords, composite_keywords,
spires=False, author_keywords=None, acronyms=None):
"""Create xml record.
:var recid: ingeter
:var single_keywords: dictionary of kws
:var composite_keywords: dictionary of kws
:keyword spires: please don't use, left for historical
reasons
:keyword author_keywords: dictionary of extracted keywords
:keyword acronyms: dictionary of extracted acronyms
:return: str, marxml
"""
output = ['<collection><record>\n'
'<controlfield tag="001">%s</controlfield>' % recid]
# no need to sort
single_keywords = single_keywords.items()
composite_keywords = composite_keywords.items()
output.append(_output_marc(single_keywords, composite_keywords, author_keywords, acronyms))
output.append('</record></collection>')
return '\n'.join(output)
def _output_marc(output_complete, categories, kw_field=bconfig.CFG_MAIN_FIELD,
auth_field=bconfig.CFG_AUTH_FIELD, acro_field=bconfig.CFG_ACRON_FIELD,
provenience='BibClassify'):
"""Output the keywords in the MARCXML format.
:var skw_matches: list of single keywords
:var ckw_matches: list of composite keywords
:var author_keywords: dictionary of extracted author keywords
:var acronyms: dictionary of acronyms
:var spires: boolean, True=generate spires output - BUT NOTE: it is
here only not to break compatibility, in fact spires output
should never be used for xml because if we read marc back
into the KeywordToken objects, we would not find them
:keyword provenience: string that identifies source (authority) that
assigned the contents of the field
:return: string, formatted MARC"""
kw_template = ('<datafield tag="%s" ind1="%s" ind2="%s">\n'
' <subfield code="2">%s</subfield>\n'
' <subfield code="a">%s</subfield>\n'
' <subfield code="n">%s</subfield>\n'
' <subfield code="9">%s</subfield>\n'
'</datafield>\n')
output = []
tag, ind1, ind2 = _parse_marc_code(kw_field)
for keywords in (output_complete["Single keywords"], output_complete["Core keywords"]):
for kw in keywords:
output.append(kw_template % (tag, ind1, ind2, encode_for_xml(provenience),
encode_for_xml(kw), keywords[kw],
encode_for_xml(categories[kw])))
for field, keywords in ((auth_field, output_complete["Author keywords"]),
(acro_field, output_complete["Acronyms"])):
if keywords and len(keywords) and field: # field='' we shall not save the keywords
tag, ind1, ind2 = _parse_marc_code(field)
for kw, info in keywords.items():
output.append(kw_template % (tag, ind1, ind2, encode_for_xml(provenience),
encode_for_xml(kw), '', encode_for_xml(categories[kw])))
return "".join(output)
def _output_complete(skw_matches=None, ckw_matches=None, author_keywords=None,
acronyms=None, spires=False, only_core_tags=False,
limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER):
if limit:
resized_skw = skw_matches[0:limit]
resized_ckw = ckw_matches[0:limit]
else:
resized_skw = skw_matches
resized_ckw = ckw_matches
results = {"Core keywords": _get_core_keywords(skw_matches, ckw_matches, spires=spires)}
if not only_core_tags:
results["Author keywords"] = _get_author_keywords(author_keywords, spires=spires)
results["Composite keywords"] = _get_compositekws(resized_ckw, spires=spires)
results["Single keywords"] = _get_singlekws(resized_skw, spires=spires)
results["Field codes"] = _get_fieldcodes(resized_skw, resized_ckw, spires=spires)
results["Acronyms"] = _get_acronyms(acronyms)
return results
def _output_dict(complete_output, categories):
return {
"complete_output": complete_output,
"categories": categories
}
def _output_text(complete_output, categories):
"""Output the results obtained in text format.
:return: str, html formatted output
"""
output = ""
for result in complete_output:
list_result = complete_output[result]
if list_result:
list_result_sorted = sorted(list_result, key=lambda x: list_result[x],
reverse=True)
output += "\n\n{0}:\n".format(result)
for element in list_result_sorted:
output += "\n{0} {1}".format(list_result[element], element)
output += "\n--\n{0}".format(_signature())
return output
def _output_html(complete_output, categories):
"""Output the same as txt output does, but HTML formatted.
:var skw_matches: sorted list of single keywords
:var ckw_matches: sorted list of composite keywords
:var author_keywords: dictionary of extracted author keywords
:var acronyms: dictionary of acronyms
:var spires: boolean
:var only_core_tags: boolean
:keyword limit: int, number of printed keywords
:return: str, html formatted output
"""
return """<html>
<head>
<title>Automatically generated keywords by bibclassify</title>
</head>
<body>
{0}
</body>
</html>""".format(
        _output_text(complete_output, categories).replace('\n', '<br>')
).replace('\n', '')
def _get_singlekws(skw_matches, spires=False):
"""
:var skw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for single_keyword, info in skw_matches:
output[single_keyword.output(spires)] = len(info[0])
return output
def _get_compositekws(ckw_matches, spires=False):
"""
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for composite_keyword, info in ckw_matches:
output[composite_keyword.output(spires)] = {"numbers": len(info[0]),
"details": info[1]}
return output
def _get_acronyms(acronyms):
"""Return a formatted list of acronyms."""
acronyms_str = {}
if acronyms:
for acronym, expansions in iteritems(acronyms):
expansions_str = ", ".join(["%s (%d)" % expansion
for expansion in expansions])
acronyms_str[acronym] = expansions_str
    return acronyms_str
def _get_author_keywords(author_keywords, spires=False):
"""Format the output for the author keywords.
:return: list of formatted author keywors
"""
out = {}
if author_keywords:
for keyword, matches in author_keywords.items():
skw_matches = matches[0] # dictionary of single keywords
ckw_matches = matches[1] # dict of composite keywords
matches_str = []
for ckw, spans in ckw_matches.items():
matches_str.append(ckw.output(spires))
for skw, spans in skw_matches.items():
matches_str.append(skw.output(spires))
if matches_str:
out[keyword] = matches_str
else:
out[keyword] = 0
return out
def _get_fieldcodes(skw_matches, ckw_matches, spires=False):
"""Return the output for the field codes.
:var skw_matches: dict of {keyword: [info,...]}
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: string"""
fieldcodes = {}
output = {}
for skw, _ in skw_matches:
for fieldcode in skw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add(skw.output(spires))
for ckw, _ in ckw_matches:
if len(ckw.fieldcodes):
for fieldcode in ckw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add(ckw.output(spires))
else: # inherit field-codes from the composites
for kw in ckw.getComponents():
for fieldcode in kw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add('%s*' % ckw.output(spires))
fieldcodes.setdefault('*', set()).add(kw.output(spires))
for fieldcode, keywords in fieldcodes.items():
output[fieldcode] = ', '.join(keywords)
return output
def _get_core_keywords(skw_matches, ckw_matches, spires=False):
"""Return the output for the field codes.
:var skw_matches: dict of {keyword: [info,...]}
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: set of formatted core keywords
"""
output = {}
category = {}
def _get_value_kw(kw):
"""Help to sort the Core keywords."""
i = 0
while kw[i].isdigit():
i += 1
if i > 0:
return int(kw[:i])
else:
return 0
for skw, info in skw_matches:
if skw.core:
output[skw.output(spires)] = len(info[0])
category[skw.output(spires)] = skw.type
for ckw, info in ckw_matches:
if ckw.core:
output[ckw.output(spires)] = len(info[0])
else:
#test if one of the components is not core
i = 0
for c in ckw.getComponents():
if c.core:
output[c.output(spires)] = info[1][i]
i += 1
return output
def _filter_core_keywords(keywords):
matches = {}
for kw, info in keywords.items():
if kw.core:
matches[kw] = info
return matches
def _signature():
"""Print out the bibclassify signature.
#todo: add information about taxonomy, rdflib"""
return 'bibclassify v%s' % (bconfig.VERSION,)
def clean_before_output(kw_matches):
"""Return a clean copy of the keywords data structure.
    Stripped of keywords flagged as nostandalone and of other unwanted elements."""
filtered_kw_matches = {}
for kw_match, info in iteritems(kw_matches):
if not kw_match.nostandalone:
filtered_kw_matches[kw_match] = info
return filtered_kw_matches
# ---------------------------------------------------------------------
# helper functions
# ---------------------------------------------------------------------
def _skw_matches_comparator(kw0, kw1):
"""
    Compare two single keyword objects.
    First by the number of their spans (i.e. how many times they were found);
    if equal, compare them by the lengths of their labels.
"""
list_comparison = cmp(len(kw1[1][0]), len(kw0[1][0]))
if list_comparison:
return list_comparison
if kw0[0].isComposite() and kw1[0].isComposite():
component_avg0 = sum(kw0[1][1]) / len(kw0[1][1])
component_avg1 = sum(kw1[1][1]) / len(kw1[1][1])
component_comparison = cmp(component_avg1, component_avg0)
if component_comparison:
return component_comparison
return cmp(len(str(kw1[0])), len(str(kw0[0])))
def _kw(keywords):
"""Turn list of keywords into dictionary."""
r = {}
for k, v in keywords:
r[k] = v
return r
def _sort_kw_matches(skw_matches, limit=0):
"""Return a resized version of keywords to the given length."""
sorted_keywords = list(skw_matches.items())
sorted_keywords.sort(_skw_matches_comparator)
return limit and sorted_keywords[:limit] or sorted_keywords
def _get_partial_text(fulltext):
"""
Return a short version of the fulltext used with the partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of the
text."""
length = len(fulltext)
get_index = lambda x: int(float(x) / 100 * length)
partial_text = [fulltext[get_index(start):get_index(end)]
for start, end in bconfig.CFG_BIBCLASSIFY_PARTIAL_TEXT]
return "\n".join(partial_text)
def save_keywords(filename, xml):
tmp_dir = os.path.dirname(filename)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
file_desc = open(filename, "w")
file_desc.write(xml)
file_desc.close()
def get_tmp_file(recid):
tmp_directory = "%s/bibclassify" % bconfig.CFG_TMPDIR
if not os.path.isdir(tmp_directory):
os.mkdir(tmp_directory)
filename = "bibclassify_%s.xml" % recid
abs_path = os.path.join(tmp_directory, filename)
return abs_path
def _parse_marc_code(field):
"""Parse marc field and return default indicators if not filled in."""
field = str(field)
if len(field) < 4:
raise Exception('Wrong field code: %s' % field)
else:
field += '__'
tag = field[0:3]
ind1 = field[3].replace('_', '')
ind2 = field[4].replace('_', '')
return tag, ind1, ind2
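# Editor's illustration (not part of the original module):
#   _parse_marc_code('65317') returns ('653', '1', '7')
#   _parse_marc_code('653__') returns ('653', '', '')
# Underscores stand for empty indicators, and codes shorter than four
# characters raise an exception.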
if __name__ == "__main__":
log.error("Please use bibclassify_cli from now on.")
| gpl-2.0 |
sharpbitmessage/PyBitmessage | regenerateaddresses.py | 1 | 8348 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'regenerateaddresses.ui'
#
# Created: Thu Jan 24 15:52:24 2013
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_regenerateAddressesDialog(object):
def setupUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setObjectName(_fromUtf8("regenerateAddressesDialog"))
regenerateAddressesDialog.resize(532, 332)
self.gridLayout_2 = QtGui.QGridLayout(regenerateAddressesDialog)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.buttonBox = QtGui.QDialogButtonBox(regenerateAddressesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.groupBox = QtGui.QGroupBox(regenerateAddressesDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_6 = QtGui.QLabel(self.groupBox)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1)
self.lineEditPassphrase = QtGui.QLineEdit(self.groupBox)
self.lineEditPassphrase.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)
self.lineEditPassphrase.setEchoMode(QtGui.QLineEdit.Password)
self.lineEditPassphrase.setObjectName(_fromUtf8("lineEditPassphrase"))
self.gridLayout.addWidget(self.lineEditPassphrase, 2, 0, 1, 5)
self.label_11 = QtGui.QLabel(self.groupBox)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 3, 0, 1, 3)
self.spinBoxNumberOfAddressesToMake = QtGui.QSpinBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxNumberOfAddressesToMake.sizePolicy().hasHeightForWidth())
self.spinBoxNumberOfAddressesToMake.setSizePolicy(sizePolicy)
self.spinBoxNumberOfAddressesToMake.setMinimum(1)
self.spinBoxNumberOfAddressesToMake.setProperty("value", 8)
self.spinBoxNumberOfAddressesToMake.setObjectName(_fromUtf8("spinBoxNumberOfAddressesToMake"))
self.gridLayout.addWidget(self.spinBoxNumberOfAddressesToMake, 3, 3, 1, 1)
spacerItem = QtGui.QSpacerItem(132, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 3, 4, 1, 1)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.lineEditAddressVersionNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditAddressVersionNumber.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditAddressVersionNumber.sizePolicy().hasHeightForWidth())
self.lineEditAddressVersionNumber.setSizePolicy(sizePolicy)
self.lineEditAddressVersionNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditAddressVersionNumber.setObjectName(_fromUtf8("lineEditAddressVersionNumber"))
self.gridLayout.addWidget(self.lineEditAddressVersionNumber, 4, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 4, 2, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 5, 0, 1, 1)
self.lineEditStreamNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditStreamNumber.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditStreamNumber.sizePolicy().hasHeightForWidth())
self.lineEditStreamNumber.setSizePolicy(sizePolicy)
self.lineEditStreamNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditStreamNumber.setObjectName(_fromUtf8("lineEditStreamNumber"))
self.gridLayout.addWidget(self.lineEditStreamNumber, 5, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(325, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 5, 2, 1, 3)
self.checkBoxEighteenByteRipe = QtGui.QCheckBox(self.groupBox)
self.checkBoxEighteenByteRipe.setObjectName(_fromUtf8("checkBoxEighteenByteRipe"))
self.gridLayout.addWidget(self.checkBoxEighteenByteRipe, 6, 0, 1, 5)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setWordWrap(True)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 7, 0, 1, 5)
self.label = QtGui.QLabel(self.groupBox)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 5)
self.gridLayout_2.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(regenerateAddressesDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), regenerateAddressesDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), regenerateAddressesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(regenerateAddressesDialog)
def retranslateUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setWindowTitle(QtGui.QApplication.translate("regenerateAddressesDialog", "Regenerate Existing Addresses", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("regenerateAddressesDialog", "Regenerate existing addresses", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Passphrase", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Number of addresses to make based on your passphrase:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Address version Number:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditAddressVersionNumber.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "2", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Stream number:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditStreamNumber.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "1", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxEighteenByteRipe.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Spend several minutes of extra computing time to make the address(es) 1 or 2 characters shorter", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "You must check (or not check) this box just like you did (or didn\'t) when you made your addresses the first time.", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "If you have previously made deterministic addresses but lost them due to an accident (like hard drive failure), you can regenerate them here. If you used the random number generator to make your addresses then this form will be of no use to you.", None, QtGui.QApplication.UnicodeUTF8))
| mit |
emchristiansen/gtest-sbt-cpp | test/gtest_test_utils.py | 408 | 10444 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
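# Editor's note (illustrative, not part of the original script): GetExitStatus
# is meant to wrap the raw result of os.system(), e.g.
#   status = GetExitStatus(os.system('some_command'))
# which yields the command's exit() argument on both Windows and Unix, or -1
# if the command did not exit normally.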
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) tuple; the first element
      # holds the child's combined output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
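# --- Illustrative sketch added by the editor; not part of the original script. ---
# Typical use of the Subprocess wrapper in a gtest test script: run a test
# binary and inspect its combined output and exit status. The binary name
# 'gtest_help_test_' is an assumption made for this example.
def _ExampleRunBinary():
  binary = GetTestExecutablePath('gtest_help_test_')
  p = Subprocess([binary, '--help'])
  if p.terminated_by_signal:
    return 'killed by signal %d' % p.signal
  return 'exit code %d, %d bytes of output' % (p.exit_code, len(p.output))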
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 3 | 12567 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
| bsd-3-clause |
supersven/intellij-community | python/lib/Lib/site-packages/django/utils/unittest/compatibility.py | 575 | 2096 | import os
import sys
try:
from functools import wraps
except ImportError:
# only needed for Python 2.4
def wraps(_):
def _wraps(func):
return func
return _wraps
__unittest = True
def _relpath_nt(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
# default to posixpath definition
def _relpath_posix(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
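# For example, relpath('/a/b/c', start='/a/d') returns '../b/c' under the
# POSIX variant: the common prefix is '/a', so one os.pardir step is emitted
# before descending into 'b/c'.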
if os.path is sys.modules.get('ntpath'):
relpath = _relpath_nt
else:
relpath = _relpath_posix
| apache-2.0 |
jay-tyler/ansible | lib/ansible/playbook/taggable.py | 128 | 3278 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
from six import string_types
from ansible.errors import AnsibleError
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
untagged = frozenset(['untagged'])
_tags = FieldAttribute(isa='list', default=[], listof=(string_types,int))
def __init__(self):
super(Taggable, self).__init__()
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
elif isinstance(ds, basestring):
return [ ds ]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
def _get_attr_tags(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
tags = self._attributes['tags']
if tags is None:
tags = []
if hasattr(self, '_get_parent_attribute'):
tags = self._get_parent_attribute('tags', extend=True)
return tags
def evaluate_tags(self, only_tags, skip_tags, all_vars):
''' this checks if the current item should be executed depending on tag options '''
should_run = True
if self.tags:
templar = Templar(loader=self._loader, variables=all_vars)
tags = templar.template(self.tags)
if not isinstance(tags, list):
if tags.find(',') != -1:
tags = set(tags.split(','))
else:
tags = set([tags])
else:
tags = set([i for i,_ in itertools.groupby(tags)])
else:
# this makes isdisjoint work for untagged
tags = self.untagged
if only_tags:
should_run = False
if 'always' in tags or 'all' in only_tags:
should_run = True
elif not tags.isdisjoint(only_tags):
should_run = True
elif 'tagged' in only_tags and tags != self.untagged:
should_run = True
if should_run and skip_tags:
# Check for tags that we need to skip
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
elif not tags.isdisjoint(skip_tags):
should_run = False
elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
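# Illustrative behaviour (a sketch, not executed here): a task tagged
# ['deploy'] runs when only_tags contains 'deploy' or 'all' and is skipped
# when skip_tags contains 'deploy'; a task with no tags at all is treated as
# the special 'untagged' tag for the 'tagged'/'untagged' selectors.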
| gpl-3.0 |
pselle/calibre | src/calibre/ebooks/lrf/input.py | 14 | 14450 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import textwrap, operator
from copy import deepcopy, copy
from lxml import etree
from calibre import guess_type
class Canvas(etree.XSLTExtension):
def __init__(self, doc, styles, text_block, log):
self.doc = doc
self.styles = styles
self.text_block = text_block
self.log = log
self.processed = set([])
def execute(self, context, self_node, input_node, output_parent):
cid = input_node.get('objid', None)
if cid is None or cid in self.processed:
return
self.processed.add(cid)
input_node = self.doc.xpath('//Canvas[@objid="%s"]'%cid)[0]
objects = list(self.get_objects(input_node))
if len(objects) == 1 and objects[0][0].tag == 'ImageBlock':
self.image_page(input_node, objects[0][0], output_parent)
else:
canvases = [input_node]
for x in input_node.itersiblings():
if x.tag == 'Canvas':
oid = x.get('objid', None)
if oid is not None:
canvases.append(x)
self.processed.add(oid)
else:
break
table = etree.Element('table')
table.text = '\n\t'
for canvas in canvases:
oid = canvas.get('objid')
tr = table.makeelement('tr')
tr.set('id', oid)
tr.tail = '\n\t'
table.append(tr)
for obj, x, y in self.get_objects(canvas):
if obj.tag != 'TextBlock':
self.log.warn(obj.tag, 'elements in Canvas not supported')
continue
td = table.makeelement('td')
self.text_block.render_block(obj, td)
tr.append(td)
output_parent.append(table)
def image_page(self, input_node, block, output_parent):
div = etree.Element('div')
div.set('id', input_node.get('objid', 'scuzzy'))
div.set('class', 'image_page')
width = self.styles.to_num(block.get("xsize", None))
height = self.styles.to_num(block.get("ysize", None))
img = div.makeelement('img')
if width is not None:
img.set('width', str(int(width)))
if height is not None:
img.set('height', str(int(height)))
ref = block.get('refstream', None)
if ref is not None:
imstr = self.doc.xpath('//ImageStream[@objid="%s"]'%ref)
if imstr:
src = imstr[0].get('file', None)
if src:
img.set('src', src)
div.append(img)
output_parent.append(div)
def get_objects(self, node):
for x in node.xpath('descendant::PutObj[@refobj and @x1 and @y1]'):
objs = node.xpath('//*[@objid="%s"]'%x.get('refobj'))
x, y = map(self.styles.to_num, (x.get('x1'), x.get('y1')))
if objs and x is not None and y is not None:
yield objs[0], int(x), int(y)
class MediaType(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
name = input_node.get('file', None)
typ = guess_type(name)[0]
if not typ:
typ = 'application/octet-stream'
output_parent.text = typ
class ImageBlock(etree.XSLTExtension):
def __init__(self, canvas):
etree.XSLTExtension.__init__(self)
self.canvas = canvas
def execute(self, context, self_node, input_node, output_parent):
self.canvas.image_page(input_node, input_node, output_parent)
class RuledLine(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
hr = etree.Element('hr')
output_parent.append(hr)
class TextBlock(etree.XSLTExtension):
def __init__(self, styles, char_button_map, plot_map, log):
etree.XSLTExtension.__init__(self)
self.styles = styles
self.log = log
self.char_button_map = char_button_map
self.plot_map = plot_map
def execute(self, context, self_node, input_node, output_parent):
input_node = deepcopy(input_node)
div = etree.Element('div')
self.render_block(input_node, div)
output_parent.append(div)
def render_block(self, node, root):
ts = node.get('textstyle', None)
classes = []
bs = node.get('blockstyle')
if bs in self.styles.block_style_map:
classes.append('bs%d'%self.styles.block_style_map[bs])
if ts in self.styles.text_style_map:
classes.append('ts%d'%self.styles.text_style_map[ts])
if classes:
root.set('class', ' '.join(classes))
objid = node.get('objid', None)
if objid:
root.set('id', objid)
root.text = node.text
self.root = root
self.parent = root
self.add_text_to = (self.parent, 'text')
self.fix_deep_nesting(node)
for child in node:
self.process_child(child)
def fix_deep_nesting(self, node):
deepest = 1
def depth(node):
parent = node.getparent()
ans = 1
while parent is not None:
ans += 1
parent = parent.getparent()
return ans
for span in node.xpath('descendant::Span'):
d = depth(span)
if d > deepest:
deepest = d
if d > 500:
break
if deepest < 500:
return
self.log.warn('Found deeply nested spans. Flattening.')
#with open('/t/before.xml', 'wb') as f:
# f.write(etree.tostring(node, method='xml'))
spans = [(depth(span), span) for span in node.xpath('descendant::Span')]
spans.sort(key=operator.itemgetter(0), reverse=True)
for depth, span in spans:
if depth < 3:
continue
p = span.getparent()
gp = p.getparent()
idx = p.index(span)
pidx = gp.index(p)
children = list(p)[idx:]
t = children[-1].tail
t = t if t else ''
children[-1].tail = t + (p.tail if p.tail else '')
p.tail = ''
pattrib = dict(**p.attrib) if p.tag == 'Span' else {}
for child in children:
p.remove(child)
if pattrib and child.tag == "Span":
attrib = copy(pattrib)
attrib.update(child.attrib)
child.attrib.update(attrib)
for child in reversed(children):
gp.insert(pidx+1, child)
#with open('/t/after.xml', 'wb') as f:
# f.write(etree.tostring(node, method='xml'))
def add_text(self, text):
if text:
if getattr(self.add_text_to[0], self.add_text_to[1]) is None:
setattr(self.add_text_to[0], self.add_text_to[1], '')
setattr(self.add_text_to[0], self.add_text_to[1],
getattr(self.add_text_to[0], self.add_text_to[1])+ text)
def process_container(self, child, tgt):
idx = self.styles.get_text_styles(child)
if idx is not None:
tgt.set('class', 'ts%d'%idx)
self.parent.append(tgt)
orig_parent = self.parent
self.parent = tgt
self.add_text_to = (self.parent, 'text')
self.add_text(child.text)
for gchild in child:
self.process_child(gchild)
self.parent = orig_parent
self.add_text_to = (tgt, 'tail')
self.add_text(child.tail)
def process_child(self, child):
if child.tag == 'CR':
if self.parent == self.root or self.parent.tag == 'p':
self.parent = self.root.makeelement('p')
self.root.append(self.parent)
self.add_text_to = (self.parent, 'text')
else:
br = self.parent.makeelement('br')
self.parent.append(br)
self.add_text_to = (br, 'tail')
self.add_text(child.tail)
elif child.tag in ('P', 'Span', 'EmpLine', 'NoBR'):
span = self.root.makeelement('span')
if child.tag == 'EmpLine':
td = 'underline' if child.get('emplineposition', 'before') == 'before' else 'overline'
span.set('style', 'text-decoration: '+td)
self.process_container(child, span)
elif child.tag == 'Sup':
sup = self.root.makeelement('sup')
self.process_container(child, sup)
elif child.tag == 'Sub':
sub = self.root.makeelement('sub')
self.process_container(child, sub)
elif child.tag == 'Italic':
sup = self.root.makeelement('i')
self.process_container(child, sup)
elif child.tag == 'CharButton':
a = self.root.makeelement('a')
oid = child.get('refobj', None)
if oid in self.char_button_map:
a.set('href', self.char_button_map[oid])
self.process_container(child, a)
elif child.tag == 'Plot':
xsize = self.styles.to_num(child.get('xsize', None), 166./720)
ysize = self.styles.to_num(child.get('ysize', None), 166./720)
img = self.root.makeelement('img')
if xsize is not None:
img.set('width', str(int(xsize)))
if ysize is not None:
img.set('height', str(int(ysize)))
ro = child.get('refobj', None)
if ro in self.plot_map:
img.set('src', self.plot_map[ro])
self.parent.append(img)
self.add_text_to = (img, 'tail')
self.add_text(child.tail)
else:
self.log.warn('Unhandled Text element:', child.tag)
class Styles(etree.XSLTExtension):
def __init__(self):
etree.XSLTExtension.__init__(self)
self.text_styles, self.block_styles = [], []
self.text_style_map, self.block_style_map = {}, {}
self.CSS = textwrap.dedent('''
.image_page { text-align:center }
''')
def write(self, name='styles.css'):
def join(style):
ans = ['%s : %s;'%(k, v) for k, v in style.items()]
if ans:
ans[-1] = ans[-1][:-1]
return '\n\t'.join(ans)
with open(name, 'wb') as f:
f.write(self.CSS)
for (w, sel) in [(self.text_styles, 'ts'), (self.block_styles,
'bs')]:
for i, s in enumerate(w):
if not s:
continue
rsel = '.%s%d'%(sel, i)
s = join(s)
f.write(rsel + ' {\n\t' + s + '\n}\n\n')
def execute(self, context, self_node, input_node, output_parent):
if input_node.tag == 'TextStyle':
idx = self.get_text_styles(input_node)
if idx is not None:
self.text_style_map[input_node.get('objid')] = idx
else:
idx = self.get_block_styles(input_node)
self.block_style_map[input_node.get('objid')] = idx
def px_to_pt(self, px):
try:
px = float(px)
return px * 72./166.
except:
return None
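    # The 72./166. factor converts LRF pixel units to points, i.e. the source
    # is treated as roughly 166 px per 72-pt inch; e.g. px_to_pt(83) == 36.0.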
def color(self, val):
try:
val = int(val, 16)
r, g, b, a = val & 0xFF, (val>>8)&0xFF, (val>>16)&0xFF, (val>>24)&0xFF
if a == 255:
return None
if a == 0:
return 'rgb(%d,%d,%d)'%(r,g,b)
return 'rgba(%d,%d,%d,%f)'%(r,g,b,1.-a/255.)
except:
return None
def get_block_styles(self, node):
ans = {}
sm = self.px_to_pt(node.get('sidemargin', None))
if sm is not None:
ans['margin-left'] = ans['margin-right'] = '%fpt'%sm
ts = self.px_to_pt(node.get('topskip', None))
if ts is not None:
ans['margin-top'] = '%fpt'%ts
fs = self.px_to_pt(node.get('footskip', None))
if fs is not None:
ans['margin-bottom'] = '%fpt'%fs
fw = self.px_to_pt(node.get('framewidth', None))
if fw is not None:
ans['border-width'] = '%fpt'%fw
ans['border-style'] = 'solid'
fc = self.color(node.get('framecolor', None))
if fc is not None:
ans['border-color'] = fc
bc = self.color(node.get('bgcolor', None))
if bc is not None:
ans['background-color'] = bc
if ans not in self.block_styles:
self.block_styles.append(ans)
return self.block_styles.index(ans)
def to_num(self, val, factor=1.):
try:
return float(val)*factor
except:
return None
def get_text_styles(self, node):
ans = {}
fs = self.to_num(node.get('fontsize', None), 0.1)
if fs is not None:
ans['font-size'] = '%fpt'%fs
fw = self.to_num(node.get('fontweight', None))
if fw is not None:
ans['font-weight'] = ('bold' if fw >= 700 else 'normal')
#fn = getattr(obj, 'fontfacename', None)
#if fn is not None:
# fn = cls.FONT_MAP[fn]
# item('font-family: %s;'%fn)
fg = self.color(node.get('textcolor', None))
if fg is not None:
ans['color'] = fg
bg = self.color(node.get('textbgcolor', None))
if bg is not None:
ans['background-color'] = bg
al = node.get('align', None)
if al is not None:
all = dict(head='left', center='center', foot='right')
ans['text-align'] = all.get(al, 'left')
#lh = self.to_num(node.get('linespace', None), 0.1)
#if lh is not None:
# ans['line-height'] = '%fpt'%lh
pi = self.to_num(node.get('parindent', None), 0.1)
if pi is not None:
ans['text-indent'] = '%fpt'%pi
if not ans:
return None
if ans not in self.text_styles:
self.text_styles.append(ans)
return self.text_styles.index(ans)
| gpl-3.0 |
EliasTouil/simpleBlog | simpleBlog/Lib/sre_compile.py | 123 | 19817 | # -*- coding: utf-8 -*-
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFFL
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
# Sets of lowercase characters which have the same uppercase.
_equivalences = (
# LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
(0x69, 0x131), # iı
# LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
(0x73, 0x17f), # sſ
# MICRO SIGN, GREEK SMALL LETTER MU
(0xb5, 0x3bc), # µμ
# COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
(0x345, 0x3b9, 0x1fbe), # \u0345ιι
# GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
(0x3b2, 0x3d0), # βϐ
# GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
(0x3b5, 0x3f5), # εϵ
# GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
(0x3b8, 0x3d1), # θϑ
# GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
(0x3ba, 0x3f0), # κϰ
# GREEK SMALL LETTER PI, GREEK PI SYMBOL
(0x3c0, 0x3d6), # πϖ
# GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
(0x3c1, 0x3f1), # ρϱ
# GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
(0x3c2, 0x3c3), # ςσ
# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
(0x3c6, 0x3d5), # φϕ
# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
(0x1e61, 0x1e9b), # ṡẛ
)
# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
for t in _equivalences for i in t}
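# For instance, from the (0x69, 0x131) pair above, _ignorecase_fixes[0x69] is
# (0x131,) and _ignorecase_fixes[0x131] is (0x69,): 'i' and dotless i share an
# uppercase form, so each must also match the other when IGNORECASE is set.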
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
if (flags & SRE_FLAG_IGNORECASE and
not (flags & SRE_FLAG_LOCALE) and
flags & SRE_FLAG_UNICODE):
fixes = _ignorecase_fixes
else:
fixes = None
for op, av in pattern:
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
lo = _sre.getlower(av, flags)
if fixes and lo in fixes:
emit(OPCODES[IN_IGNORE])
skip = _len(code); emit(0)
if op is NOT_LITERAL:
emit(OPCODES[NEGATE])
for k in (lo,) + fixes[lo]:
emit(OPCODES[LITERAL])
emit(k)
emit(OPCODES[FAILURE])
code[skip] = _len(code) - skip
else:
emit(OPCODES[OP_IGNORE[op]])
emit(lo)
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = None
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup, fixes)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error, "internal: unsupported template operator"
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error, "look-behind requires fixed-width pattern"
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
# compile charset subprogram
emit = code.append
for op, av in _optimize_charset(charset, fixup, fixes,
flags & SRE_FLAG_UNICODE):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(av)
elif op is RANGE:
emit(av[0])
emit(av[1])
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error, "internal: unsupported set operator"
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup, fixes, isunicode):
# internal: optimize character set
out = []
tail = []
charmap = bytearray(256)
for op, av in charset:
while True:
try:
if op is LITERAL:
if fixup:
i = fixup(av)
charmap[i] = 1
if fixes and i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
charmap[av] = 1
elif op is RANGE:
r = range(av[0], av[1]+1)
if fixup:
r = map(fixup, r)
if fixup and fixes:
for i in r:
charmap[i] = 1
if i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
for i in r:
charmap[i] = 1
elif op is NEGATE:
out.append((op, av))
else:
tail.append((op, av))
except IndexError:
if len(charmap) == 256:
# character set contains non-UCS1 character codes
charmap += b'\0' * 0xff00
continue
# character set contains non-BMP character codes
if fixup and isunicode and op is RANGE:
lo, hi = av
ranges = [av]
# There are only two ranges of cased astral characters:
# 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
_fixup_range(max(0x10000, lo), min(0x11fff, hi),
ranges, fixup)
for lo, hi in ranges:
if lo == hi:
tail.append((LITERAL, hi))
else:
tail.append((RANGE, (lo, hi)))
else:
tail.append((op, av))
break
# compress character map
runs = []
q = 0
while True:
p = charmap.find(b'\1', q)
if p < 0:
break
if len(runs) >= 2:
runs = None
break
q = charmap.find(b'\0', p)
if q < 0:
runs.append((p, len(charmap)))
break
runs.append((p, q))
if runs is not None:
# use literal/range
for p, q in runs:
if q - p == 1:
out.append((LITERAL, p))
else:
out.append((RANGE, (p, q - 1)))
out += tail
# if the case was changed or new representation is more compact
if fixup or len(out) < len(charset):
return out
# else original character set is good enough
return charset
# use bitmap
if len(charmap) == 256:
data = _mk_bitmap(charmap)
out.append((CHARSET, data))
out += tail
return out
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 32-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (64 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of 256-bit chunks (8 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of Unicode has not yet been developed.
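    # Illustrative shape of the emitted word sequence (a sketch for a
    # hypothetical charset that compresses to two distinct 256-character
    # chunks):
    #   [2,            # number of distinct chunks
    #    <mapping>,    # the 256 one-byte chunk indices, packed into code words
    #    <chunk 0>,    # 256-bit bitmap for chunk 0
    #    <chunk 1>]    # 256-bit bitmap for chunk 1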
charmap = bytes(charmap) # should be hashable
comps = {}
mapping = bytearray(256)
block = 0
data = bytearray()
for i in range(0, 65536, 256):
chunk = charmap[i: i + 256]
if chunk in comps:
mapping[i // 256] = comps[chunk]
else:
mapping[i // 256] = comps[chunk] = block
block += 1
data += chunk
data = _mk_bitmap(data)
data[0:0] = [block] + _bytes_to_codes(mapping)
out.append((BIGCHARSET, data))
out += tail
return out
def _fixup_range(lo, hi, ranges, fixup):
for i in map(fixup, range(lo, hi+1)):
for k, (lo, hi) in enumerate(ranges):
if i < lo:
                if i == lo - 1:
ranges[k] = (i, hi)
else:
ranges.insert(k, (i, i))
break
elif i > hi:
if i == hi + 1:
ranges[k] = (lo, i)
break
else:
break
else:
ranges.append((i, i))
_CODEBITS = _sre.CODESIZE * 8
_BITS_TRANS = b'0' + b'1' * 255
def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
s = bytes(bits).translate(_BITS_TRANS)[::-1]
return [_int(s[i - _CODEBITS: i], 2)
for i in range(len(s), 0, -_CODEBITS)]
def _bytes_to_codes(b):
# Convert block indices to word array
import array
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
a = array.array(code, bytes(b))
assert a.itemsize == _sre.CODESIZE
assert len(a) * a.itemsize == len(b)
return a.tolist()
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
table = [-1] + ([0]*len(prefix))
for i in xrange(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
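        # For example, for the literal prefix "abab" the loop above yields
        # table == [-1, 0, 0, 1, 2]; the emitted part table[1:] == [0, 0, 1, 2]
        # records, for each prefix length, the length of the longest proper
        # prefix that is also a suffix (essentially the KMP failure function).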
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
try:
unicode
except NameError:
STRING_TYPES = (type(""),)
else:
STRING_TYPES = (type(""), type(unicode("")))
def isstring(obj):
for tp in STRING_TYPES:
if isinstance(obj, tp):
return 1
return 0
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
code = _code(p, flags)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
| gpl-3.0 |
EmadMokhtar/halaqat | students/forms.py | 1 | 6022 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Layout
from django import forms
from django.utils.translation import ugettext_lazy as _
from students.models import Student
class StudentForm(forms.ModelForm):
dob = forms.DateField(widget=forms.DateInput(
attrs={'class': 'datepicker'}),
label=_('DOB'))
address = forms.CharField(widget=forms.Textarea(),
label=_('Address'))
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div(
HTML(_('<h3 class="panel-title">Basic Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('civil_id', css_class='col-md-6'),
Div('dob', css_class='col-md-6'),
css_class='row'
),
Div(
Div('gender', css_class='col-md-6'),
Div('nationality', css_class='col-md-6'),
css_class='row'
),
Div(
Div('school', css_class='col-md-6'),
Div('grade', css_class='col-md-6'),
css_class='row'
),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Contact Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('mobile_number', css_class='col-md-6'),
Div('home_number', css_class='col-md-6'),
css_class='row'),
Div(
Div('parent_number', css_class='col-md-6'),
Div('parent_email', css_class='col-md-6'),
css_class='row'),
Div(
Div('address', css_class='col-md-12'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Halaqat Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('halaqat_class', css_class='col-md-6'),
Div('status', css_class='col-md-6'), css_class='row'),
Div(
Div('chapter_memorized', css_class='col-md-6'),
Div('chapter_memorized_with_center', css_class='col-md-6'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default',
),
)
class Meta:
model = Student
fields = ('dob', 'gender', 'civil_id', 'mobile_number', 'home_number',
'parent_number', 'grade', 'school', 'nationality', 'address',
'parent_email', 'halaqat_class', 'chapter_memorized',
'chapter_memorized_with_center', 'status')
class StudentChangeForm(forms.ModelForm):
dob = forms.DateField(widget=forms.DateInput(
attrs={'class': 'datepicker'}),
label=_('DOB'))
address = forms.CharField(widget=forms.Textarea(),
label=_('Address'))
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div(
HTML(_('<h3 class="panel-title">Basic Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('civil_id', css_class='col-md-6'),
Div('dob', css_class='col-md-6'), css_class='row'
),
Div(
Div('gender', css_class='col-md-6'),
Div('nationality', css_class='col-md-6'), css_class='row'
),
Div(
Div('school', css_class='col-md-6'),
Div('grade', css_class='col-md-6'), css_class='row'
), css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Contact Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('mobile_number', css_class='col-md-6'),
Div('home_number', css_class='col-md-6'), css_class='row'),
Div(
Div('parent_number', css_class='col-md-6'),
Div('parent_email', css_class='col-md-6'),
css_class='row'),
Div(Div('address', css_class='col-md-12'), css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Halaqat Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('halaqat_class', css_class='col-md-6'),
Div('status', css_class='col-md-6'), css_class='row'),
Div(
Div('chapter_memorized', css_class='col-md-6'),
Div('chapter_memorized_with_center', css_class='col-md-6'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default',
),
)
class Meta:
model = Student
fields = ('dob', 'gender', 'civil_id', 'mobile_number', 'home_number',
'parent_number', 'grade', 'school', 'nationality', 'address',
'parent_email', 'halaqat_class', 'chapter_memorized',
'chapter_memorized_with_center', 'status')
| mit |
punchagan/zulip | zerver/views/zephyr.py | 2 | 2717 | import base64
import logging
import re
import shlex
import subprocess
from typing import Optional
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import authenticated_json_view
from zerver.lib.ccache import make_ccache
from zerver.lib.pysa import mark_sanitized
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.users import get_api_key
from zerver.models import UserProfile
# Hack for mit.edu users whose Kerberos usernames don't match what they zephyr
# as. The key is for Kerberos and the value is for zephyr.
kerberos_alter_egos = {
"golem": "ctl",
}
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(
request: HttpRequest, user_profile: UserProfile, cred: Optional[str] = REQ(default=None)
) -> HttpResponse:
global kerberos_alter_egos
if cred is None:
return json_error(_("Could not find Kerberos credential"))
if not user_profile.realm.webathena_enabled:
return json_error(_("Webathena login not enabled"))
try:
parsed_cred = orjson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user in kerberos_alter_egos:
user = kerberos_alter_egos[user]
assert user == user_profile.email.split("@")[0]
# Limit characters in usernames to valid MIT usernames
# This is important for security since DNS is not secure.
assert re.match(r"^[a-z0-9_.-]+$", user) is not None
ccache = make_ccache(parsed_cred)
# 'user' has been verified to contain only benign characters that won't
# help with shell injection.
user = mark_sanitized(user)
# 'ccache' is only written to disk by the script and used as a kerberos
# credential cache file.
ccache = mark_sanitized(ccache)
except Exception:
return json_error(_("Invalid Kerberos cache"))
# TODO: Send these data via (say) RabbitMQ
try:
api_key = get_api_key(user_profile)
command = [
"/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache",
user,
api_key,
base64.b64encode(ccache).decode("utf-8"),
]
subprocess.check_call(
["ssh", settings.PERSONAL_ZMIRROR_SERVER, "--", " ".join(map(shlex.quote, command))]
)
except subprocess.CalledProcessError:
logging.exception("Error updating the user's ccache", stack_info=True)
return json_error(_("We were unable to set up mirroring for you"))
return json_success()
| apache-2.0 |
maurizi/otm-core | opentreemap/exporter/user.py | 12 | 4354 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import csv
import json
from datetime import datetime
from contextlib import contextmanager
from django.core.exceptions import ValidationError
from django.db.models import Q
from treemap.lib.dates import DATETIME_FORMAT
from treemap.models import User, Audit
from exporter.util import sanitize_unicode_record
def write_users(data_format, *args, **kwargs):
fn = _write_users_csv if data_format == 'csv' else _write_users_json
fn(*args, **kwargs)
def _write_users_csv(csv_obj, instance, min_join_ts=None, min_edit_ts=None):
field_names = ['username', 'email', 'first_name',
'last_name', 'email_hash',
'allow_email_contact', 'role', 'created', 'organization',
'last_edit_model', 'last_edit_model_id',
'last_edit_instance_id', 'last_edit_field',
'last_edit_previous_value', 'last_edit_current_value',
'last_edit_user_id', 'last_edit_action',
'last_edit_requires_auth', 'last_edit_ref',
'last_edit_created']
writer = csv.DictWriter(csv_obj, field_names)
writer.writeheader()
for user in _users_export(instance, min_join_ts, min_edit_ts):
writer.writerow(_user_as_dict(user, instance))
def _write_users_json(json_obj, instance, min_join_ts=None, min_edit_ts=None):
users = _users_export(instance, min_join_ts, min_edit_ts)
users_list = [_user_as_dict(user, instance) for user in users]
json_obj.write(json.dumps(users_list))
def _users_export(instance, min_join_ts, min_edit_ts):
users = User.objects.filter(instance=instance)\
.order_by('username')
if min_join_ts:
with _date_filter(min_join_ts, 'minJoinDate') as min_join_date:
iuser_ids = Audit.objects.filter(instance=instance)\
.filter(model='InstanceUser')\
.filter(created__gt=min_join_date)\
.distinct('model_id')\
.values_list('model_id', flat=True)
users = users.filter(instanceuser__in=iuser_ids)
if min_edit_ts:
with _date_filter(min_edit_ts, 'minEditDate') as min_edit_date:
user_ids = Audit.objects\
.filter(instance=instance)\
.filter(Q(created__gt=min_edit_date) |
Q(updated__gt=min_edit_date))\
.distinct('user')\
.values_list('user_id', flat=True)
users = users.filter(id__in=user_ids)
return users
def _user_as_dict(user, instance):
iuser = user.get_instance_user(instance)
role_name = None
if iuser:
role_name = iuser.role.name
email = ''
if user.allow_email_contact:
email = user.email
modeldata = {'username': user.username,
'organization': user.get_organization(),
'first_name': user.get_first_name(),
'last_name': user.get_last_name(),
'email': email,
'email_hash': user.email_hash,
'allow_email_contact': str(user.allow_email_contact),
'created': str(user.created),
'role': role_name}
last_edits = Audit.objects.filter(instance=instance,
user=user)\
.order_by('-updated')[:1]
if last_edits:
last_edit = last_edits[0]
modeldata.update({'last_edit_%s' % k: v
for (k, v) in last_edit.dict().iteritems()})
return sanitize_unicode_record(modeldata)
@contextmanager
def _date_filter(timestamp, filter_name):
try:
filter_date = datetime.strptime(timestamp, DATETIME_FORMAT)
except ValueError:
raise ValidationError("%(filter_name)s='%(ts)s' not a valid timestamp "
"of format: %(format)s"
% {"ts": timestamp,
"format": DATETIME_FORMAT,
"filter_name": filter_name})
yield filter_date
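# Usage sketch (the timestamp shown is only illustrative; the accepted format
# is whatever DATETIME_FORMAT from treemap.lib.dates specifies):
#   with _date_filter('2015-06-01 00:00:00', 'minJoinDate') as min_join_date:
#       audits = Audit.objects.filter(created__gt=min_join_date)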
| agpl-3.0 |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.py | 229 | 1438 | from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.fcompiler import FCompiler
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
compiler_type = 'lahey'
description = 'Lahey/Fujitsu Fortran 95 Compiler'
version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["lf95", "--fix"],
'compiler_fix' : ["lf95", "--fix"],
'compiler_f90' : ["lf95"],
'linker_so' : ["lf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g', '--chk', '--chkglobal']
def get_library_dirs(self):
opt = []
d = os.environ.get('LAHEY')
if d:
opt.append(os.path.join(d, 'lib'))
return opt
def get_libraries(self):
opt = []
opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='lahey')
compiler.customize()
print(compiler.get_version())
| artistic-2.0 |