romain-li/edx-platform | common/lib/xmodule/xmodule/textannotation_module.py | copies: 3 | size: 6835 | license: agpl-3.0

"""Text annotation module"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class AnnotatableFields(object):
"""Fields for `TextModule` and `TextDescriptor`."""
data = String(
help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default=_('Text Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='imagery:red,parallelism:blue',
)
source = String(
display_name=_("Source/Citation"),
help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
scope=Scope.settings,
default='None',
)
diacritics = String(
display_name=_("Diacritic Marks"),
help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
scope=Scope.settings,
default='',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class TextAnnotationModule(AnnotatableFields, XModule):
''' Text Annotation Module '''
js = {'coffee': [],
'js': []}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'textannotation'
def __init__(self, *args, **kwargs):
super(TextAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.content = etree.tostring(xmltree, encoding='unicode')
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'course_key': self.runtime.course_id,
'display_name': self.display_name_with_default_escaped,
'tag': self.instructor_tags,
'source': self.source,
'instructions_html': self.instructions,
'content_html': self.content,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'diacritic_marks': self.diacritics,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('textannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
        # get_real_user is always None in Studio since its runtime contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
''' Text Annotation Descriptor '''
module_class = TextAnnotationModule
resources_dir = None
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
TextAnnotationDescriptor.annotation_storage_url,
TextAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
mcanthony/node-gyp | gyp/pylib/gyp/input.py | copies: 292 | size: 114315

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
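# Illustrative sketch (not part of the original source) of the heuristic
# above, assuming path_sections has not yet been populated by a generator:
#   IsPathSection('include_dirs')  -> True   (tail '_dirs' reduces to '_dir')
#   IsPathSection('include_dirs+') -> True   (trailing '+' is stripped first)
#   IsPathSection('outputs')       -> False  (no '_' and not in path_sections)
#   IsPathSection('cflags')        -> False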
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
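# Illustrative sketch of the expansion above: with multiple_toolsets enabled,
#   {'target_name': 'foo', 'toolsets': ['host', 'target']}
# becomes two target entries, one with 'toolset': 'host' and a deep copy with
# 'toolset': 'target'.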
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
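# Illustrative sketch of the canonical-int check above:
#   IsStrCanonicalInt('10')  -> True
#   IsStrCanonicalInt('-10') -> True
#   IsStrCanonicalInt('017') -> False  (str(int('017')) != '017')
#   IsStrCanonicalInt('-0')  -> False
#   IsStrCanonicalInt(10)    -> False  (only str instances qualify)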
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
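# Illustrative sketch: on win32, FixupPlatformCommand('cat foo.txt') returns
# 'type foo.txt' (for list commands, only the first element is rewritten);
# on other platforms the command is returned unchanged.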
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller must
    # then be expecting a list in return; not all callers are, because
    # not all are working in list context. Also, for list expansions,
    # there can be no other text besides the variable expansion in the
    # input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
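# Illustrative expansions (sketch, assuming phase=PHASE_EARLY and
# variables={'foo': 'bar', 'num': '3'}):
#   ExpandVariables('<(foo)', ...)      -> 'bar'
#   ExpandVariables('a_<(foo)', ...)    -> 'a_bar'
#   ExpandVariables('<(num)', ...)      -> 3   (canonical ints become ints)
#   ExpandVariables('<!(echo hi)', ...) -> 'hi' (command output, cached)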
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
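# Illustrative sketch of a conditions entry handled above:
#   'conditions': [
#     ['OS=="mac"', {'defines': ['MAC']}, {'defines': ['NOT_MAC']}],
#   ]
# merges the first dict into the_dict when OS=="mac" evaluates to true, and
# the optional second (false) dict otherwise.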
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any key in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
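# Illustrative sketch of the '%' semantics above: a 'variables' entry such as
# {'foo%': 'default'} sets variables['foo'] = 'default' only when 'foo' is
# not already present in |variables|.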
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
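# Illustrative examples of the two helpers above:
#   Unify(['a', 'b', 'a'])       -> ['a', 'b']
#   Filter(['a', 'b', 'a'], 'a') -> ['b']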
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
    # It's unfortunate that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
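# Minimal sketch (illustrative only; target names are hypothetical): this
# hand-builds the two-node graph that BuildDependencyList would create for
# "A depends on B".  FlattenToList orders refs dependencies-first.
def _DependencyGraphNodeExample():
  root = DependencyGraphNode(None)
  node_a = DependencyGraphNode('a.gyp:A')
  node_b = DependencyGraphNode('b.gyp:B')
  # B has no dependencies of its own, so it hangs off the root node.
  node_b.dependencies.append(root)
  root.dependents.append(node_b)
  # A depends on B.
  node_a.dependencies.append(node_b)
  node_b.dependents.append(node_a)
  assert root.FlattenToList() == ['b.gyp:B', 'a.gyp:A']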
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
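# Illustrative usage (hypothetical qualified names, not from the original
# source): the returned flat list is ordered dependencies-first, which later
# passes such as DoDependentSettings rely on.
def _BuildDependencyListExample():
  targets = {
    'a.gyp:A#target': {'dependencies': ['b.gyp:B#target']},
    'b.gyp:B#target': {},
  }
  dependency_nodes, flat_list = BuildDependencyList(targets)
  assert flat_list == ['b.gyp:B#target', 'a.gyp:A#target']
  assert 'a.gyp:A#target' in dependency_nodes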
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
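# Worked example (illustrative; file names are hypothetical). An item merged
# from third_party/lib/lib.gyp into app/app.gyp must be re-anchored so that it
# still names the same file when interpreted relative to app/:
#   MakePathRelative('app/app.gyp', 'third_party/lib/lib.gyp', 'src/foo.cc')
#     -> '../third_party/lib/src/foo.cc'
# Items such as '-lpthread' or '$(SDKROOT)' match exception_re above and are
# returned unmodified.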
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None; Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
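# A minimal sketch of the singleton semantics (illustrative only, not part of
# the original source; identical to/fro files are used so no path fixing
# occurs).
def _MergeListsExample():
  # Strings that don't begin with '-' are singletons: merging keeps the
  # earliest occurrence.  Flag-like strings beginning with '-' may repeat.
  to = ['foo.cc', '-lm']
  MergeLists(to, ['foo.cc', 'bar.cc', '-lm'], 'a.gyp', 'a.gyp')
  assert to == ['foo.cc', '-lm', 'bar.cc', '-lm']
  # With append=False, items are prepended in order, and a prepended
  # singleton displaces any existing copy.
  to = ['a.cc', 'b.cc']
  MergeLists(to, ['b.cc', 'c.cc'], 'a.gyp', 'a.gyp', append=False)
  assert to == ['b.cc', 'c.cc', 'a.cc']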
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
      # It makes no sense to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
            ' for key ' + list_base + ' (' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
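# A minimal sketch of the policy suffixes (illustrative only, not part of the
# original source; assumes the generator globals consulted by IsPathSection
# have been initialized via SetGeneratorGlobals, and uses identical to/fro
# files so no path fixing occurs).
def _MergeDictsPolicyExample():
  to = {'defines': ['A'], 'sources': ['old.cc'], 'cflags': ['-O2']}
  fro = {'defines+': ['B'],       # prepend  -> ['B', 'A']
         'sources=': ['new.cc'],  # replace  -> ['new.cc']
         'cflags?': ['-O0'],      # 'cflags' already exists, so ignored
         'ldflags': ['-pie']}     # plain key appends, creating the list
  MergeDicts(to, fro, 'a.gyp', 'a.gyp')
  assert to['defines'] == ['B', 'A']
  assert to['sources'] == ['new.cc']
  assert to['cflags'] == ['-O2']
  assert to['ldflags'] == ['-pie']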
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
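# Illustrative sketch (hypothetical target dict, not from the original
# source): an abstract base configuration is merged into its children through
# 'inherit_from' and is then dropped entirely.
#   target_dict['configurations'] = {
#     'Base': {'abstract': 1, 'defines': ['COMMON']},
#     'Debug': {'inherit_from': ['Base'], 'defines': ['DEBUG']},
#   }
# After SetUpConfigurations, 'Base' no longer appears and the 'Debug'
# configuration carries defines ['COMMON', 'DEBUG'], because parents are
# merged in before the configuration's own settings.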
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
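# A minimal end-to-end sketch (illustrative only, not part of the original
# source): exclude foo.cc by name and everything ending in _win.cc by regex,
# then bring foo_win.cc back with an 'include' regex.
def _ProcessListFiltersExample():
  d = {
    'sources': ['foo.cc', 'foo_win.cc', 'bar_win.cc'],
    'sources!': ['foo.cc'],
    'sources/': [['exclude', '_win\\.cc$'], ['include', 'foo_win']],
  }
  ProcessListFiltersInDict('example', d)
  assert d['sources'] == ['foo_win.cc']
  assert d['sources_excluded'] == ['foo.cc', 'bar_win.cc']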
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
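# Minimal sketch (illustrative only, not part of the original source): both
# keys and values are converted, recursively.
def _TurnIntIntoStrExample():
  d = {'copies': 2, 1: ['a', 3, {'n': 4}]}
  TurnIntIntoStrInDict(d)
  assert d == {'copies': '2', '1': ['a', '3', {'n': '4'}]}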
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
    # Separate out 'path/to/file.gyp', 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
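# Illustrative check (hypothetical paths, not from the original source): the
# same target name is fine in different directories, but not within one.
def _VerifyNoCollidingTargetsExample():
  VerifyNoCollidingTargets(['a/foo.gyp:lib', 'b/foo.gyp:lib'])  # OK
  try:
    VerifyNoCollidingTargets(['a/x.gyp:lib', 'a/y.gyp:lib'])
    assert False, 'expected GypError'
  except GypError:
    pass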
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
  # A generator can specify additional lists (besides 'sources') to be
  # processed for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| mit |
dhhagan/PAM | Python/PAM.py | 1 | 5037 | #PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
    FILE.close()
return cols, indexToName, lineNum, shortLines
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
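def numericalSortExample():
    # Illustrative only (not part of the original module): numericalSort is
    # meant as a sort key so that embedded run numbers compare numerically,
    # e.g. run_10 sorts after run_2 instead of lexicographically before it.
    files = ['run_10.csv', 'run_2.csv', 'run_1.csv']
    assert sorted(files, key=numericalSort) == \
        ['run_1.csv', 'run_2.csv', 'run_10.csv']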
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
    fig = plt.figure('Gas Concentration Readings for East St. Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
        FILE.close()
return goodFiles, irregFiles
| mit |
pkainz/pylearn2 | pylearn2/scripts/datasets/make_cifar100_patches_8x8.py | 41 | 2282 | """
This script makes a dataset of two million approximately whitened patches,
extracted at random uniformly from the CIFAR-100 train dataset.
This script is intended to reproduce the preprocessing used by Adam Coates
et. al. in their work from the first half of 2011 on the CIFAR-10 and
STL-10 datasets.
"""
from __future__ import print_function
from pylearn2.utils import serial
from pylearn2.datasets import preprocessing
from pylearn2.datasets.cifar100 import CIFAR100
from pylearn2.utils import string
data_dir = string.preprocess('${PYLEARN2_DATA_PATH}')
print('Loading CIFAR-100 train dataset...')
data = CIFAR100(which_set='train')
print("Preparing output directory...")
patch_dir = data_dir + '/cifar100/cifar100_patches_8x8'
serial.mkdir(patch_dir)
README = open(patch_dir + '/README', 'w')
README.write("""
The .pkl files in this directory may be opened in python using
cPickle, pickle, or pylearn2.serial.load.
data.pkl contains a pylearn2 Dataset object defining an unlabeled
dataset of 2 million 8x8 approximately whitened, contrast-normalized
patches drawn uniformly at random from the CIFAR-100 train set.
preprocessor.pkl contains a pylearn2 Pipeline object that was used
to extract the patches and approximately whiten / contrast normalize
them. This object is necessary when extracting features for
supervised learning or test set classification, because the
extracted features must be computed using inputs that have been
whitened with the ZCA matrix learned and stored by this Pipeline.
They were created with the pylearn2 script make_cifar100_patches.py.
All other files in this directory, including this README, were
created by the same script and are necessary for the other files
to function correctly.
""")
README.close()
print("Preprocessing the data...")
pipeline = preprocessing.Pipeline()
pipeline.items.append(
preprocessing.ExtractPatches(patch_shape=(8, 8), num_patches=2*1000*1000))
pipeline.items.append(
preprocessing.GlobalContrastNormalization(sqrt_bias=10., use_std=True))
pipeline.items.append(preprocessing.ZCA())
data.apply_preprocessor(preprocessor=pipeline, can_fit=True)
data.use_design_loc(patch_dir + '/data.npy')
serial.save(patch_dir + '/data.pkl', data)
serial.save(patch_dir + '/preprocessor.pkl', pipeline)
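# Illustrative follow-up (not part of the original script; assumes the same
# pylearn2 API used above): to reuse the fitted pipeline on held-out data,
# load it and apply it with can_fit=False so that the stored ZCA statistics
# are reused rather than re-estimated.
#
#     pipeline = serial.load(patch_dir + '/preprocessor.pkl')
#     test = CIFAR100(which_set='test')
#     test.apply_preprocessor(preprocessor=pipeline, can_fit=False)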
| bsd-3-clause |
MRCSDZ/subtitols | includes/fckeditor/editor/filemanager/browser/default/connectors/py/connector.py | 11 | 22691 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python.
Tested With:
Standard:
Python 2.3.3
Zope:
Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2)
Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25)
[GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)]
System Platform: linux2
"""
"""
Author Notes (04 December 2005):
This module has gone through quite a few phases of change. Obviously,
I am only supporting that part of the code that I use. Initially
I had the upload directory as a part of zope (ie. uploading files
directly into Zope), before realising that there were too many
complex intricacies within Zope to deal with. Zope is one ugly piece
of code. So I decided to complement Zope by an Apache server (which
I had running anyway, and doing nothing). So I mapped all uploads
from an arbitrary server directory to an arbitrary web directory.
All the FCKeditor uploading occurred this way, and I didn't have to
stuff around with fiddling with Zope objects and the like (which are
terribly complex and something you don't want to do - trust me).
Maybe a Zope expert can touch up the Zope components. In the end,
I had FCKeditor loaded in Zope (probably a bad idea as well), and
I replaced the connector.py with an alias to a server module.
Right now, all Zope components will simply remain as is because
I've had enough of Zope.
See notes right at the end of this file for how I aliased out of Zope.
Anyway, most of you probably won't use Zope, so things are pretty
simple in that regard.
Typically, SERVER_DIR is the root of WEB_DIR (not necessarily).
Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR.
"""
import cgi
import re
import os
import string
"""
escape
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as &lt; &gt; and &amp; respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
def escape(text, replace=string.replace):
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
return text
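# Example (editor addition): with the corrected replacements above,
# escape('<a href="x">&') returns '&lt;a href=&quot;x&quot;&gt;&amp;'.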
"""
getFCKeditorConnector
Creates a new instance of an FCKeditorConnector, and runs it
"""
def getFCKeditorConnector(context=None):
# Called from Zope. Passes the context through
connector = FCKeditorConnector(context=context)
return connector.run()
"""
FCKeditorRequest
A wrapper around the request object
Can handle normal CGI request, or a Zope request
Extend as required
"""
class FCKeditorRequest(object):
def __init__(self, context=None):
if (context is not None):
r = context.REQUEST
else:
r = cgi.FieldStorage()
self.context = context
self.request = r
def isZope(self):
if (self.context is not None):
return True
return False
def has_key(self, key):
return self.request.has_key(key)
def get(self, key, default=None):
value = None
if (self.isZope()):
value = self.request.get(key, default)
else:
if key in self.request.keys():
value = self.request[key].value
else:
value = default
return value
"""
FCKeditorConnector
The connector class
"""
class FCKeditorConnector(object):
# Configuration for FCKEditor
# can point to another server here, if linked correctly
#WEB_HOST = "http://127.0.0.1/"
WEB_HOST = ""
SERVER_DIR = "/var/www/html/"
WEB_USERFILES_FOLDER = WEB_HOST + "upload/"
SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/"
# Allow access (Zope)
__allow_access_to_unprotected_subobjects__ = 1
# Class Attributes
parentFolderRe = re.compile("[\/][^\/]+[\/]?$")
"""
Constructor
"""
def __init__(self, context=None):
# The given root path will NOT be shown to the user
# Only the userFilesPath will be shown
# Instance Attributes
self.context = context
self.request = FCKeditorRequest(context=context)
self.rootPath = self.SERVER_DIR
self.userFilesFolder = self.SERVER_USERFILES_FOLDER
self.webUserFilesFolder = self.WEB_USERFILES_FOLDER
# Enables / Disables the connector
self.enabled = False # Set to True to enable this connector
# These are instance variables
self.zopeRootContext = None
self.zopeUploadContext = None
# Copied from php module =)
self.allowedExtensions = {
"File": None,
"Image": None,
"Flash": None,
"Media": None
}
self.deniedExtensions = {
"File": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Image": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Flash": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Media": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ]
}
"""
Zope specific functions
"""
def isZope(self):
# The context object is the zope object
if (self.context is not None):
return True
return False
def getZopeRootContext(self):
if self.zopeRootContext is None:
self.zopeRootContext = self.context.getPhysicalRoot()
return self.zopeRootContext
def getZopeUploadContext(self):
if self.zopeUploadContext is None:
folderNames = self.userFilesFolder.split("/")
c = self.getZopeRootContext()
for folderName in folderNames:
				if (folderName != ""):
c = c[folderName]
self.zopeUploadContext = c
return self.zopeUploadContext
"""
Generic manipulation functions
"""
def getUserFilesFolder(self):
return self.userFilesFolder
def getWebUserFilesFolder(self):
return self.webUserFilesFolder
def getAllowedExtensions(self, resourceType):
return self.allowedExtensions[resourceType]
def getDeniedExtensions(self, resourceType):
return self.deniedExtensions[resourceType]
def removeFromStart(self, string, char):
return string.lstrip(char)
def removeFromEnd(self, string, char):
return string.rstrip(char)
def convertToXmlAttribute(self, value):
if (value is None):
value = ""
return escape(value)
def convertToPath(self, path):
		if (path[-1] != "/"):
return path + "/"
else:
return path
def getUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getUserFilesFolder(),
resourceType,
path
)
return url
def getWebUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getWebUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getWebUserFilesFolder(),
resourceType,
path
)
return url
def removeExtension(self, fileName):
index = fileName.rindex(".")
newFileName = fileName[0:index]
return newFileName
def getExtension(self, fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension
def getParentFolder(self, folderPath):
parentFolderPath = self.parentFolderRe.sub('', folderPath)
return parentFolderPath
"""
serverMapFolder
Purpose: works out the folder map on the server
"""
def serverMapFolder(self, resourceType, folderPath):
# Get the resource type directory
resourceTypeFolder = "%s%s/" % (
self.getUserFilesFolder(),
resourceType
)
# Ensure that the directory exists
self.createServerFolder(resourceTypeFolder)
# Return the resource type directory combined with the
# required path
return "%s%s" % (
resourceTypeFolder,
self.removeFromStart(folderPath, '/')
)
"""
createServerFolder
Purpose: physically creates a folder on the server
"""
def createServerFolder(self, folderPath):
# Check if the parent exists
parentFolderPath = self.getParentFolder(folderPath)
if not(os.path.exists(parentFolderPath)):
errorMsg = self.createServerFolder(parentFolderPath)
if errorMsg is not None:
return errorMsg
# Check if this exists
if not(os.path.exists(folderPath)):
os.mkdir(folderPath)
os.chmod(folderPath, 0755)
errorMsg = None
else:
if os.path.isdir(folderPath):
errorMsg = None
else:
raise "createServerFolder: Non-folder of same name already exists"
return errorMsg
"""
getRootPath
Purpose: returns the root path on the server
"""
def getRootPath(self):
return self.rootPath
"""
setXmlHeaders
Purpose: to prepare the headers for the xml to return
"""
def setXmlHeaders(self):
#now = self.context.BS_get_now()
#yesterday = now - 1
self.setHeader("Content-Type", "text/xml")
#self.setHeader("Expires", yesterday)
#self.setHeader("Last-Modified", now)
#self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate")
self.printHeaders()
return
def setHeader(self, key, value):
if (self.isZope()):
self.context.REQUEST.RESPONSE.setHeader(key, value)
else:
print "%s: %s" % (key, value)
return
def printHeaders(self):
# For non-Zope requests, we need to print an empty line
# to denote the end of headers
if (not(self.isZope())):
print ""
"""
createXmlFooter
Purpose: returns the xml header
"""
def createXmlHeader(self, command, resourceType, currentFolder):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
# Create the main connector node
s += """<Connector command="%s" resourceType="%s">""" % (
command,
resourceType
)
# Add the current folder node
s += """<CurrentFolder path="%s" url="%s" />""" % (
self.convertToXmlAttribute(currentFolder),
self.convertToXmlAttribute(
self.getWebUrlFromPath(
resourceType,
currentFolder
)
),
)
return s
"""
createXmlFooter
Purpose: returns the xml footer
"""
def createXmlFooter(self):
s = """</Connector>"""
return s
"""
sendError
Purpose: in the event of an error, return an xml based error
"""
def sendError(self, number, text):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
s += """<Connector>"""
s += """<Error number="%s" text="%s" />""" % (number, text)
s += """</Connector>"""
return s
"""
getFolders
	Purpose: command to receive a list of folders
"""
def getFolders(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFolders(resourceType, currentFolder)
else:
return self.getNonZopeFolders(resourceType, currentFolder)
def getZopeFolders(self, resourceType, currentFolder):
# Open the folders node
s = ""
s += """<Folders>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["Folder"]):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(name)
)
# Close the folders node
s += """</Folders>"""
return s
def getNonZopeFolders(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders node
s = ""
s += """<Folders>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
# Close the folders node
s += """</Folders>"""
return s
"""
getFoldersAndFiles
	Purpose: command to receive a list of folders and files
"""
def getFoldersAndFiles(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFoldersAndFiles(resourceType, currentFolder)
else:
return self.getNonZopeFoldersAndFiles(resourceType, currentFolder)
def getNonZopeFoldersAndFiles(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
files += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(someObject),
					size
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
# Return it
s = folders + files
return s
def getZopeFoldersAndFiles(self, resourceType, currentFolder):
folders = self.getZopeFolders(resourceType, currentFolder)
files = self.getZopeFiles(resourceType, currentFolder)
s = folders + files
return s
def getZopeFiles(self, resourceType, currentFolder):
# Open the files node
s = ""
s += """<Files>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["File","Image"]):
s += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(name),
((o.get_size() / 1024) + 1)
)
# Close the files node
s += """</Files>"""
return s
def findZopeFolder(self, resourceType, folderName):
# returns the context of the resource / folder
zopeFolder = self.getZopeUploadContext()
folderName = self.removeFromStart(folderName, "/")
folderName = self.removeFromEnd(folderName, "/")
		if (resourceType != ""):
try:
zopeFolder = zopeFolder[resourceType]
except:
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
zopeFolder = zopeFolder[resourceType]
		if (folderName != ""):
folderNames = folderName.split("/")
for folderName in folderNames:
zopeFolder = zopeFolder[folderName]
return zopeFolder
"""
createFolder
Purpose: command to create a new folder
"""
def createFolder(self, resourceType, currentFolder):
if (self.isZope()):
return self.createZopeFolder(resourceType, currentFolder)
else:
return self.createNonZopeFolder(resourceType, currentFolder)
def createZopeFolder(self, resourceType, currentFolder):
# Find out where we are
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
def createNonZopeFolder(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
try:
newFolderPath = currentFolderPath + newFolder
errorMsg = self.createServerFolder(newFolderPath)
if (errorMsg is not None):
errorNo = 110
except:
errorNo = 103
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
"""
getFileName
	Purpose: helper function to extract the filename
"""
def getFileName(self, filename):
for splitChar in ["/", "\\"]:
array = filename.split(splitChar)
if (len(array) > 1):
filename = array[-1]
return filename
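	# Example (editor addition): getFileName("C:\\docs\\photo.jpg") and
	# getFileName("/tmp/photo.jpg") both return "photo.jpg".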
"""
fileUpload
Purpose: command to upload files to server
"""
def fileUpload(self, resourceType, currentFolder):
if (self.isZope()):
return self.zopeFileUpload(resourceType, currentFolder)
else:
return self.nonZopeFileUpload(resourceType, currentFolder)
def zopeFileUpload(self, resourceType, currentFolder, count=None):
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
file = self.request.get("NewFile", None)
fileName = self.getFileName(file.filename)
fileNameOnly = self.removeExtension(fileName)
fileExtension = self.getExtension(fileName).lower()
if (count):
nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
else:
nid = fileName
title = nid
try:
zopeFolder.manage_addProduct['OFSP'].manage_addFile(
id=nid,
title=title,
file=file.read()
)
except:
if (count):
count += 1
else:
count = 1
self.zopeFileUpload(resourceType, currentFolder, count)
return
def nonZopeFileUpload(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileNameOnly = self.removeExtension(newFileName)
newFileExtension = self.getExtension(newFileName).lower()
allowedExtensions = self.getAllowedExtensions(resourceType)
deniedExtensions = self.getDeniedExtensions(resourceType)
if (allowedExtensions is not None):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions is not None):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
if (self.isZope()):
# Upload into zope
self.zopeFileUpload(resourceType, currentFolder)
else:
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
i = 0
while (True):
newFilePath = "%s%s" % (
currentFolderPath,
newFileName
)
if os.path.exists(newFilePath):
i += 1
newFilePath = "%s%s(%s).%s" % (
currentFolderPath,
newFileNameOnly,
i,
newFileExtension
)
errorNo = 201
break
else:
							fileHandle = open(newFilePath,'wb')
linecount = 0
while (1):
#line = newFile.file.readline()
line = newFile.readline()
if not line: break
fileHandle.write("%s" % line)
linecount += 1
os.chmod(newFilePath, 0777)
break
else:
newFileName = "Extension not allowed"
errorNo = 203
else:
newFileName = "No File"
errorNo = 202
string = """
<script type="text/javascript">
window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s");
</script>
""" % (
errorNo,
newFileName.replace('"',"'")
)
		return result
def run(self):
s = ""
try:
# Check if this is disabled
if not(self.enabled):
return self.sendError(1, "This connector is disabled. Please check the connector configurations and try again")
# Make sure we have valid inputs
if not(
(self.request.has_key("Command")) and
(self.request.has_key("Type")) and
(self.request.has_key("CurrentFolder"))
):
return
# Get command
command = self.request.get("Command", None)
# Get resource type
resourceType = self.request.get("Type", None)
# folder syntax must start and end with "/"
currentFolder = self.request.get("CurrentFolder", None)
			if (currentFolder[-1] != "/"):
currentFolder += "/"
			if (currentFolder[0] != "/"):
currentFolder = "/" + currentFolder
# Check for invalid paths
if (".." in currentFolder):
return self.sendError(102, "")
			# File upload doesn't have to return XML, so intercept
			# here
if (command == "FileUpload"):
return self.fileUpload(resourceType, currentFolder)
# Begin XML
s += self.createXmlHeader(command, resourceType, currentFolder)
# Execute the command
if (command == "GetFolders"):
f = self.getFolders
elif (command == "GetFoldersAndFiles"):
f = self.getFoldersAndFiles
elif (command == "CreateFolder"):
f = self.createFolder
else:
f = None
if (f is not None):
s += f(resourceType, currentFolder)
s += self.createXmlFooter()
except Exception, e:
s = "ERROR: %s" % e
return s
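	# Hedged example (editor addition): a typical request dispatched by run()
	# looks like
	#     connector.py?Command=GetFoldersAndFiles&Type=Image&CurrentFolder=/
	# and the response is the XML built from createXmlHeader(), the matching
	# command handler and createXmlFooter().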
# Running from command line
if __name__ == '__main__':
# To test the output, uncomment the standard headers
#print "Content-Type: text/html"
#print ""
print getFCKeditorConnector()
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.connector as connector
return connector.getFCKeditorConnector(context=context).run()
"""
| gpl-3.0 |
sguazt/prometheus | tools/giws/datatypes/stringDataGiws.py | 1 | 10567 | #!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <[email protected]> <[email protected]>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from datatypes.dataGiws import dataGiws
from configGiws import configGiws
from JNIFrameWork import JNIFrameWork
class stringDataGiws(dataGiws):
nativeType="char *"
callMethod="CallObjectMethod"
callStaticMethod="CallStaticObjectMethod"
temporaryVariableName="myStringBuffer"
def getTypeSignature(self):
return "Ljava/lang/String;"
def getJavaTypeSyntax(self):
if self.isArray():
return "jobjectArray"
else:
return "jstring"
def getRealJavaType(self):
return "java.lang.String"
def getDescription(self):
return "Java String"
def getNativeType(self, ForceNotArray=False, UseConst=False):
if self.isArray():
if UseConst:
pointer = " const*"
else:
pointer = "*"
return ("char" + pointer) + pointer * self.getDimensionArray()
else:
if UseConst:
pointer = " const*"
else:
pointer = "*"
return "char" + pointer
def __errorMemoryString(self, detachThread):
        # Management of the error when not enough memory to create the string
if configGiws().getThrowsException():
errorMgntMemBis="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMemBis="""std::cerr << "Could not convert C string to Java UTF string, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
return errorMgntMemBis
def specificPreProcessing(self, parameter, detachThread):
""" Overrides the preprocessing of the array """
name=parameter.getName()
        # Management of the error when not enough memory to create the string
if configGiws().getThrowsException():
errorMgntMem="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMem="""std::cerr << "Could not allocate Java string array, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
errorMgntMemBis = self.__errorMemoryString(detachThread)
if self.isArray():
if self.getDimensionArray() == 1:
return """
// create java array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, stringArrayClass, NULL);
if (%s_ == NULL)
{
%s
}
// convert each char * to java strings and fill the java array.
for ( int i = 0; i < %sSize; i++)
{
jstring TempString = curEnv->NewStringUTF( %s[i] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %s_, i, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}"""%(name,name,name,errorMgntMem,name,name,errorMgntMemBis,name)
else:
return """
// create java array of array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, curEnv->FindClass("[Ljava/lang/String;"), NULL);
if (%s_ == NULL)
{
%s
}
for ( int i = 0; i < %sSize; i++)
{
jobjectArray %sLocal = curEnv->NewObjectArray( %sSizeCol, stringArrayClass, NULL);
// convert each char * to java strings and fill the java array.
for ( int j = 0; j < %sSizeCol; j++) {
jstring TempString = curEnv->NewStringUTF( %s[i][j] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %sLocal, j, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}
curEnv->SetObjectArrayElement(%s_, i, %sLocal);
curEnv->DeleteLocalRef(%sLocal);
}"""%(name,name,name,errorMgntMem,name,name,name,name,name,errorMgntMemBis,name,name,name,name)
else:
# Need to store is for the post processing (delete)
self.parameterName=name
tempName=name+"_"
return """
jstring %s = curEnv->NewStringUTF( %s );
if (%s != NULL && %s == NULL)
{
%s
}
"""%(tempName,name,name,tempName,errorMgntMemBis)
def specificPostProcessing(self, detachThread):
""" Called when we are returning a string or an array of string """
# We are doing an exception check here JUST in this case because
# in methodGiws::__createMethodBody we usually do it at the end
# of the method just after deleting the variable
# but when dealing with string, in this method, we are calling some
# methods which override the "exception engine" which drive the JNI
# engine crazy.
str=JNIFrameWork().getExceptionCheckProfile(detachThread)
str=str+"if (res != NULL) { "
if self.isArray():
strCommon=""
strDeclaration=""
if configGiws().getDisableReturnSize()==True:
strCommon+="int lenRow;"
else:
# The size of the array is returned as output argument of the function
strDeclaration="*"
strCommon+="""
%s lenRow = curEnv->GetArrayLength(res);
"""%(strDeclaration)
self.temporaryVariableName="arrayOfString"
if self.getDimensionArray() == 1:
str+=strCommon+"""
char **arrayOfString;
arrayOfString = new char *[%slenRow];
for (jsize i = 0; i < %slenRow; i++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(res, i));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
"""%(strDeclaration, strDeclaration)
return str
else:
if configGiws().getDisableReturnSize()==True:
str+="int lenCol;"
str+=strCommon+"""
char ***arrayOfString;
arrayOfString = new char **[%slenRow];
for (jsize i = 0; i < %slenRow; i++){ /* Line of the array */
jobjectArray resStringLine = reinterpret_cast<jobjectArray>(curEnv->GetObjectArrayElement(res, i));
%slenCol = curEnv->GetArrayLength(resStringLine);
arrayOfString[i]=new char*[%slenCol];
for (jsize j = 0; j < %slenCol; j++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(resStringLine, j));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i][j] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i][j], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
curEnv->DeleteLocalRef(resStringLine);
}
"""%(strDeclaration, strDeclaration, strDeclaration, strDeclaration, strDeclaration)
return str
else:
if hasattr(self,"parameterName"):
str+="""curEnv->DeleteLocalRef(%s);"""%(self.parameterName+"_")
str=str+"""
const char *tempString = curEnv->GetStringUTFChars(res, 0);
char * %s = new char[strlen(tempString) + 1];
strcpy(%s, tempString);
curEnv->ReleaseStringUTFChars(res, tempString);
curEnv->DeleteLocalRef(res);
"""%(self.temporaryVariableName, self.temporaryVariableName)
return str
def getReturnSyntax(self):
str=""
if self.isArray():
str = str + """
curEnv->DeleteLocalRef(res);
return arrayOfString;
"""
else:
str = str + """
return %s;
"""%(self.temporaryVariableName)
str = str + """ } else {
curEnv->DeleteLocalRef(res);
return NULL;
}"""
return str
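    # Hedged illustration (editor addition): for a non-array string, the
    # snippets assembled above roughly generate C++ of the form
    #     if (res != NULL) { ...copy into myStringBuffer...; return myStringBuffer; }
    #     else { curEnv->DeleteLocalRef(res); return NULL; }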
| apache-2.0 |
vitan/django | tests/template_tests/syntax_tests/test_invalid_string.py | 46 | 2257 | from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
@setup({'invalidstr01': '{{ var|default:"Foo" }}'})
def test_invalidstr01(self):
output = self.engine.render_to_string('invalidstr01')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, 'Foo')
@setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
def test_invalidstr02(self):
output = self.engine.render_to_string('invalidstr02')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
def test_invalidstr03(self):
output = self.engine.render_to_string('invalidstr03')
self.assertEqual(output, '')
@setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
def test_invalidstr04(self):
output = self.engine.render_to_string('invalidstr04')
self.assertEqual(output, 'No')
@setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
def test_invalidstr04_2(self):
output = self.engine.render_to_string('invalidstr04_2')
self.assertEqual(output, 'Yes')
@setup({'invalidstr05': '{{ var }}'})
def test_invalidstr05(self):
output = self.engine.render_to_string('invalidstr05')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr06': '{{ var.prop }}'})
def test_invalidstr06(self):
output = self.engine.render_to_string('invalidstr06')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
def test_invalidstr07(self):
output = self.engine.render_to_string('invalidstr07')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
| bsd-3-clause |
ypid/series60-remote | pc/lib/log.py | 1 | 1490 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2009 Lukas Hetzenecker <[email protected]>
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import logging
class QtStreamHandler(logging.Handler):
def __init__(self, parent, main):
logging.Handler.__init__(self)
self.parent = parent
self.main = main
self.textWidget = parent
self.formater = logging.Formatter("%(message)s")
def setFormatter(self, format):
self.formater = format
def createLock(self):
self.mutex = QMutex()
def acquire(self):
self.mutex.lock()
def release(self):
self.mutex.unlock()
def emit(self,record):
self.textWidget.appendPlainText(self.formater.format(record))
self.textWidget.moveCursor(QTextCursor.StartOfLine)
self.textWidget.ensureCursorVisible()
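# Hedged usage sketch (editor addition): wiring the handler into the stdlib
# logging machinery; `parent` is assumed to be a QPlainTextEdit-like widget,
# as implied by appendPlainText() above.
#
#     handler = QtStreamHandler(log_widget, main_window)
#     handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
#     logging.getLogger().addHandler(handler)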
class QtOutput(object):
def __init__(self, parent, out=None, color=None):
self.textWidget = parent
self.out = out
self.color = color
def write(self, m):
self.textWidget.moveCursor(QTextCursor.End)
if self.color:
tc = self.textWidget.textColor()
self.textWidget.setTextColor(self.color)
self.textWidget.insertPlainText( m )
if self.color:
self.textWidget.setTextColor(tc)
if self.out:
if isinstance(m, unicode):
self.out.write(m.encode("utf8"))
else:
self.out.write(m)
| gpl-2.0 |
hmen89/odoo | addons/gamification/models/goal.py | 24 | 25742 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
import logging
import time
from datetime import date, datetime, timedelta
_logger = logging.getLogger(__name__)
class gamification_goal_definition(osv.Model):
"""Goal definition
A goal definition contains the way to evaluate an objective
Each module wanting to be able to set goals to the users needs to create
a new gamification_goal_definition
"""
_name = 'gamification.goal.definition'
_description = 'Gamification goal definition'
def _get_suffix(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for goal in self.browse(cr, uid, ids, context=context):
if goal.suffix and not goal.monetary:
res[goal.id] = goal.suffix
elif goal.monetary:
# use the current user's company currency
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if goal.suffix:
res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix)
else:
res[goal.id] = user.company_id.currency_id.symbol
else:
res[goal.id] = ""
return res
_columns = {
'name': fields.char('Goal Definition', required=True, translate=True),
'description': fields.text('Goal Description'),
'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."),
'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True),
'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"),
'computation_mode': fields.selection([
('manually', 'Recorded manually'),
('count', 'Automatic: number of records'),
('sum', 'Automatic: sum on a field'),
('python', 'Automatic: execute a specific Python code'),
],
string="Computation Mode",
help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.",
required=True),
'display_mode': fields.selection([
('progress', 'Progressive (using numerical values)'),
('boolean', 'Exclusive (done or not-done)'),
],
string="Displayed as", required=True),
'model_id': fields.many2one('ir.model',
string='Model',
help='The model object for the field to evaluate'),
'field_id': fields.many2one('ir.model.fields',
string='Field to Sum',
help='The field containing the value to evaluate'),
'field_date_id': fields.many2one('ir.model.fields',
string='Date Field',
help='The date to use for the time period evaluated'),
'domain': fields.char("Filter Domain",
help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.",
required=True),
'batch_mode': fields.boolean('Batch Mode',
help="Evaluate the expression in batch instead of once for each user"),
'batch_distinctive_field': fields.many2one('ir.model.fields',
string="Distinctive field for batch user",
help="In batch mode, this indicates which field distinct one user form the other, e.g. user_id, partner_id..."),
        'batch_user_expression': fields.char("Evaluated expression for batch mode",
help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."),
        'compute_code': fields.text('Python Code',
            help="Python code to be executed for each user. 'result' should contain the new current value. The evaluated user can be accessed through object.user_id."),
'condition': fields.selection([
('higher', 'The higher the better'),
('lower', 'The lower the better')
],
string='Goal Performance',
            help='Defines whether the goal is reached when the current value is at or above (higher) or at or below (lower) the target value',
required=True),
'action_id': fields.many2one('ir.actions.act_window', string="Action",
help="The action that will be called to update the goal value."),
'res_id_field': fields.char("ID Field of user",
help="The field name on the user profile (res.users) containing the value for res_id for action."),
}
_defaults = {
'condition': 'higher',
'computation_mode': 'manually',
'domain': "[]",
'monetary': False,
'display_mode': 'progress',
}
def number_following(self, cr, uid, model_name="mail.thread", context=None):
"""Return the number of 'model_name' objects the user is following
The model specified in 'model_name' must inherit from mail.thread
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)
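    # Hedged example (editor addition): a definition with
    # computation_mode='python' could call this helper from its compute_code,
    # where goal.update() below exposes 'pool', 'cr', 'uid' and 'context':
    #     result = pool.get('gamification.goal.definition').number_following(
    #         cr, uid, 'mail.thread', context=context)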
def _check_domain_validity(self, cr, uid, ids, context=None):
# take admin as should always be present
superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context)
for definition in self.browse(cr, uid, ids, context=context):
if definition.computation_mode not in ('count', 'sum'):
continue
obj = self.pool[definition.model_id.model]
try:
domain = safe_eval(definition.domain, {'user': superuser})
                # dummy search to make sure the domain is valid
obj.search(cr, uid, domain, context=context, count=True)
except (ValueError, SyntaxError), e:
msg = e.message or (e.msg + '\n' + e.text)
raise osv.except_osv(_('Error!'),_("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg)))
return True
def create(self, cr, uid, vals, context=None):
res_id = super(gamification_goal_definition, self).create(cr, uid, vals, context=context)
if vals.get('computation_mode') in ('count', 'sum'):
self._check_domain_validity(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context)
if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')):
self._check_domain_validity(cr, uid, ids, context=context)
return res
class gamification_goal(osv.Model):
"""Goal instance for a user
An individual goal for a user on a specified time period"""
_name = 'gamification.goal'
_description = 'Gamification goal instance'
def _get_completion(self, cr, uid, ids, field_name, arg, context=None):
"""Return the percentage of completeness of the goal, between 0 and 100"""
res = dict.fromkeys(ids, 0.0)
for goal in self.browse(cr, uid, ids, context=context):
if goal.definition_condition == 'higher':
if goal.current >= goal.target_goal:
res[goal.id] = 100.0
else:
res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2)
elif goal.current < goal.target_goal:
# a goal 'lower than' has only two values possible: 0 or 100%
res[goal.id] = 100.0
else:
res[goal.id] = 0.0
return res
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}}
_columns = {
'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"),
'user_id': fields.many2one('res.users', string='User', required=True),
'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"),
'challenge_id': fields.related('line_id', 'challenge_id',
string="Challenge",
type='many2one',
relation='gamification.challenge',
store=True, readonly=True,
help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."),
'start_date': fields.date('Start Date'),
'end_date': fields.date('End Date'), # no start and end = always active
'target_goal': fields.float('To Reach',
required=True,
track_visibility='always'), # no goal = global index
'current': fields.float('Current Value', required=True, track_visibility='always'),
'completeness': fields.function(_get_completion, type='float', string='Completeness'),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In progress'),
('reached', 'Reached'),
('failed', 'Failed'),
('canceled', 'Canceled'),
],
string='State',
required=True,
track_visibility='always'),
'to_update': fields.boolean('To update'),
'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."),
'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"),
'remind_update_delay': fields.integer('Remind delay',
help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."),
'last_update': fields.date('Last Update',
help="In case of manual goal, reminders are sent if the goal as not been updated for a while (defined in challenge). Ignored in case of non-manual goal or goal not linked to a challenge."),
'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True),
'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True),
'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True),
'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True),
}
_defaults = {
'current': 0,
'state': 'draft',
'start_date': fields.date.today,
}
_order = 'create_date desc, end_date desc, definition_id, id'
def _check_remind_delay(self, cr, uid, goal, context=None):
"""Verify if a goal has not been updated for some time and send a
        reminder message if needed.
:return: data to write on the goal object
"""
if goal.remind_update_delay and goal.last_update:
delta_max = timedelta(days=goal.remind_update_delay)
last_update = datetime.strptime(goal.last_update, DF).date()
if date.today() - last_update > delta_max:
# generate a remind report
temp_obj = self.pool.get('email.template')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_goal_reminder', context)
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.goal', goal.id, context=context)
self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment')
return {'to_update': True}
return {}
def update(self, cr, uid, ids, context=None):
"""Update the goals to recomputes values and change of states
If a manual goal is not updated for enough time, the user will be
reminded to do so (done only once, in 'inprogress' state).
If a goal reaches the target value, the status is set to reached
If the end date is passed (at least +1 day, time not considered) without
the target value being reached, the goal is set as failed."""
if context is None:
context = {}
commit = context.get('commit_gamification', False)
goals_by_definition = {}
all_goals = {}
for goal in self.browse(cr, uid, ids, context=context):
if goal.state in ('draft', 'canceled'):
# draft or canceled goals should not be recomputed
continue
goals_by_definition.setdefault(goal.definition_id, []).append(goal)
all_goals[goal.id] = goal
for definition, goals in goals_by_definition.items():
goals_to_write = dict((goal.id, {}) for goal in goals)
if definition.computation_mode == 'manually':
for goal in goals:
goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context))
elif definition.computation_mode == 'python':
# TODO batch execution
for goal in goals:
# execute the chosen method
cxt = {
'self': self.pool.get('gamification.goal'),
'object': goal,
'pool': self.pool,
'cr': cr,
'context': dict(context), # copy context to prevent side-effects of eval
'uid': uid,
'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time
}
code = definition.compute_code.strip()
safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result' local variable, propagated to the context
result = cxt.get('result')
if result is not None and type(result) in (float, int, long):
if result != goal.current:
goals_to_write[goal.id]['current'] = result
else:
_logger.exception(_('Invalid return content from the evaluation of code for definition %s' % definition.name))
else: # count or sum
obj = self.pool.get(definition.model_id.model)
field_date_name = definition.field_date_id and definition.field_date_id.name or False
if definition.computation_mode == 'count' and definition.batch_mode:
# batch mode, trying to do as much as possible in one request
general_domain = safe_eval(definition.domain)
field_name = definition.batch_distinctive_field.name
subqueries = {}
for goal in goals:
start_date = field_date_name and goal.start_date or False
end_date = field_date_name and goal.end_date or False
subqueries.setdefault((start_date, end_date), {}).update({goal.id:safe_eval(definition.batch_user_expression, {'user': goal.user_id})})
# the global query should be split by time periods (especially for recurrent goals)
for (start_date, end_date), query_goals in subqueries.items():
subquery_domain = list(general_domain)
subquery_domain.append((field_name, 'in', list(set(query_goals.values()))))
if start_date:
subquery_domain.append((field_date_name, '>=', start_date))
if end_date:
subquery_domain.append((field_date_name, '<=', end_date))
if field_name == 'id':
# grouping on id does not work and is similar to search anyway
user_ids = obj.search(cr, uid, subquery_domain, context=context)
user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids]
else:
user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context)
# user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
for goal in [g for g in goals if g.id in query_goals.keys()]:
for user_value in user_values:
queried_value = field_name in user_value and user_value[field_name] or False
if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)):
queried_value = queried_value[0]
if queried_value == query_goals[goal.id]:
new_value = user_value.get(field_name+'_count', goal.current)
if new_value != goal.current:
goals_to_write[goal.id]['current'] = new_value
else:
for goal in goals:
# eval the domain with user replaced by goal user object
domain = safe_eval(definition.domain, {'user': goal.user_id})
# add temporal clause(s) to the domain if fields are filled on the goal
if goal.start_date and field_date_name:
domain.append((field_date_name, '>=', goal.start_date))
if goal.end_date and field_date_name:
domain.append((field_date_name, '<=', goal.end_date))
if definition.computation_mode == 'sum':
field_name = definition.field_id.name
# TODO for master: group on user field in batch mode
res = obj.read_group(cr, uid, domain, [field_name], [], context=context)
new_value = res and res[0][field_name] or 0.0
else: # computation mode = count
new_value = obj.search(cr, uid, domain, context=context, count=True)
# avoid useless write if the new value is the same as the old one
if new_value != goal.current:
goals_to_write[goal.id]['current'] = new_value
for goal_id, value in goals_to_write.items():
if not value:
continue
goal = all_goals[goal_id]
# check goal target reached
if (goal.definition_id.condition == 'higher' and value.get('current', goal.current) >= goal.target_goal) \
or (goal.definition_id.condition == 'lower' and value.get('current', goal.current) <= goal.target_goal):
value['state'] = 'reached'
# check goal failure
elif goal.end_date and fields.date.today() > goal.end_date:
value['state'] = 'failed'
value['closed'] = True
if value:
self.write(cr, uid, [goal.id], value, context=context)
if commit:
cr.commit()
return True
def action_start(self, cr, uid, ids, context=None):
"""Mark a goal as started.
This should only be used when creating goals manually (in draft state)"""
self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
return self.update(cr, uid, ids, context=context)
def action_reach(self, cr, uid, ids, context=None):
"""Mark a goal as reached.
If the target goal condition is not met, the state will be reset to In
Progress at the next goal update until the end date."""
return self.write(cr, uid, ids, {'state': 'reached'}, context=context)
def action_fail(self, cr, uid, ids, context=None):
"""Set the state of the goal to failed.
A failed goal will be ignored in future checks."""
return self.write(cr, uid, ids, {'state': 'failed'}, context=context)
def action_cancel(self, cr, uid, ids, context=None):
"""Reset the completion after setting a goal as reached or failed.
        This only changes the current state; if the date and/or target criteria
        match the conditions for a change of state, the change will be applied at the
next goal update."""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add a 'no_remind_goal' field to True"""
context = dict(context or {})
context['no_remind_goal'] = True
return super(gamification_goal, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
"""Overwrite the write method to update the last_update field to today
If the current value is changed and the report frequency is set to On
change, a report is generated
"""
if context is None:
context = {}
vals['last_update'] = fields.date.today()
result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context)
for goal in self.browse(cr, uid, ids, context=context):
if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals):
# avoid drag&drop in kanban view
raise osv.except_osv(_('Error!'), _('Can not modify the configuration of a started goal'))
if vals.get('current'):
if 'no_remind_goal' in context:
# new goals should not be reported
continue
if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange':
self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context)
return result
def get_action(self, cr, uid, goal_id, context=None):
"""Get the ir.action related to update the goal
In case of a manual goal, should return a wizard to update the value
:return: action description in a dictionnary
"""
goal = self.browse(cr, uid, goal_id, context=context)
if goal.definition_id.action_id:
            # open the action linked to the goal
action = goal.definition_id.action_id.read()[0]
if goal.definition_id.res_id_field:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})
# if one element to display, should see it in form mode if possible
action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
return action
if goal.computation_mode == 'manually':
# open a wizard window to update the value manually
action = {
'name': _("Update %s") % goal.definition_id.name,
'id': goal_id,
'type': 'ir.actions.act_window',
'views': [[False, 'form']],
'target': 'new',
'context': {'default_goal_id': goal_id, 'default_current': goal.current},
'res_model': 'gamification.goal.wizard'
}
return action
return False
| agpl-3.0 |
superdesk/superdesk-core | apps/ldap/users_service.py | 2 | 1273 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from superdesk.users import UsersService, UsersResource, is_admin # NOQA
logger = logging.getLogger(__name__)
class ADUsersService(UsersService):
"""
Service class for UsersResource and should be used when AD is active.
"""
readonly_fields = ["email", "first_name", "last_name"]
def on_fetched(self, doc):
super().on_fetched(doc)
for document in doc["_items"]:
self.set_defaults(document)
def on_fetched_item(self, doc):
super().on_fetched_item(doc)
self.set_defaults(doc)
def set_defaults(self, doc):
"""Set the readonly fields for LDAP user.
:param dict doc: user
"""
readonly = {}
user_attributes = config.LDAP_USER_ATTRIBUTES
for value in user_attributes.values():
if value in self.readonly_fields:
readonly[value] = True
doc["_readonly"] = readonly
| agpl-3.0 |
vlachoudis/sl4a | python/src/Lib/plat-irix5/IN.py | 66 | 3097 | # Generated by h2py from /usr/include/netinet/in.h
from warnings import warnpy3k
warnpy3k("the IN module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Included from sys/endian.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
BYTE_ORDER = LITTLE_ENDIAN
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def htonl(x): return ntohl(x)
def htons(x): return ntohs(x)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
FD_SETSIZE = 1024
NBBY = 8
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_XTP = 36
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_MAXPORT = 65535
def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_BROADCAST = 0xffffffff
INADDR_LOOPBACK = 0x7F000001
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 7
IP_TOS = 8
IP_TTL = 9
IP_RECVOPTS = 10
IP_RECVRETOPTS = 11
IP_RECVDSTADDR = 12
IP_RETOPTS = 13
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 20
IP_MULTICAST_TTL = 21
IP_MULTICAST_LOOP = 22
IP_ADD_MEMBERSHIP = 23
IP_DROP_MEMBERSHIP = 24
IRIX4_IP_OPTIONS = 1
IRIX4_IP_MULTICAST_IF = 2
IRIX4_IP_MULTICAST_TTL = 3
IRIX4_IP_MULTICAST_LOOP = 4
IRIX4_IP_ADD_MEMBERSHIP = 5
IRIX4_IP_DROP_MEMBERSHIP = 6
IRIX4_IP_HDRINCL = 7
IRIX4_IP_TOS = 8
IRIX4_IP_TTL = 9
IRIX4_IP_RECVOPTS = 10
IRIX4_IP_RECVRETOPTS = 11
IRIX4_IP_RECVDSTADDR = 12
IRIX4_IP_RETOPTS = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
| apache-2.0 |
l2isbad/netdata | collectors/python.d.plugin/python_modules/pyyaml2/composer.py | 4 | 4952 | # SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
        # Are there more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor.encode('utf-8'), event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
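# Usage note (a sketch, not part of the original module): Composer is a mixin
# that PyYAML combines with the other pipeline stages when building a Loader,
# roughly:
#
#   class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
#       def __init__(self, stream):
#           Reader.__init__(self, stream)
#           Scanner.__init__(self)
#           Parser.__init__(self)
#           Composer.__init__(self)
#           Constructor.__init__(self)
#           Resolver.__init__(self)
#
# so check_node()/get_node()/get_single_node() are normally driven via
# yaml.compose()/yaml.load() rather than called directly.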
| gpl-3.0 |
usc-isi/essex-baremetal-support | nova/tests/notifier/test_list_notifier.py | 5 | 3462 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova
from nova import log as logging
import nova.notifier.api
import nova.notifier.log_notifier
import nova.notifier.no_op_notifier
from nova.notifier import list_notifier
from nova import test
class NotifierListTestCase(test.TestCase):
"""Test case for notifications"""
def setUp(self):
super(NotifierListTestCase, self).setUp()
list_notifier._reset_drivers()
# Mock log to add one to exception_count when log.exception is called
def mock_exception(cls, *args):
self.exception_count += 1
self.exception_count = 0
list_notifier_log = logging.getLogger('nova.notifier.list_notifier')
self.stubs.Set(list_notifier_log, "exception", mock_exception)
# Mock no_op notifier to add one to notify_count when called.
def mock_notify(cls, *args):
self.notify_count += 1
self.notify_count = 0
self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify)
# Mock log_notifier to raise RuntimeError when called.
def mock_notify2(cls, *args):
raise RuntimeError("Bad notifier.")
self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2)
def tearDown(self):
list_notifier._reset_drivers()
super(NotifierListTestCase, self).tearDown()
def test_send_notifications_successfully(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.no_op_notifier'])
nova.notifier.api.notify('publisher_id', 'event_type',
nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.notify_count, 2)
self.assertEqual(self.exception_count, 0)
def test_send_notifications_with_errors(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.log_notifier'])
nova.notifier.api.notify('publisher_id',
'event_type', nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.notify_count, 1)
self.assertEqual(self.exception_count, 1)
def test_when_driver_fails_to_import(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.logo_notifier',
'fdsjgsdfhjkhgsfkj'])
nova.notifier.api.notify('publisher_id',
'event_type', nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.exception_count, 2)
self.assertEqual(self.notify_count, 1)
| apache-2.0 |
honghaoz/UW-Info-Session | UW-Info-Session-1.0/GAE Support/uw-info2/libs/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
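# Consumption sketch (assumed from chardet's structure; the prober API may
# differ slightly between versions): these model dicts are handed to a
# single-byte charset prober, which maps input bytes through charToOrderMap
# and scores order-pair transitions against precedenceMatrix, e.g.:
#
#   from .sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(Win1250HungarianModel)
#   prober.feed(some_windows_1250_bytes)
#   confidence = prober.get_confidence()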
# flake8: noqa
| mit |
virt-who/virt-who | virtwho/manager/subscriptionmanager/subscriptionmanager.py | 1 | 16260 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
Module for communication with subscription-manager, part of virt-who
Copyright (C) 2011 Radek Novacek <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os
import json
from six.moves.http_client import BadStatusLine
from six import string_types
import rhsm.connection as rhsm_connection
import rhsm.certificate as rhsm_certificate
import rhsm.config as rhsm_config
from virtwho.config import NotSetSentinel
from virtwho.manager import Manager, ManagerError, ManagerFatalError, ManagerThrottleError
from virtwho.virt import AbstractVirtReport
from virtwho.util import generate_correlation_id
class SubscriptionManagerError(ManagerError):
pass
class SubscriptionManagerUnregisteredError(ManagerFatalError):
pass
# Mapping between strings returned from getJob and report statuses
STATE_MAPPING = {
'FINISHED': AbstractVirtReport.STATE_FINISHED,
'CANCELED': AbstractVirtReport.STATE_CANCELED,
'FAILED': AbstractVirtReport.STATE_FAILED,
'RUNNING': AbstractVirtReport.STATE_PROCESSING,
'WAITING': AbstractVirtReport.STATE_PROCESSING,
'CREATED': AbstractVirtReport.STATE_PROCESSING,
}
class NamedOptions(object):
"""
Object used for compatibility with RHSM
"""
pass
class SubscriptionManager(Manager):
sm_type = "sam"
""" Class for interacting subscription-manager. """
def __init__(self, logger, options):
self.logger = logger
self.options = options
self.cert_uuid = None
self.rhsm_config = None
self.cert_file = None
self.key_file = None
self.readConfig()
self.connection = None
self.correlation_id = generate_correlation_id()
def readConfig(self):
""" Parse rhsm.conf in order to obtain consumer
certificate and key paths. """
self.rhsm_config = rhsm_config.initConfig(
rhsm_config.DEFAULT_CONFIG_PATH)
consumer_cert_dir = self.rhsm_config.get("rhsm", "consumerCertDir")
cert = 'cert.pem'
key = 'key.pem'
self.cert_file = os.path.join(consumer_cert_dir, cert)
self.key_file = os.path.join(consumer_cert_dir, key)
def _check_owner_lib(self, kwargs, config):
"""
        Try to check the values of env and owner. These values have to be
        equal to the values obtained from the Satellite server.
:param kwargs: dictionary possibly containing valid username and
password used for connection to rhsm
:param config: Configuration of virt-who
:return: None
"""
if config is None:
return
# Check 'owner' and 'env' only in situation, when these values
# are set and rhsm_username and rhsm_password are not set
if 'username' not in kwargs and 'password' not in kwargs and \
'owner' in config.keys() and 'env' in config.keys():
pass
else:
return
uuid = self.uuid()
consumer = self.connection.getConsumer(uuid)
if 'environment' in consumer:
environment = consumer['environment']
else:
return
if environment:
environment_name = environment['name']
owner = self.connection.getOwner(uuid)
owner_id = owner['key']
if config['owner'] != owner_id:
raise ManagerError(
"Cannot send data to: %s, because owner from configuration: %s is different" %
(owner_id, config['owner'])
)
if config['env'] != environment_name:
raise ManagerError(
"Cannot send data to: %s, because Satellite env: %s differs from configuration: %s" %
(owner_id, environment_name, config['env'])
)
def _connect(self, config=None):
""" Connect to the subscription-manager. """
kwargs = {
'host': self.rhsm_config.get('server', 'hostname'),
'ssl_port': int(self.rhsm_config.get('server', 'port')),
'handler': self.rhsm_config.get('server', 'prefix'),
'proxy_hostname': self.rhsm_config.get('server', 'proxy_hostname'),
'proxy_port': self.rhsm_config.get('server', 'proxy_port'),
'proxy_user': self.rhsm_config.get('server', 'proxy_user'),
'proxy_password': self.rhsm_config.get('server', 'proxy_password'),
'insecure': self.rhsm_config.get('server', 'insecure')
}
kwargs_to_config = {
'host': 'rhsm_hostname',
'ssl_port': 'rhsm_port',
'handler': 'rhsm_prefix',
'proxy_hostname': 'rhsm_proxy_hostname',
'proxy_port': 'rhsm_proxy_port',
'proxy_user': 'rhsm_proxy_user',
'proxy_password': 'rhsm_proxy_password',
'insecure': 'rhsm_insecure'
}
rhsm_username = None
rhsm_password = None
if config:
try:
rhsm_username = config['rhsm_username']
rhsm_password = config['rhsm_password']
except KeyError:
pass
if rhsm_username == NotSetSentinel:
rhsm_username = None
if rhsm_password == NotSetSentinel:
rhsm_password = None
# Testing for None is necessary, it might be an empty string
for key, value in kwargs.items():
try:
from_config = config[kwargs_to_config[key]]
if from_config is not NotSetSentinel and from_config is \
not None:
                    if key == 'ssl_port':
from_config = int(from_config)
kwargs[key] = from_config
except KeyError:
continue
if rhsm_username and rhsm_password:
self.logger.debug("Authenticating with RHSM username %s", rhsm_username)
kwargs['username'] = rhsm_username
kwargs['password'] = rhsm_password
else:
self.logger.debug("Authenticating with certificate: %s", self.cert_file)
if not os.access(self.cert_file, os.R_OK):
raise SubscriptionManagerUnregisteredError(
"Unable to read certificate, system is not registered or you are not root")
kwargs['cert_file'] = self.cert_file
kwargs['key_file'] = self.key_file
self.logger.info("X-Correlation-ID: %s", self.correlation_id)
if self.correlation_id:
kwargs['correlation_id'] = self.correlation_id
self.connection = rhsm_connection.UEPConnection(**kwargs)
try:
if not self.connection.ping()['result']:
raise SubscriptionManagerError(
"Unable to obtain status from server, UEPConnection is likely not usable."
)
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
self._check_owner_lib(kwargs, config)
return self.connection
def sendVirtGuests(self, report, options=None):
"""
Update consumer facts with info about virtual guests.
`guests` is a list of `Guest` instances (or it children).
"""
guests = report.guests
self._connect()
# Sort the list
guests.sort(key=lambda item: item.uuid)
serialized_guests = [guest.toDict() for guest in guests]
self.logger.info('Sending update in guests lists for config '
'"%s": %d guests found',
report.config.name, len(guests))
self.logger.debug("Domain info: %s", json.dumps(serialized_guests, indent=4))
# Send list of guest uuids to the server
try:
self.connection.updateConsumer(self.uuid(), guest_uuids=serialized_guests, hypervisor_id=report.hypervisor_id)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
report.state = AbstractVirtReport.STATE_FINISHED
def hypervisorCheckIn(self, report, options=None):
""" Send hosts to guests mapping to subscription manager. """
connection = self._connect(report.config)
is_async = self._is_rhsm_server_async(report, connection)
serialized_mapping = self._hypervisor_mapping(report, is_async, connection)
self.logger.debug("Host-to-guest mapping being sent to '{owner}': {mapping}".format(
owner=report.config['owner'],
mapping=json.dumps(serialized_mapping, indent=4)))
# All subclasses of ConfigSection use dictionary like notation,
# but RHSM uses attribute like notation
if options:
named_options = NamedOptions()
for key, value in options['global'].items():
setattr(named_options, key, value)
else:
named_options = None
try:
try:
result = self.connection.hypervisorCheckIn(
report.config['owner'],
report.config['env'],
serialized_mapping,
options=named_options) # pylint:disable=unexpected-keyword-arg
except TypeError:
# This is temporary workaround until the options parameter gets implemented
# in python-rhsm
self.logger.debug(
"hypervisorCheckIn method in python-rhsm doesn't understand options parameter, ignoring"
)
result = self.connection.hypervisorCheckIn(report.config['owner'], report.config['env'], serialized_mapping)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
if is_async is True:
report.state = AbstractVirtReport.STATE_CREATED
report.job_id = result['id']
else:
report.state = AbstractVirtReport.STATE_FINISHED
return result
def _is_rhsm_server_async(self, report, connection=None):
"""
Check if server has capability 'hypervisor_async'.
"""
if connection is None:
self._connect(report.config)
self.logger.debug("Checking if server has capability 'hypervisor_async'")
is_async = hasattr(self.connection, 'has_capability') and self.connection.has_capability('hypervisors_async')
if is_async:
self.logger.debug("Server has capability 'hypervisors_async'")
else:
self.logger.debug("Server does not have 'hypervisors_async' capability")
return is_async
def _hypervisor_mapping(self, report, is_async, connection=None):
"""
        Return the hypervisor-to-guests mapping in serialized form.
"""
if connection is None:
self._connect(report.config)
mapping = report.association
serialized_mapping = {}
ids_seen = []
if is_async:
hosts = []
# Transform the mapping into the async version
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
hosts.append(hypervisor.toDict())
ids_seen.append(hypervisor.hypervisorId)
serialized_mapping = {'hypervisors': hosts}
else:
# Reformat the data from the mapping to make it fit with
# the old api.
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
guests = [g.toDict() for g in hypervisor.guestIds]
serialized_mapping[hypervisor.hypervisorId] = guests
ids_seen.append(hypervisor.hypervisorId)
return serialized_mapping
def check_report_state(self, report):
# BZ 1554228
job_id = str(report.job_id)
self._connect(report.config)
self.logger.debug('Checking status of job %s', job_id)
try:
result = self.connection.getJob(job_id)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
state = STATE_MAPPING.get(result['state'], AbstractVirtReport.STATE_FAILED)
report.state = state
if state not in (AbstractVirtReport.STATE_FINISHED,
AbstractVirtReport.STATE_CANCELED,
AbstractVirtReport.STATE_FAILED):
self.logger.debug('Job %s not finished', job_id)
else:
# log completed job status
result_data = result.get('resultData', {})
if not result_data:
self.logger.warning("Job status report without resultData: %s", result)
return
if isinstance(result_data, string_types):
self.logger.warning("Job status report encountered the following error: %s", result_data)
return
for fail in result_data.get('failedUpdate', []):
self.logger.error("Error during update list of guests: %s", str(fail))
self.logger.debug("Number of mappings unchanged: %d", len(result_data.get('unchanged', [])))
self.logger.info("Mapping for config \"%s\" updated", report.config.name)
def uuid(self):
""" Read consumer certificate and get consumer UUID from it. """
if not self.cert_uuid:
try:
certificate = rhsm_certificate.create_from_file(self.cert_file)
self.cert_uuid = certificate.subject["CN"]
except Exception as e:
raise SubscriptionManagerError("Unable to open certificate %s (%s):" % (self.cert_file, str(e)))
return self.cert_uuid
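# Rough usage sketch (hypothetical driver code; virt-who's real callers pass
# fully populated report objects through its manager/virt plumbing):
#   manager = SubscriptionManager(logger, options)
#   manager.sendVirtGuests(report)              # guest list for one consumer
#   result = manager.hypervisorCheckIn(report)  # host-to-guest mapping
#   manager.check_report_state(report)          # poll async job status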
| gpl-2.0 |
sestrella/ansible | lib/ansible/modules/network/ios/ios_facts.py | 12 | 7398 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_facts
version_added: "2.2"
author:
- "Peter Sprygada (@privateip)"
- "Sumit Jaiswal (@justjais)"
short_description: Collect facts from remote devices running Cisco IOS
description:
- Collects a base set of device facts from a remote device that
is running IOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
gather_subset:
description:
- When supplied, this argument restricts the facts collected
to a given subset.
- Possible values for this argument include
C(all), C(min), C(hardware), C(config), and C(interfaces).
- Specify a list of values to include a larger subset.
- Use a value with an initial C(!) to collect all facts except that subset.
required: false
default: '!config'
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces, vlans etc.
Can specify a list of values to include a larger subset.
        Values can also be used with an initial C(!) to specify that
        a specific subset should not be collected.
Valid subsets are 'all', 'interfaces', 'l2_interfaces', 'vlans',
'lag_interfaces', 'lacp', 'lacp_interfaces', 'lldp_global',
'lldp_interfaces', 'l3_interfaces'.
version_added: "2.9"
"""
EXAMPLES = """
- name: Gather all legacy facts
ios_facts:
gather_subset: all
- name: Gather only the config and default facts
ios_facts:
gather_subset:
- config
- name: Do not gather hardware facts
ios_facts:
gather_subset:
- "!hardware"
- name: Gather legacy and resource facts
ios_facts:
gather_subset: all
gather_network_resources: all
- name: Gather only the interfaces resource facts and no legacy facts
ios_facts:
gather_subset:
- '!all'
- '!min'
gather_network_resources:
- interfaces
- name: Gather interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: interfaces
- name: Gather L2 interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: l2_interfaces
- name: Gather L3 interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: l3_interfaces
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
ansible_net_gather_network_resources:
description: The list of fact for network resource subsets collected from the device
returned: when the resource is configured
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_iostype:
description: The operating system type (IOS or IOS-XE) running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
ansible_net_stacked_models:
description: The model names of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_stacked_serialnums:
description: The serial numbers of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_filesystems_info:
description: A hash of all file systems containing info about each file system (e.g. free and total space)
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description:
- The list of CDP and LLDP neighbors from the remote device. If both,
CDP and LLDP neighbor data is present on one port, CDP is preferred.
returned: when interfaces is configured
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.ios.facts.facts import Facts
from ansible.module_utils.network.ios.ios import ios_argument_spec
def main():
""" Main entry point for AnsibleModule
"""
argument_spec = FactsArgs.argument_spec
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = ['default value for `gather_subset` '
'will be changed to `min` from `!config` v2.11 onwards']
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 |
naiquevin/jinger | jinger/test/test_site.py | 1 | 1107 | # import unittest
import os
from jinger.site import create_empty_site, createdir
from jinger.test import DIR_PLAYGROUND, JingerPlaygroundTest
class SiteTest(JingerPlaygroundTest):
def test_create_dir(self):
mysite = createdir(DIR_PLAYGROUND, 'mysite')
self.assertTrue(os.path.exists(mysite))
        # TODO: check that if the dir already exists, it raises an Exception
def test_create_empty_site(self):
create_empty_site('mysite', DIR_PLAYGROUND)
        newsite = os.path.join(DIR_PLAYGROUND, 'mysite')
        self.assertTrue(os.path.exists(newsite))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'templates')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'public')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'config.json')))
create_empty_site('myothersite', DIR_PLAYGROUND, '_source', 'www')
        newsite = os.path.join(DIR_PLAYGROUND, 'myothersite')
        self.assertTrue(os.path.exists(newsite))
        self.assertTrue(os.path.exists(os.path.join(newsite, '_source')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'www')))
        self.assertTrue(os.path.exists(os.path.join(newsite, 'config.json')))
| mit |
Blitzen/oauthlib | oauthlib/oauth1/rfc5849/endpoints/resource.py | 42 | 7083 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
import logging
from .base import BaseEndpoint
from .. import errors
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
                    v, r = endpoint.validate_protected_resource_request(
                        request.url,
                        http_method=request.method,
                        body=request.data,
                        headers=request.headers,
                        realms=realms or [])
                    if v:
                        return f(*args, **kwargs)
                    else:
                        return abort(403)
                return wrapper
            return decorator
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
| bsd-3-clause |
sunlightlabs/openstates | scrapers/md/events.py | 2 | 4320 | import pytz
import dateutil.parser
import datetime
from urllib.parse import urlsplit, parse_qs
from utils import LXMLMixin
from openstates.scrape import Scraper, Event
class MDEventScraper(Scraper, LXMLMixin):
_TZ = pytz.timezone("US/Eastern")
chambers = {"upper": "Senate", "lower": ""}
date_format = "%B %d, %Y"
def scrape(self, chamber=None, start=None, end=None):
if start is None:
start_date = datetime.datetime.now().strftime(self.date_format)
else:
start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
start_date = start_date.strftime(self.date_format)
# default to 30 days if no end
if end is None:
dtdelta = datetime.timedelta(days=30)
end_date = datetime.datetime.now() + dtdelta
end_date = end_date.strftime(self.date_format)
else:
end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
end_date = end_date.strftime(self.date_format)
url = "http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx?&range={} - {}"
url = url.format(start_date, end_date)
page = self.lxmlize(url)
if chamber is None:
yield from self.scrape_chamber(page, "upper")
yield from self.scrape_chamber(page, "lower")
else:
yield from self.scrape_chamber(page, chamber)
def scrape_chamber(self, page, chamber):
xpath = '//div[@id="ContentPlaceHolder1_div{}SingleColumn"]' "/div".format(
self.chambers[chamber]
)
com = None
rows = page.xpath(xpath)
for row in rows:
css = row.xpath("@class")[0]
if "CommitteeBanner" in css:
com = row.xpath("string(.//h3/a[1])").strip()
elif "CmteInfo" in css or "DayPanelSingleColumn" in css:
yield from self.parse_div(row, chamber, com)
def parse_div(self, row, chamber, com):
cal_link = row.xpath('.//a[.//span[@id="calendarmarker"]]/@href')[0]
# event_date = row.xpath('string(.//div[contains(@class,"ItemDate")])').strip()
title, location, start_date, end_date = self.parse_gcal(cal_link)
event = Event(
start_date=start_date, end_date=end_date, name=title, location_name=location
)
event.add_source("http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx")
for item in row.xpath('.//div[@class="col-xs-12a Item"]'):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
for item in row.xpath('.//div[contains(@class,"ItemContainer")]/a'):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
event.add_document(
description,
item.xpath("@href")[0],
media_type="application/pdf",
on_duplicate="ignore",
)
for item in row.xpath(
'.//div[contains(@class,"ItemContainer")]' '[./div[@class="col-xs-1 Item"]]'
):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
bill = item.xpath('.//div[@class="col-xs-1 Item"]/a/text()')[0].strip()
agenda.add_bill(bill)
video = row.xpath('.//a[./span[@class="OnDemand"]]')
if video:
event.add_media_link(
"Video of Hearing", video[0].xpath("@href")[0], "text/html"
)
if "subcommittee" in title.lower():
subcom = title.split("-")[0].strip()
event.add_participant(subcom, type="committee", note="host")
else:
event.add_participant(com, type="committee", note="host")
yield event
# Due to the convoluted HTML, it's easier just to parse the google cal links
def parse_gcal(self, url):
query = urlsplit(url).query
params = parse_qs(query)
dates = params["dates"][0].split("/")
start_date = self._TZ.localize(dateutil.parser.parse(dates[0]))
end_date = self._TZ.localize(dateutil.parser.parse(dates[1]))
return params["text"][0], params["location"][0], start_date, end_date
| gpl-3.0 |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/core/tests/test_oinspect.py | 3 | 8346 | """Tests for the object inspection functionality.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os
import re
# Third-party imports
import nose.tools as nt
# Our own imports
from .. import oinspect
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic,
register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython.external.decorator import decorator
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
inspector = oinspect.Inspector()
ip = get_ipython()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# WARNING: since this test checks the line number where a function is
# defined, if any code is inserted above, the following line will need to be
# updated. Do NOT insert any whitespace between the next line and the function
# definition below.
THIS_LINE_NUMBER = 48 # Put here the actual number of this line
def test_find_source_lines():
nt.assert_equal(oinspect.find_source_lines(test_find_source_lines),
THIS_LINE_NUMBER+1)
# A couple of utilities to ensure these tests work the same from a source or a
# binary install
def pyfile(fname):
return os.path.normcase(re.sub('.py[co]$', '.py', fname))
def match_pyfiles(f1, f2):
nt.assert_equal(pyfile(f1), pyfile(f2))
def test_find_file():
match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
def test_find_file_decorated1():
@decorator
def noop1(f):
        def wrapper(*a, **kw):
            return f(*a, **kw)
return wrapper
@noop1
def f(x):
"My docstring"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring")
def test_find_file_decorated2():
@decorator
def noop2(f, *a, **kw):
return f(*a, **kw)
@noop2
def f(x):
"My docstring 2"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring 2")
def test_find_file_magic():
run = ip.find_line_magic('run')
nt.assert_not_equal(oinspect.find_file(run), None)
# A few generic objects we can then inspect in the tests below
class Call(object):
"""This is the class docstring."""
def __init__(self, x, y=1):
"""This is the constructor docstring."""
def __call__(self, *a, **kw):
"""This is the call docstring."""
def method(self, x, z=2):
"""Some method's docstring"""
class OldStyle:
"""An old-style class for testing."""
pass
def f(x, y=2, *a, **kw):
"""A simple function."""
def g(y, z=3, *a, **kw):
pass # no docstring
@register_line_magic
def lmagic(line):
"A line magic"
@register_cell_magic
def cmagic(line, cell):
"A cell magic"
@register_line_cell_magic
def lcmagic(line, cell=None):
"A line/cell magic"
@magics_class
class SimpleMagics(Magics):
@line_magic
def Clmagic(self, cline):
"A class-based line magic"
@cell_magic
def Ccmagic(self, cline, ccell):
"A class-based cell magic"
@line_cell_magic
def Clcmagic(self, cline, ccell=None):
"A class-based line/cell magic"
def check_calltip(obj, name, call, docstring):
"""Generic check pattern all calltip tests will use"""
info = inspector.info(obj, name)
call_line, ds = oinspect.call_tip(info)
nt.assert_equal(call_line, call)
nt.assert_equal(ds, docstring)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_calltip_class():
check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
c = Call(1)
check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
c = Call(1)
check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
def test_calltip_builtin():
check_calltip(sum, 'sum', None, sum.__doc__)
def test_calltip_line_magic():
check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic")
def test_calltip_cell_magic():
check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic")
def test_calltip_line_cell_magic():
check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)',
"A line/cell magic")
def test_class_magics():
cm = SimpleMagics(ip)
ip.register_magics(cm)
check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)',
"A class-based line magic")
check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)',
"A class-based cell magic")
check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)',
"A class-based line/cell magic")
def test_info():
"Check that Inspector.info fills out various fields as expected."
i = inspector.info(Call, oname='Call')
nt.assert_equal(i['type_name'], 'type')
    expected_class = str(type(type))  # <class 'type'> (Python 3) or <type 'type'> (Python 2)
    nt.assert_equal(i['base_class'], expected_class)
nt.assert_equal(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'>")
fname = __file__
if fname.endswith(".pyc"):
fname = fname[:-1]
# case-insensitive comparison needed on some filesystems
# e.g. Windows:
nt.assert_equal(i['file'].lower(), fname.lower())
nt.assert_equal(i['definition'], 'Call(self, *a, **kw)\n')
nt.assert_equal(i['docstring'], Call.__doc__)
nt.assert_equal(i['source'], None)
nt.assert_true(i['isclass'])
nt.assert_equal(i['init_definition'], "Call(self, x, y=1)\n")
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
i = inspector.info(Call, detail_level=1)
nt.assert_not_equal(i['source'], None)
nt.assert_equal(i['docstring'], None)
c = Call(1)
c.__doc__ = "Modified instance docstring"
i = inspector.info(c)
nt.assert_equal(i['type_name'], 'Call')
nt.assert_equal(i['docstring'], "Modified instance docstring")
nt.assert_equal(i['class_docstring'], Call.__doc__)
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
nt.assert_equal(i['call_docstring'], c.__call__.__doc__)
# Test old-style classes, which for example may not have an __init__ method.
if not py3compat.PY3:
i = inspector.info(OldStyle)
nt.assert_equal(i['type_name'], 'classobj')
i = inspector.info(OldStyle())
nt.assert_equal(i['type_name'], 'instance')
nt.assert_equal(i['docstring'], OldStyle.__doc__)
def test_getdoc():
class A(object):
"""standard docstring"""
pass
class B(object):
"""standard docstring"""
def getdoc(self):
return "custom docstring"
class C(object):
"""standard docstring"""
def getdoc(self):
return None
a = A()
b = B()
c = C()
nt.assert_equal(oinspect.getdoc(a), "standard docstring")
nt.assert_equal(oinspect.getdoc(b), "custom docstring")
nt.assert_equal(oinspect.getdoc(c), "standard docstring")
def test_pdef():
# See gh-1914
def foo(): pass
inspector.pdef(foo, 'foo')
| bsd-3-clause |
jonathan-beard/edx-platform | lms/djangoapps/teams/migrations/0004_auto__add_field_courseteam_discussion_topic_id__add_field_courseteam_l.py | 46 | 6547 | # -*- coding: utf-8 -*-
import pytz
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseTeam.last_activity_at'
db.add_column('teams_courseteam', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
# Adding field 'CourseTeamMembership.last_activity_at'
db.add_column('teams_courseteammembership', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseTeam.last_activity_at'
db.delete_column('teams_courseteam', 'last_activity_at')
# Deleting field 'CourseTeamMembership.last_activity_at'
db.delete_column('teams_courseteammembership', 'last_activity_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.courseteam': {
'Meta': {'object_name': 'CourseTeam'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'discussion_topic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('student.models.LanguageField', [], {'max_length': '16', 'blank': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'team_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topic_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.CourseTeamMembership']", 'to': "orm['auth.User']"})
},
'teams.courseteammembership': {
'Meta': {'unique_together': "(('user', 'team'),)", 'object_name': 'CourseTeamMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'membership'", 'to': "orm['teams.CourseTeam']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['teams']
| agpl-3.0 |
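Note on the forwards() above: supplying a concrete default together with keep_default=False makes South backfill the new NOT NULL column for existing rows and then drop the default from the schema. The same idea in a modern Django migration, for comparison (a sketch only; model and field names taken from this migration):
import datetime
import pytz
from django.db import migrations, models
migrations.AddField(
    model_name='courseteam',
    name='last_activity_at',
    field=models.DateTimeField(default=datetime.datetime(2015, 8, 17, 0, 0, tzinfo=pytz.utc)),
    # preserve_default=False mirrors South's keep_default=False: the value is
    # used once to populate existing rows, then removed from the column definition.
    preserve_default=False,
)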
beagles/neutron_hacking | neutron/services/firewall/agents/varmour/varmour_api.py | 20 | 4931 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, [email protected], vArmour Networks
import base64
import httplib2
from oslo.config import cfg
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
OPTS = [
cfg.StrOpt('director', default='localhost',
help=_("vArmour director ip")),
cfg.StrOpt('director_port', default='443',
help=_("vArmour director port")),
cfg.StrOpt('username', default='varmour',
help=_("vArmour director username")),
cfg.StrOpt('password', default='varmour', secret=True,
help=_("vArmour director password")), ]
cfg.CONF.register_opts(OPTS, "vArmour")
LOG = logging.getLogger(__name__)
REST_URL_PREFIX = '/api/v1.0'
class vArmourAPIException(Exception):
message = _("An unknown exception.")
def __init__(self, **kwargs):
try:
self.err = self.message % kwargs
except Exception:
self.err = self.message
def __str__(self):
return self.err
class AuthenticationFailure(vArmourAPIException):
message = _("Invalid login credential.")
class vArmourRestAPI(object):
def __init__(self):
LOG.debug(_('vArmourRestAPI: started'))
self.user = cfg.CONF.vArmour.username
self.passwd = cfg.CONF.vArmour.password
self.server = cfg.CONF.vArmour.director
self.port = cfg.CONF.vArmour.director_port
self.timeout = 3
self.key = ''
def auth(self):
headers = {}
enc = base64.b64encode(self.user + ':' + self.passwd)
headers['Authorization'] = 'Basic ' + enc
resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers)
if resp and resp['status'] == 200:
self.key = resp['body']['auth']
return True
else:
raise AuthenticationFailure()
def commit(self):
self.rest_api('POST', va_utils.REST_URL_COMMIT)
def rest_api(self, method, url, body=None, headers=None):
url = REST_URL_PREFIX + url
if body:
body_data = json.dumps(body)
else:
body_data = ''
if not headers:
headers = {}
enc = base64.b64encode('%s:%s' % (self.user, self.key))
headers['Authorization'] = 'Basic ' + enc
LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"),
{'server': self.server, 'port': self.port})
try:
action = "https://" + self.server + ":" + self.port + url
LOG.debug(_("vArmourRestAPI Sending: "
"%(method)s %(action)s %(headers)s %(body_data)s"),
{'method': method, 'action': action,
'headers': headers, 'body_data': body_data})
            h = httplib2.Http(timeout=self.timeout,
                              disable_ssl_certificate_validation=True)
resp, resp_str = h.request(action, method,
body=body_data,
headers=headers)
LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"),
{'status': resp.status, 'resp_str': resp_str})
if resp.status == 200:
return {'status': resp.status,
'reason': resp.reason,
'body': json.loads(resp_str)}
except Exception:
LOG.error(_('vArmourRestAPI: Could not establish HTTP connection'))
def del_cfg_objs(self, url, prefix):
resp = self.rest_api('GET', url)
if resp and resp['status'] == 200:
olist = resp['body']['response']
if not olist:
return
for o in olist:
if o.startswith(prefix):
self.rest_api('DELETE', url + '/"name:%s"' % o)
self.commit()
def count_cfg_objs(self, url, prefix):
count = 0
resp = self.rest_api('GET', url)
if resp and resp['status'] == 200:
for o in resp['body']['response']:
if o.startswith(prefix):
count += 1
return count
| apache-2.0 |
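Typical client flow for the class above: auth() trades user:password Basic credentials for a session key, after which every rest_api() call signs requests with user:key instead. A hedged usage sketch (the '/policy' path is illustrative, not a documented director endpoint):
api = vArmourRestAPI()
if api.auth():                                # obtains and stores self.key
    resp = api.rest_api('GET', '/policy')     # any director REST path
    if resp and resp['status'] == 200:
        print(resp['body'])
    api.commit()                              # persist configuration changes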
eerwitt/tensorflow | tensorflow/python/saved_model/main_op_impl.py | 25 | 2164 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops as tf_data_flow_ops
from tensorflow.python.ops import variables
def main_op():
"""Returns a main op to init variables and tables.
Returns the main op including the group of ops that initializes all
  variables, initializes local variables, and initializes all tables.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
init = variables.global_variables_initializer()
init_local = variables.local_variables_initializer()
init_tables = tf_data_flow_ops.tables_initializer()
return control_flow_ops.group(init, init_local, init_tables)
def main_op_with_restore(restore_op_name):
"""Returns a main op to init variables, tables and restore the graph.
Returns the main op including the group of ops that initializes all
  variables, initializes local variables, initializes all tables, and runs the
  restore op identified by `restore_op_name`.
Args:
restore_op_name: Name of the op to use to restore the graph.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
with ops.control_dependencies([main_op()]):
main_op_with_restore = control_flow_ops.group(restore_op_name)
return main_op_with_restore
| apache-2.0 |
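These helpers are meant to be attached at export time so the loader runs them automatically. A sketch of the usual TF 1.x wiring (assumes an active tf.Session `sess` and an export directory; consult SavedModelBuilder for the authoritative signature):
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import main_op as main_op_module
from tensorflow.python.saved_model import tag_constants
b = saved_model_builder.SavedModelBuilder('/tmp/export')   # illustrative path
b.add_meta_graph_and_variables(
    sess, [tag_constants.SERVING],
    main_op=main_op_module.main_op())  # loader runs the init ops on load
b.save()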
twobob/buildroot-kindle | output/build/host-python-2.7.2/Tools/bgen/bgen/bgenType.py | 44 | 9507 | """Type classes and a modest collection of standard types."""
from bgenOutput import *
class Type:
"""Define the various things you can do with a C type.
Most methods are intended to be extended or overridden.
"""
def __init__(self, typeName, fmt):
"""Call with the C name and getargs format for the type.
Example: int = Type("int", "i")
"""
self.typeName = typeName
self.fmt = fmt
def declare(self, name, reference=False):
"""Declare a variable of the type with a given name.
Example: int.declare('spam') prints "int spam;"
"""
for decl in self.getArgDeclarations(name, reference):
Output("%s;", decl)
for decl in self.getAuxDeclarations(name):
Output("%s;", decl)
def getArgDeclarations(self, name, reference=False, constmode=False, outmode=False):
"""Return the main part of the declarations for this type: the items
that will be passed as arguments in the C/C++ function call."""
if reference:
ref = "&"
else:
ref = ""
if constmode:
const = "const "
else:
const = ""
if outmode:
out = "*"
else:
out = ""
return ["%s%s%s%s %s" % (const, self.typeName, ref, out, name)]
def getAuxDeclarations(self, name):
"""Return any auxiliary declarations needed for implementing this
type, such as helper variables used to hold sizes, etc. These declarations
are not part of the C/C++ function call interface."""
return []
    def getargs(self, name):
        return self.getargsFormat(), self.getargsArgs(name)
def getargsFormat(self):
"""Return the format for this type for use with PyArg_Parse().
Example: int.getargsFormat() returns the string "i".
(getargs is a very old name for PyArg_Parse, hence the name of this method).
"""
return self.fmt
def getargsArgs(self, name):
"""Return an argument for use with PyArg_Parse().
Example: int.getargsArgs("spam") returns the string "&spam".
"""
return "&" + name
def getargsPreCheck(self, name):
"""Perform any actions needed before calling getargs().
This could include declaring temporary variables and such.
"""
def getargsCheck(self, name):
"""Perform any needed post-[new]getargs() checks.
This is type-dependent; the default does not check for errors.
An example would be a check for a maximum string length, or it
could do post-getargs() copying or conversion."""
def passInput(self, name):
"""Return an argument for passing a variable into a call.
Example: int.passInput("spam") returns the string "spam".
"""
return name
def passOutput(self, name):
"""Return an argument for returning a variable out of a call.
Example: int.passOutput("spam") returns the string "&spam".
"""
return "&" + name
def passReference(self, name):
"""Return an argument for C++ pass-by-reference.
Default is to call passInput().
"""
return self.passInput(name)
def errorCheck(self, name):
"""Check for an error returned in the variable.
This is type-dependent; the default does not check for errors.
An example would be a check for a NULL pointer.
If an error is found, the generated routine should
raise an exception and return NULL.
XXX There should be a way to add error clean-up code.
"""
Output("/* XXX no err check for %s %s */", self.typeName, name)
    def mkvalue(self, name):
        return self.mkvalueFormat(), self.mkvalueArgs(name)
def mkvalueFormat(self):
"""Return the format for this type for use with Py_BuildValue().
This is normally the same as getargsFormat() but it is
a separate function to allow future divergence.
(mkvalue is a very old name for Py_BuildValue, hence the name of this
method).
"""
return self.getargsFormat()
def mkvalueArgs(self, name):
"""Return an argument for use with Py_BuildValue().
Example: int.mkvalueArgs("spam") returns the string "spam".
"""
return name
def mkvaluePreCheck(self, name):
"""Perform any actions needed before calling mkvalue().
This could include declaring temporary variables and such.
"""
def cleanup(self, name):
"""Clean up if necessary.
This is normally empty; it may deallocate buffers etc.
"""
pass
class ByAddressType(Type):
"Simple type that is also passed by address for input"
def passInput(self, name):
return "&%s" % name
# Sometimes it's useful to define a type that's only usable as input or output parameter
class InputOnlyMixIn:
"Mix-in class to boobytrap passOutput"
def passOutput(self, name):
raise RuntimeError, "Type '%s' can only be used for input parameters" % self.typeName
class InputOnlyType(InputOnlyMixIn, Type):
"Same as Type, but only usable for input parameters -- passOutput is boobytrapped"
class OutputOnlyMixIn:
"Mix-in class to boobytrap passInput"
def passInput(self, name):
raise RuntimeError, "Type '%s' can only be used for output parameters" % self.typeName
class OutputOnlyType(OutputOnlyMixIn, Type):
"Same as Type, but only usable for output parameters -- passInput is boobytrapped"
# A modest collection of standard C types.
void = None
char = Type("char", "c")
short = Type("short", "h")
unsigned_short = Type("unsigned short", "H")
int = Type("int", "i")
long = Type("long", "l")
unsigned_long = Type("unsigned long", "l")
float = Type("float", "f")
double = Type("double", "d")
# The most common use of character pointers is a null-terminated string.
# For input, this is easy. For output, and for other uses of char *,
# see the module bgenBuffer.
stringptr = InputOnlyType("char*", "s")
unicodestringptr = InputOnlyType("wchar_t *", "u")
# Some Python related types.
objectptr = Type("PyObject*", "O")
stringobjectptr = Type("PyStringObject*", "S")
# Etc.
class FakeType(InputOnlyType):
"""A type that is not represented in the Python version of the interface.
Instantiate with a value to pass in the call.
"""
def __init__(self, substitute):
self.substitute = substitute
self.typeName = None # Don't show this argument in __doc__ string
def getArgDeclarations(self, name, reference=False, constmode=False, outmode=False):
return []
def getAuxDeclarations(self, name, reference=False):
return []
def getargsFormat(self):
return ""
def getargsArgs(self, name):
return None
def passInput(self, name):
return self.substitute
class OpaqueType(Type):
"""A type represented by an opaque object type, always passed by address.
Instantiate with the type name and the names of the new and convert procs.
If fewer than three arguments are passed, the second argument is used
to derive the new and convert procs by appending _New and _Convert; it
defaults to the first argument.
"""
def __init__(self, name, arg = None, extra = None):
self.typeName = name
if extra is None:
# Two arguments (name, usetype) or one (name)
arg = arg or name
self.new = arg + '_New'
self.convert = arg + '_Convert'
else:
# Three arguments (name, new, convert)
self.new = arg
self.convert = extra
def getargsFormat(self):
return "O&"
def getargsArgs(self, name):
return "%s, &%s" % (self.convert, name)
def passInput(self, name):
return "&%s" % name
def mkvalueFormat(self):
return "O&"
def mkvalueArgs(self, name):
return "%s, &%s" % (self.new, name)
class OpaqueByValueType(OpaqueType):
"""A type represented by an opaque object type, on input passed BY VALUE.
Instantiate with the type name, and optionally an object type name whose
New/Convert functions will be used.
"""
def passInput(self, name):
return name
def mkvalueArgs(self, name):
return "%s, %s" % (self.new, name)
class OpaqueByRefType(OpaqueType):
"""An opaque object type, passed by reference.
Instantiate with the type name, and optionally an object type name whose
New/Convert functions will be used.
"""
def passInput(self, name):
return name
# def passOutput(self, name):
# return name
def mkvalueFormat(self):
return "O"
def mkvalueArgs(self, name):
return "%s(%s)" % (self.new, name)
class OpaqueByValueStructType(OpaqueByValueType):
"""Similar to OpaqueByValueType, but we also pass this to mkvalue by
    address, instead of by value.
"""
def mkvalueArgs(self, name):
return "%s, &%s" % (self.new, name)
class OpaqueArrayType(OpaqueByValueType):
"""A type represented by an opaque object type, with ARRAY passing semantics.
    Instantiate with the type name, and optionally an object type name whose
New/Convert functions will be used.
"""
def getargsArgs(self, name):
return "%s, %s" % (self.convert, name)
def passOutput(self, name):
return name
| gpl-2.0 |
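To make the Type protocol concrete, here is what a generator would pull out of the stock `int` instance above (doctest-style sketch; `int` refers to the module-level Type object, not the builtin):
# >>> int.getargsFormat()
# 'i'
# >>> int.getargsArgs('spam')
# '&spam'
# >>> int.mkvalueFormat(), int.mkvalueArgs('spam')
# ('i', 'spam')
# These strings end up in the generated C as
#   PyArg_Parse(args, "i", &spam);  and  Py_BuildValue("i", spam);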
DMSC-Instrument-Data/lewis | setup.py | 2 | 2713 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# *********************************************************************
# lewis - a library for creating hardware device simulators
# Copyright (C) 2016-2017 European Spallation Source ERIC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# *********************************************************************
from setuptools import setup, find_packages
# as suggested on http://python-packaging.readthedocs.io/en/latest/metadata.html
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='lewis',
version='1.2.0',
description='LeWIS - Let\'s Write Intricate Simulators!',
long_description=readme(),
url='https://github.com/DMSC-Instrument-Data/lewis',
author='Michael Hart, Michael Wedel, Owen Arnold',
author_email='[email protected]',
license='GPL v3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='hardware simulation controls',
packages=find_packages(where='src'),
package_dir={'': 'src'},
install_requires=['six', 'pyzmq', 'json-rpc', 'semantic_version',
'PyYAML', 'scanf>=1.4.1'],
extras_require={
'epics': ['pcaspy'],
'dev': ['flake8', 'mock>=1.0.1', 'sphinx>=1.4.5', 'sphinx_rtd_theme',
'pytest', 'pytest-cov', 'coverage', 'tox'],
},
entry_points={
'console_scripts': [
'lewis=lewis.scripts.run:run_simulation',
'lewis-control=lewis.scripts.control:control_simulation'
],
},
)
| gpl-3.0 |
archf/ansible | test/units/module_utils/test_network_common.py | 31 | 5437 | # -*- coding: utf-8 -*-
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.module_utils.network_common import to_list, sort_list
from ansible.module_utils.network_common import dict_diff, dict_merge
from ansible.module_utils.network_common import conditional, Template
class TestModuleUtilsNetworkCommon(unittest.TestCase):
def test_to_list(self):
for scalar in ('string', 1, True, False, None):
self.assertTrue(isinstance(to_list(scalar), list))
for container in ([1, 2, 3], {'one': 1}):
self.assertTrue(isinstance(to_list(container), list))
test_list = [1, 2, 3]
self.assertNotEqual(id(test_list), id(to_list(test_list)))
def test_sort(self):
data = [3, 1, 2]
self.assertEqual([1, 2, 3], sort_list(data))
string_data = '123'
self.assertEqual(string_data, sort_list(string_data))
def test_dict_diff(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_diff(base, other)
# string assertions
self.assertNotIn('one', result)
self.assertNotIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertNotIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [2, 1])
self.assertNotIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertNotIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertNotIn('key2', result['obj1'])
# bool assertions
self.assertNotIn('b1', result)
self.assertNotIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
self.assertIn('one', result)
self.assertIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [1, 2, 3])
self.assertIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertIn('key2', result['obj1'])
# bool assertions
self.assertIn('b1', result)
self.assertIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_conditional(self):
self.assertTrue(conditional(10, 10))
self.assertTrue(conditional('10', '10'))
self.assertTrue(conditional('foo', 'foo'))
self.assertTrue(conditional(True, True))
self.assertTrue(conditional(False, False))
self.assertTrue(conditional(None, None))
self.assertTrue(conditional("ge(1)", 1))
self.assertTrue(conditional("gt(1)", 2))
self.assertTrue(conditional("le(2)", 2))
self.assertTrue(conditional("lt(3)", 2))
self.assertTrue(conditional("eq(1)", 1))
self.assertTrue(conditional("neq(0)", 1))
self.assertTrue(conditional("min(1)", 1))
self.assertTrue(conditional("max(1)", 1))
self.assertTrue(conditional("exactly(1)", 1))
def test_template(self):
tmpl = Template()
self.assertEqual('foo', tmpl('{{ test }}', {'test': 'foo'}))
| gpl-3.0 |
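The conditional() assertions above imply a small comparison DSL: a bare value means plain equality, while strings like ge(1) or neq(0) wrap an operator around the expected value. A minimal reimplementation consistent with these tests (a sketch only; the real helper lives in ansible.module_utils.network_common):
import operator
import re
_OPS = {'ge': operator.ge, 'gt': operator.gt, 'le': operator.le,
        'lt': operator.lt, 'eq': operator.eq, 'neq': operator.ne,
        'min': operator.ge, 'max': operator.le, 'exactly': operator.eq}
def conditional_sketch(expr, val):
    match = re.match(r'^(\w+)\((.+)\)$', str(expr))
    if match and match.group(1) in _OPS:
        # Operator form: compare the actual value against the wrapped one.
        return _OPS[match.group(1)](val, int(match.group(2)))
    return expr == val  # bare value: plain equality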
kaiserroll14/301finalproject | main/osx/main/requests/packages/__init__.py | 838 | 1384 | '''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys
try:
from . import urllib3
except ImportError:
import urllib3
sys.modules['%s.urllib3' % __name__] = urllib3
try:
from . import chardet
except ImportError:
import chardet
sys.modules['%s.chardet' % __name__] = chardet
| gpl-3.0 |
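The identity fix this stub performs can be demonstrated standalone (assumes both requests and a system-wide urllib3 are importable):
import sys
import urllib3                       # the distro-wide copy
# Register the global module under the vendored dotted name, exactly as
# the stub above does; both import paths now yield the same object.
sys.modules['requests.packages.urllib3'] = urllib3
import requests.packages.urllib3 as vendored
assert vendored is urllib3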
kata198/usrsvc | usrsvcmod/Monitoring/ActivityFile.py | 1 | 3670 | '''
Copyright (c) 2016 Tim Savannah All Rights Reserved.
This software is licensed under the terms of the GPLv3.
This may change at my discretion, retroactively, and without notice.
You should have received a copy of this with the source distribution as a file titled, LICENSE.
The most current license can be found at:
https://github.com/kata198/usrsvc/LICENSE
This location may need to be changed at some point in the future, in which case
you may email Tim Savannah <kata198 at gmail dot com>, or find the license on the
current website intended for distribution of usrsvc.
ActivityFileMonitor - Asserts that a specific file or directory should be modified within a certain threshold
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
import os
import time
from func_timeout import FunctionTimedOut
from . import MonitoringBase
from ..logging import logMsg, logErr
# TODO: We need to implement the check here as launching and joining on a thread, so that we don't lock up all monitoring if someone
# uses an NFS file on a disconnected device or anything else that will result in an indefinite uninterruptible ("D") state.
class ActivityFileMonitor(MonitoringBase):
'''
ActivityFileMonitor - Class for doing activity file monitoring
'''
def __init__(self, programName, activityFile, activityFileLimit):
MonitoringBase.__init__(self)
self.programName = programName
self.activityFile = activityFile
self.activityFileLimit = activityFileLimit
@classmethod
def createFromConfig(cls, programConfig):
if not programConfig.Monitoring.activityfile:
return None
return cls(programConfig.name, programConfig.Monitoring.activityfile, programConfig.Monitoring.activityfile_limit)
def shouldRestart(self, program=None):
'''
Returns True if activity file has not been modified within the threshold specified by activityfile_limit (should restart), otherwise False.
@param program - unused.
'''
activityFile = self.activityFile
activityFileLimit = self.activityFileLimit
programName = self.programName
if not activityFile:
# Yes this is checked twice if created through createFromConfig, but it may be called otherwise so better safe.
return False
try:
# If activity file is not present, this is a fail and we restart.
if not os.path.exists(activityFile):
self.setReason('Restarting %s because activity file ( %s ) does not exist\n' %(programName, activityFile,))
return True
# Gather the mtime and see if we are past the threshold
lastModified = os.stat(activityFile).st_mtime
now = time.time()
threshold = float(now - self.activityFileLimit)
if lastModified < threshold:
self.setReason('Restarting %s because it has not modified activity file ( %s ) in %.4f seconds. Limit is %d seconds.\n' %(programName, activityFile, float(now - lastModified), activityFileLimit) )
return True
except FunctionTimedOut:
logErr('MONITOR: ActivityFile timed out on %s\n' %(programName,))
raise
except Exception as e:
# If we got an exception, just log and try again next round.
logErr('Got an exception in activity file monitoring. Not restarting program. Program="%s" activityfile="%s"\nlocals: %s\n' %(programName, activityFile, str(locals())))
return False
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
| lgpl-2.1 |
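A short usage sketch for the monitor above (path and limit are illustrative; restartProgram is a hypothetical supervisor hook, not part of usrsvc):
# Standalone construction, bypassing the usrsvc config layer:
mon = ActivityFileMonitor('myprog', '/var/run/myprog/heartbeat', 300)
# Poll from the supervisor loop: True means the heartbeat file is missing
# or has not been touched within the last 300 seconds.
if mon.shouldRestart():
    restartProgram('myprog')  # hypothetical supervisor hook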
johnsensible/django-sendfile | examples/protected_downloads/settings.py | 4 | 2706 | # Django settings for protected_downloads project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'download.db'),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n309^dwk=@+g72ko--8vjyz&1v0u%xf#*0=wzr=2n#f3hb0a=l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'protected_downloads.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'download',
'sendfile',
)
# SENDFILE settings
SENDFILE_BACKEND = 'sendfile.backends.development'
#SENDFILE_BACKEND = 'sendfile.backends.xsendfile'
#SENDFILE_BACKEND = 'sendfile.backends.nginx'
SENDFILE_ROOT = os.path.join(PROJECT_ROOT, 'protected')
SENDFILE_URL = '/protected'
| bsd-3-clause |
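With these settings a protected view delegates delivery to the configured backend instead of streaming the file through Django. A minimal view sketch using django-sendfile's sendfile() helper (the URL wiring and filename argument are illustrative):
import os.path
from django.conf import settings
from django.contrib.auth.decorators import login_required
from sendfile import sendfile
@login_required
def download(request, filename):
    # The backend picks the mechanism: plain streaming in development,
    # X-Sendfile / X-Accel-Redirect headers under Apache / nginx.
    return sendfile(request, os.path.join(settings.SENDFILE_ROOT, filename),
                    attachment=True)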
stefanoteso/musm-adt17 | musm/pc.py | 1 | 4018 | import numpy as np
import gurobipy as gurobi
from .problem import Problem
class PC(Problem):
_ATTRIBUTES = [
('cpu', 37),
('hd', 10),
('manufacturer', 8),
('ram', 10),
('monitor', 8),
('pctype', 3),
]
_ATTR_TO_COSTS = {
'pctype': [50, 0, 80],
'manufacturer': [100, 0, 100, 50, 0, 0, 50, 50],
'cpu' : [
1.4*100, 1.4*130, 1.1*70, 1.1*90, 1.2*80, 1.2*50, 1.2*60, 1.2*80,
1.2*90, 1.2*100, 1.2*110, 1.2*120, 1.2*130, 1.2*140, 1.2*170,
1.5*50, 1.5*60, 1.5*80, 1.5*90, 1.5*100, 1.5*110, 1.5*130, 1.5*150,
1.5*160, 1.5*170, 1.5*180, 1.5*220, 1.4*27, 1.4*30, 1.4*40, 1.4*45,
1.4*50, 1.4*55, 1.4*60, 1.4*70, 1.6*70, 1.6*73,
],
'monitor': [
0.6*100, 0.6*104, 0.6*120, 0.6*133, 0.6*140, 0.6*150, 0.6*170,
0.6*210
],
'ram': [
0.8*64, 0.8*128, 0.8*160, 0.8*192, 0.8*256, 0.8*320, 0.8*384,
0.8*512, 0.8*1024, 0.8*2048
],
'hd': [
4*8, 4*10, 4*12, 4*15, 4*20, 4*30, 4*40, 4*60, 4*80, 4*120
],
}
def __init__(self, **kwargs):
super().__init__(sum(attr[1] for attr in self._ATTRIBUTES))
self.cost_matrix = np.hstack([
np.array(self._ATTR_TO_COSTS[attr], dtype=float)
for attr, _ in self._ATTRIBUTES
]).reshape((1, -1)) / 2754.4
def _add_constraints(self, model, x):
base, offs = 0, {}
for attr, size in self._ATTRIBUTES:
offs[attr] = base
x_attr = [x[z] for z in range(base, base + size)]
model.addConstr(gurobi.quicksum(x_attr) == 1)
base += size
def implies(head, body):
# NOTE here we subtract 1 from head and body bits because the bit
# numbers in the constraints were computed starting from one, to
# work in MiniZinc, while Gurobi expects them to start from zero
head = 1 - x[head - 1]
body = gurobi.quicksum([x[i - 1] for i in body])
return model.addConstr(head + body >= 1)
# Manufacturer -> Type
implies(offs['manufacturer'] + 2, [offs['pctype'] + i for i in [1, 2]])
implies(offs['manufacturer'] + 4, [offs['pctype'] + 1])
implies(offs['manufacturer'] + 6, [offs['pctype'] + 2])
implies(offs['manufacturer'] + 7, [offs['pctype'] + i for i in [1, 3]])
# Manufacturer -> CPU
implies(offs['manufacturer'] + 1, [offs['cpu'] + i for i in range(28, 37+1)])
implies(offs['manufacturer'] + 2, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 7, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 4, [offs['cpu'] + i for i in range(5, 27+1)])
implies(offs['manufacturer'] + 3, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 5, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 8, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 6, [offs['cpu'] + i for i in range(16, 27+1)])
# Type -> RAM
implies(offs['pctype'] + 1, [offs['ram'] + i for i in range(1, 9+1)])
implies(offs['pctype'] + 2, [offs['ram'] + i for i in [2, 5, 8, 9]])
implies(offs['pctype'] + 3, [offs['ram'] + i for i in [5, 8, 9, 10]])
# Type -> HD
implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['hd'] + i for i in range(5, 10+1)])
implies(offs['pctype'] + 3, [offs['hd'] + i for i in range(5, 10+1)])
# Type -> Monitor
implies(offs['pctype'] + 1, [offs['monitor'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['monitor'] + i for i in range(6, 8+1)])
implies(offs['pctype'] + 3, [offs['monitor'] + i for i in range(6, 8+1)])
| mit |
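The implies(head, body) helper above linearizes the Boolean implication head -> (b1 or ... or bk) over binary variables as (1 - x_head) + sum(x_bi) >= 1. The same trick in isolation (a sketch with illustrative variable names):
import gurobipy as gurobi
m = gurobi.Model()
a = m.addVar(vtype=gurobi.GRB.BINARY, name='a')
b = m.addVar(vtype=gurobi.GRB.BINARY, name='b')
c = m.addVar(vtype=gurobi.GRB.BINARY, name='c')
# a == 1 forces b + c >= 1; a == 0 leaves b and c unconstrained.
m.addConstr((1 - a) + b + c >= 1, name='a_implies_b_or_c')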
procangroup/edx-platform | openedx/core/djangoapps/programs/signals.py | 11 | 1873 | """
This module contains signals / handlers related to programs.
"""
import logging
from django.dispatch import receiver
from openedx.core.djangoapps.signals.signals import COURSE_CERT_AWARDED
LOGGER = logging.getLogger(__name__)
@receiver(COURSE_CERT_AWARDED)
def handle_course_cert_awarded(sender, user, course_key, mode, status, **kwargs): # pylint: disable=unused-argument
"""
If programs is enabled and a learner is awarded a course certificate,
schedule a celery task to process any programs certificates for which
the learner may now be eligible.
Args:
sender:
class of the object instance that sent this signal
user:
django.contrib.auth.User - the user to whom a cert was awarded
course_key:
refers to the course run for which the cert was awarded
mode:
mode / certificate type, e.g. "verified"
status:
either "downloadable" or "generating"
Returns:
None
"""
# Import here instead of top of file since this module gets imported before
# the credentials app is loaded, resulting in a Django deprecation warning.
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
# Avoid scheduling new tasks if certification is disabled.
if not CredentialsApiConfig.current().is_learner_issuance_enabled:
return
# schedule background task to process
LOGGER.debug(
'handling COURSE_CERT_AWARDED: username=%s, course_key=%s, mode=%s, status=%s',
user,
course_key,
mode,
status,
)
# import here, because signal is registered at startup, but items in tasks are not yet able to be loaded
from openedx.core.djangoapps.programs.tasks.v1.tasks import award_program_certificates
award_program_certificates.delay(user.username)
| agpl-3.0 |
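For reference, the handler fires whenever the signal is emitted; tests usually send it directly with the keyword arguments the handler declares (the course key is shown as a plain string for brevity; production code passes a CourseKey object):
from openedx.core.djangoapps.signals.signals import COURSE_CERT_AWARDED
COURSE_CERT_AWARDED.send(
    sender=None,
    user=some_user,                  # a django User instance, assumed in scope
    course_key='course-v1:edX+DemoX+Demo_2017',
    mode='verified',
    status='downloadable',
)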
hbrunn/OpenUpgrade | addons/hr_holidays/tests/test_holidays_flow.py | 44 | 10276 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_leave_request_flow(self):
""" Testing leave request flow """
cr, uid = self.cr, self.uid
def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
self.assertEqual(holiday_status.max_leaves, ml,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.leaves_taken, lt,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.remaining_leaves, rl,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
'hr_holidays: wrong type days computation')
# HrUser creates some holiday statuses -> crash because only HrManagers should do this
with self.assertRaises(AccessError):
self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
'name': 'UserCheats',
'limit': True,
})
# HrManager creates some holiday statuses
self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'WithMeetingType',
'limit': True,
'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
})
self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'NotLimited',
'limit': True,
})
self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'Limited',
'limit': False,
'double_validation': True,
})
# --------------------------------------------------
# Case1: unlimited type of leave request
# --------------------------------------------------
# Employee creates a leave request for another employee -> should crash
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol10',
'employee_id': self.employee_hruser_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a no-limit category
hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# Employee validates its leave request -> should not work
self.hr_holidays.signal_validate(cr, self.user_employee_id, [hol1_id])
hol1.refresh()
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
        # HrManager validates the employee leave request
self.hr_holidays.signal_validate(cr, self.user_hrmanager_id, [hol1_id])
hol1.refresh()
self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
# --------------------------------------------------
# Case2: limited type of leave request
# --------------------------------------------------
        # Employee creates a new leave request at the same time -> crash, avoid overlapping leaves
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol21',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a limited category -> crash, not enough days left
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=1)),
'number_of_days_temp': 1,
})
# Clean transaction
self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))
# HrUser allocates some leaves to the employee
aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'type': 'add',
'number_of_days_temp': 2,
})
# HrUser validates the allocation request
self.hr_holidays.signal_validate(cr, self.user_hruser_id, [aloc1_id])
self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [aloc1_id])
# Checks Employee has effectively some days left
hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days_temp': 1,
})
hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
# Check left days: - 1 virtual remaining day
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)
# HrUser validates the first step
self.hr_holidays.signal_validate(cr, self.user_hruser_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrUser validates the second step
self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
self.hr_holidays.signal_refuse(cr, self.user_hrmanager_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
self.hr_holidays.signal_reset(cr, self.user_hruser_id, [hol2_id])
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: hr_user should not be able to reset a refused leave request')
# HrManager resets the request
self.hr_holidays.signal_reset(cr, self.user_hrmanager_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'draft',
'hr_holidays: resetting should lead to draft state')
# HrManager changes the date and put too much days -> crash when confirming
self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=7)),
'number_of_days_temp': 4,
})
with self.assertRaises(except_orm):
self.hr_holidays.signal_confirm(cr, self.user_hrmanager_id, [hol2_id])
| agpl-3.0 |
dpiers/coderang-meteor | public/jsrepl/extern/python/unclosured/lib/python2.7/glob.py | 173 | 2249 | """Filename globbing utility."""
import sys
import os
import re
import fnmatch
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
return list(iglob(pathname))
def iglob(pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.lexists(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
for name in glob1(os.curdir, basename):
yield name
return
if has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
dirname = os.curdir
if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
names = os.listdir(dirname)
except os.error:
return []
if pattern[0] != '.':
names = filter(lambda x: x[0] != '.', names)
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
| mit |
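Quick usage of the two entry points above (doctest-style sketch; actual matches depend on the working directory):
# >>> glob('*.py')                      # materializes the full match list
# ['fnmatch.py', 'glob.py']
# >>> for name in iglob('./[gf]*.py'):  # lazy, one path at a time
# ...     print(name)
# ./glob.py
# ./fnmatch.py
# A pattern without *, ? or [ skips globbing entirely and is checked
# with os.path.lexists().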
hikelee/launcher | launcher/templatetags/helpers.py | 1 | 6201 | """
sentry.templatetags.sentry_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import os.path
from collections import namedtuple
from datetime import timedelta
import pytz
import six
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from six.moves import range
from six.moves.urllib.parse import quote
from launcher.utils.strings import soft_break as _soft_break, soft_hyphenate, to_unicode, truncatechars
SentryVersion = namedtuple('SentryVersion', [
    'current',
    'latest',
    'update_available',
    'build',
])
register = template.Library()
truncatechars = register.filter(stringfilter(truncatechars))
truncatechars.is_safe = True
@register.filter
def multiply(x, y):
    def coerce(value):
        if isinstance(value, (six.integer_types, float)):
            return value
        try:
            return int(value)
        except ValueError:
            return float(value)
    return coerce(x) * coerce(y)
@register.filter
def pprint(value, break_after=10):
    """
    break_after is used to define how often a <span> is
    inserted (for soft wrapping).
    """
    value = to_unicode(value)
    return mark_safe(
        u'<span></span>'.join(
            [escape(value[i:(i + break_after)])
             for i in range(0, len(value), break_after)]
        )
    )
@register.filter
def is_url(value):
    if not isinstance(value, six.string_types):
        return False
    if not value.startswith(('http://', 'https://')):
        return False
    if ' ' in value:
        return False
    return True
# seriously Django?
@register.filter
def subtract(value, amount):
    return int(value) - int(amount)
@register.filter
def absolute_value(value):
    return abs(int(value) if isinstance(value, six.integer_types) else float(value))
@register.filter
def has_charts(group):
    from launcher.utils.db import has_charts
    if hasattr(group, '_state'):
        db = group._state.db or 'default'
    else:
        db = 'default'
    return has_charts(db)
@register.filter
def as_sorted(value):
    return sorted(value)
@register.filter
def small_count(v, precision=1):
    if not v:
        return 0
    z = [
        (1000000000, _('b')),
        (1000000, _('m')),
        (1000, _('k')),
    ]
    v = int(v)
    for x, y in z:
        o, p = divmod(v, x)
        if o:
            if len(six.text_type(o)) > 2 or not p:
                return '%d%s' % (o, y)
            return ('%.{}f%s'.format(precision)) % (v / float(x), y)
    return v
@register.filter
def num_digits(value):
    return len(six.text_type(value))
@register.filter
def to_str(data):
    return six.text_type(data)
@register.filter
def is_none(value):
    return value is None
@register.filter
def timesince(value, now=None):
    from django.template.defaultfilters import timesince
    if now is None:
        now = timezone.now()
    if not value:
        return _('never')
    if value < (now - timedelta(days=5)):
        return value.date()
    value = (' '.join(timesince(value, now).split(' ')[0:2])).strip(',')
    if value == _('0 minutes'):
        return _('just now')
    if value == _('1 day'):
        return _('yesterday')
    return value + _(' ago')
@register.filter
def duration(value):
    if not value:
        return '0s'
    # value is assumed to be in ms
    value = value / 1000.0
    hours, minutes, seconds = 0, 0, 0
    if value > 3600:
        hours = value / 3600
        value = value % 3600
    if value > 60:
        minutes = value / 60
        value = value % 60
    seconds = value
    output = []
    if hours:
        output.append('%dh' % hours)
    if minutes:
        output.append('%dm' % minutes)
    if seconds > 1:
        output.append('%0.2fs' % seconds)
    elif seconds:
        output.append('%dms' % (seconds * 1000))
    return ''.join(output)
@register.filter
def date(dt, arg=None):
    from django.template.defaultfilters import date
    if not timezone.is_aware(dt):
        dt = dt.replace(tzinfo=timezone.utc)
    return date(dt, arg)
@register.filter
def trim_schema(value):
    return value.split('//', 1)[-1]
@register.filter
def with_metadata(group_list, request):
    group_list = list(group_list)
    if request.user.is_authenticated() and group_list:
        project = group_list[0].project
        bookmarks = set(
            project.bookmark_set.filter(
                user=request.user,
                group__in=group_list,
            ).values_list('group_id', flat=True)
        )
    else:
        bookmarks = set()
    # TODO(dcramer): this is obsolete and needs to pull from the tsdb backend
    historical_data = {}
    for g in group_list:
        yield g, {
            'is_bookmarked': g.pk in bookmarks,
            'historical_data': ','.join(six.text_type(x[1]) for x in historical_data.get(g.id, [])),
        }
@register.simple_tag
def percent(value, total, format=None):
    if not (value and total):
        result = 0
    else:
        result = int(value) / float(total) * 100
    if format is None:
        return int(result)
    else:
        return ('%%%s' % format) % result
@register.filter
def titlize(value):
    return value.replace('_', ' ').title()
@register.filter
def split(value, delim=''):
    return value.split(delim)
@register.inclusion_tag('sentry/partial/github_button.html')
def github_button(user, repo):
    return {
        'user': user,
        'repo': repo,
    }
@register.filter
def urlquote(value, safe=''):
    return quote(value.encode('utf8'), safe)
@register.filter
def basename(value):
    return os.path.basename(value)
@register.filter
def user_display_name(user):
    return user.name or user.username
@register.simple_tag(takes_context=True)
def localized_datetime(context, dt, format='DATETIME_FORMAT'):
    request = context['request']
    timezone = getattr(request, 'timezone', None)
    if not timezone:
        timezone = pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE)
    dt = dt.astimezone(timezone)
    return date(dt, format)
@register.filter
def format_userinfo(user):
    parts = user.username.split('@')
    if len(parts) == 1:
        username = user.username
    else:
        username = parts[0].lower()
    return mark_safe('<span title="%s">%s</span>' % (escape(user.username), escape(username)))
@register.filter
def soft_break(value, length):
    return _soft_break(
        value,
        length,
        functools.partial(soft_hyphenate, length=max(length // 10, 10)),
    )
| mit |
zubron/servo | components/script/dom/bindings/codegen/BindingGen.py | 150 | 1729 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
sys.path.append(os.path.join(".", "parser"))
sys.path.append(os.path.join(".", "ply"))
import cPickle
from Configuration import Configuration
from CodegenRust import CGBindingRoot, replaceFileIfChanged
def generate_binding_rs(config, outputprefix, webidlfile):
"""
|config| Is the configuration object.
|outputprefix| is a prefix to use for the header guards and filename.
"""
filename = outputprefix + ".rs"
module = CGBindingRoot(config, outputprefix, webidlfile).define()
if not module:
print "Skipping empty module: %s" % (filename)
elif replaceFileIfChanged(filename, module):
print "Generating binding implementation: %s" % (filename)
def main():
# Parse arguments.
from optparse import OptionParser
usagestring = "usage: %prog configFile outputdir outputPrefix webIDLFile"
o = OptionParser(usage=usagestring)
(options, args) = o.parse_args()
if len(args) != 4:
o.error(usagestring)
configFile = os.path.normpath(args[0])
outputdir = args[1]
outputPrefix = args[2]
webIDLFile = os.path.normpath(args[3])
# Load the parsing results
resultsPath = os.path.join(outputdir, 'ParserResults.pkl')
with open(resultsPath, 'rb') as f:
parserData = cPickle.load(f)
# Create the configuration data.
config = Configuration(configFile, parserData)
# Generate the prototype classes.
generate_binding_rs(config, outputPrefix, webIDLFile)
if __name__ == '__main__':
main()
| mpl-2.0 |
louyihua/edx-platform | lms/djangoapps/survey/migrations/0001_initial.py | 50 | 2289 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
import xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SurveyAnswer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('field_name', models.CharField(max_length=255, db_index=True)),
('field_value', models.CharField(max_length=1024)),
('course_key', xmodule_django.models.CourseKeyField(max_length=255, null=True, db_index=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SurveyForm',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', models.CharField(unique=True, max_length=255, db_index=True)),
('form', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='surveyanswer',
name='form',
field=models.ForeignKey(to='survey.SurveyForm'),
),
migrations.AddField(
model_name='surveyanswer',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
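# Net effect of this migration (descriptive note, not part of the generated
# schema): with Django's default table naming it creates survey_surveyform
# (a unique, indexed name plus the serialized form text) and
# survey_surveyanswer, where each answer row links a user and a form and
# stores one field_name/field_value pair, optionally scoped to a course_key.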
| agpl-3.0 |
scue/vim-ycm_win7 | third_party/requests/requests/packages/urllib3/connectionpool.py | 223 | 25767 | # urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
    import Queue as _unused_module_Queue  # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
assert_fingerprint,
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
# httplib doesn't like it when we include brackets in ipv6 addresses
host = host.strip('[]')
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
"""
scheme = 'http'
ConnectionCls = HTTPConnection
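    # Minimal usage sketch (illustrative; the host, path and pool size are
    # assumptions, not part of this module):
    #
    #   pool = HTTPConnectionPool('example.com', maxsize=2, block=True)
    #   response = pool.urlopen('GET', '/')
    #   print(response.status)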
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
**extra_params)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
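        # Either form is accepted wherever a timeout is passed (values are
        # illustrative):
        #
        #   pool.urlopen('GET', '/', timeout=2.5)
        #   pool.urlopen('GET', '/', timeout=Timeout(connect=2.0, read=7.0))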
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
try:
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if hasattr(conn, 'sock'):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except SocketTimeout:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
except BaseSSLError as e:
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.")
raise
except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue
raise EmptyPoolError(self, "No pool connections are available.")
except BaseSSLError as e:
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except TimeoutError as e:
# Connection broken, discard.
conn = None
# Save the error off for retry logic.
err = e
if retries == 0:
raise
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
if isinstance(e, SocketError) and self.proxy is not None:
raise ProxyError('Cannot connect to proxy. '
'Socket error: %s.' % e)
else:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
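        # Sketch of typical urlopen() calls (illustrative): a plain request,
        # and one that defers reading the body, in which case the caller
        # releases the connection explicitly:
        #
        #   r = pool.urlopen('GET', '/', retries=2)
        #   r = pool.urlopen('GET', '/', preload_content=False)
        #   data = r.read(); r.release_conn()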
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, _proxy, _proxy_headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
**extra_params)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return self._prepare_conn(conn)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
| gpl-3.0 |
kevclarx/ansible | lib/ansible/template/vars.py | 35 | 3911 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.utils import missing
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars:
'''
Helper class to template all variable content before jinja2 sees it. This is
done by hijacking the variable storage that jinja2 uses, and overriding __contains__
and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large
hashes that inject tends to be.
To facilitate using builtin jinja2 things like range, globals are also handled here.
'''
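    # Lookup order implemented by __getitem__ below: the templar's available
    # variables win, then locals, then any extra scopes, then jinja2 globals;
    # values fetched from the templar are templated on access.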
def __init__(self, templar, globals, locals=None, *extras):
'''
Initializes this object with a valid Templar() object, as
well as several dictionaries of variables representing
different scopes (in jinja2 terminology).
'''
self._templar = templar
self._globals = globals
self._extras = extras
self._locals = dict()
if isinstance(locals, dict):
for key, val in iteritems(locals):
if val is not missing:
if key[:2] == 'l_':
self._locals[key[2:]] = val
elif key not in ('context', 'environment', 'template'):
self._locals[key] = val
def __contains__(self, k):
if k in self._templar._available_variables:
return True
if k in self._locals:
return True
for i in self._extras:
if k in i:
return True
if k in self._globals:
return True
return False
def __getitem__(self, varname):
if varname not in self._templar._available_variables:
if varname in self._locals:
return self._locals[varname]
for i in self._extras:
if varname in i:
return i[varname]
if varname in self._globals:
return self._globals[varname]
else:
raise KeyError("undefined variable: %s" % varname)
variable = self._templar._available_variables[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
from ansible.vars.hostvars import HostVars
if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
return variable
else:
value = None
try:
value = self._templar.template(variable)
except Exception as e:
raise type(e)(to_native(variable) + ': ' + e.message)
return value
def add_locals(self, locals):
'''
If locals are provided, create a copy of self containing those
locals in addition to what is already in this variable proxy.
'''
if locals is None:
return self
return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
| gpl-3.0 |
admcrae/tensorflow | tensorflow/contrib/keras/python/keras/__init__.py | 29 | 1864 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Keras API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import applications
from tensorflow.contrib.keras.python.keras import backend
from tensorflow.contrib.keras.python.keras import callbacks
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import datasets
from tensorflow.contrib.keras.python.keras import engine
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import layers
from tensorflow.contrib.keras.python.keras import losses
from tensorflow.contrib.keras.python.keras import metrics
from tensorflow.contrib.keras.python.keras import models
from tensorflow.contrib.keras.python.keras import optimizers
from tensorflow.contrib.keras.python.keras import preprocessing
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras import utils
from tensorflow.contrib.keras.python.keras import wrappers
__version__ = '2.0.2-tf'
| apache-2.0 |
sasukeh/acos-client | acos_client/v21/device_info.py | 4 | 1044 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base
class DeviceInfo(base.BaseV21):
def get(self, **kwargs):
return self._get('system.device_info.get', **kwargs)
def cpu_current_usage(self, **kwargs):
return self._get('system.device_info.cpu.current_usage.get',
**kwargs)
def cpu_historical_usage(self, **kwargs):
return self._get('system.device_info.cpu.historical_usage.get',
**kwargs)
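# Hedged usage sketch: the constructor call below reflects the usual
# acos-client entry point, but it is an assumption, not defined in this
# module (host and credentials are placeholders):
#
#   import acos_client
#   c = acos_client.Client('10.0.0.1', acos_client.AXAPI_21, 'admin', 'a10')
#   info = c.system.device_info.get()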
| apache-2.0 |
vigneshkarthi/satireguru | satire-bot.py | 1 | 3178 | import twitter
import yaml
import time
import pickle
import re
global match, api, msg, oldID
import random
msg = ''
#RegEx for parsing twitter handles from retrieved tweets (kept for reference)
keyword = ''
#UTF_CHARS = ur'a-z0-9_\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u00ff'
#TAG_EXP = ur'(^|[^0-9A-Z&/]+)(#|\uff03)([0-9A-Z_]*[A-Z_]+[%s]*)' % UTF_CHARS
#TAG_REGEX = re.compile(TAG_EXP, re.UNICODE | re.IGNORECASE)
#Performs OAuth authentication, place all the necessary keys in access.yaml
def authenticate():
global api
data = yaml.load(open("access.yaml"))
api = twitter.Api(consumer_key=data['consumer-key'],consumer_secret=data['consumer-secret'],access_token_key=data['access-key'],access_token_secret=data['access-secret'])
#Parses response.yaml to search and reply with relevant messages according to twitter handles, fill your responses in response.yaml
def choose_reply():
global match, msg
comments = yaml.load(open("response.yaml"))
for name in comments['name']:
if(name['keyword']==match):
msg = random.choice(name['response'])
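#Implied layout of response.yaml, reconstructed from the lookups above
#(the keyword and reply strings are illustrative):
# name:
#  - keyword: PM
#    response:
#     - "first canned reply"
#     - "second canned reply"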
#Module which checks for mentions and replies to the mentioner and the person mentioned
#current version supports only one mentioned person
def get_and_post_replies(old):
cache_msg_to_post = ' '
global match, api
while(1):
try:
i = 0
repl = api.GetMentions()
total = len(repl)
newID = int(repl[i].id)
while(newID != old):
print repl[i].text+", by @"+repl[i].user.screen_name
if "pm" in repl[i].text.lower():
match = 'PM'
print "Match is", match
choose_reply()
msg_to_post = "@"+repl[i].user.screen_name+" "+msg
if(msg_to_post == cache_msg_to_post):
                    msg_to_post = msg_to_post + str(random.randint(0,1000))  # append a random suffix so Twitter does not reject a duplicate status
cache_msg_to_post = msg_to_post
try:
api.PostUpdate(msg_to_post, in_reply_to_status_id=repl[i].id)
print "Msg posted is", msg_to_post
i = i+1
if (total == i):
break
newID = int(repl[i].id)
                except twitter.TwitterError:
                    print "Something happened.. Saving ID's to file.. Not to Worry"
fileObj = open("idstore",'r+')
old = repl[0].id
fileObj.seek(0)
fileObj.write(str(old))
fileObj.close()
return
else:
i = i + 1
if (total == i):
break
                    newID = int(repl[i].id)
old = int(repl[0].id)
print "No New Tweets !!"
print "Gonna sleep for a minute :)"
time.sleep(60)
except KeyboardInterrupt:
fileObj = open("idstore", 'r+')
fileObj.seek(0)
fileObj.write(str(old))
print "Saving ID's to file.. Exiting!!"
return
authenticate()
fileObj = open("idstore",'r+')
old = fileObj.read()
old = int(old)
get_and_post_replies(old)
| gpl-2.0 |
fuselock/odoo | addons/mrp_repair/mrp_repair.py | 148 | 36935 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from datetime import datetime
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class mrp_repair(osv.osv):
_name = 'mrp.repair'
_inherit = 'mail.thread'
_description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
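    # Worked example (illustrative figures): a single operation line with a
    # subtotal of 100.0 and a 15% tax on it yields _amount_untaxed = 100.0,
    # _amount_tax = 15.0, and _amount_total = round(100.0 + 15.0) = 115.0 in
    # the pricelist currency.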
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['default'])['default']
res[data.id] = adr_id
return res
def _get_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('operations', 'in', ids)], context=context)
def _get_fee_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('fees_lines', 'in', ids)], context=context)
_columns = {
'name': fields.char('Repair Reference', required=True, states={'confirmed': [('readonly', True)]}, copy=False),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, help='Choose partner for whom the order will be invoiced and delivered.', states={'confirmed': [('readonly', True)]}),
'address_id': fields.many2one('res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner"),
'state': fields.selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' status is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed. \
\n* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done. \
\n* The \'Done\' status is set when repairing is completed.\
\n* The \'Cancelled\' status is used when user cancel repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, required=True, readonly=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'lot_id': fields.many2one('stock.production.lot', 'Repaired Lot', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot", oldname="prodlot_id"),
'guarantee_limit': fields.date('Warranty Expiration', states={'confirmed': [('readonly', True)]}),
'operations': fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'move_id': fields.many2one('stock.move', 'Move', readonly=True, help="Move created by the repair order", track_visibility="onchange", copy=False),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'repaired': fields.boolean('Repaired', readonly=True, copy=False),
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'state': lambda *a: 'draft',
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid, context: self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')])[0],
'product_qty': 1.0,
'location_id': _default_stock_location,
}
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
product = False
if product_id:
product = self.pool.get("product.product").browse(cr, uid, product_id)
return {'value': {
'guarantee_limit': False,
'lot_id': False,
'product_uom': product and product.uom_id.id or False,
}
}
def onchange_product_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_location_id(self, cr, uid, ids, location_id=None):
""" On change of location
"""
return {'value': {'location_dest_id': location_id}}
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'value': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [('type', '=', 'sale')])[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'default'])
partner = part_obj.browse(cr, uid, part)
pricelist = partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': addr['delivery'] or addr['default'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state': 'draft'})
return self.create_workflow(cr, uid, ids)
def action_confirm(self, cr, uid, ids, *args):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for o in self.browse(cr, uid, ids):
if (o.invoice_method == 'b4repair'):
self.write(cr, uid, [o.id], {'state': '2binvoiced'})
else:
self.write(cr, uid, [o.id], {'state': 'confirmed'})
for line in o.operations:
if line.product_id.track_production and not line.lot_id:
raise osv.except_osv(_('Warning!'), _("Serial number is required for operation line with product '%s'") % (line.product_id.name))
mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
if not repair.invoiced:
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'cancel'}, context=context)
else:
raise osv.except_osv(_('Warning!'), _('Repair order is already invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def wkf_invoice_create(self, cr, uid, ids, *args):
self.action_invoice_create(cr, uid, ids)
return True
def action_invoice_create(self, cr, uid, ids, group=False, context=None):
""" Creates invoice(s) for repair order.
@param group: It is set to true when group invoice is to be generated.
@return: Invoice Ids.
"""
res = {}
invoices_group = {}
inv_line_obj = self.pool.get('account.invoice.line')
inv_obj = self.pool.get('account.invoice')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_fee_obj = self.pool.get('mrp.repair.fee')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = False
if repair.state in ('draft', 'cancel') or repair.invoice_id:
continue
if not (repair.partner_id.id and repair.partner_invoice_id.id):
raise osv.except_osv(_('No partner!'), _('You have to select a Partner Invoice Address in the repair form!'))
comment = repair.quotation_notes
if (repair.invoice_method != 'none'):
if group and repair.partner_invoice_id.id in invoices_group:
inv_id = invoices_group[repair.partner_invoice_id.id]
invoice = inv_obj.browse(cr, uid, inv_id)
invoice_vals = {
'name': invoice.name + ', ' + repair.name,
'origin': invoice.origin + ', ' + repair.name,
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
}
inv_obj.write(cr, uid, [inv_id], invoice_vals, context=context)
else:
if not repair.partner_id.property_account_receivable:
raise osv.except_osv(_('Error!'), _('No account defined for partner "%s".') % repair.partner_id.name)
account_id = repair.partner_id.property_account_receivable.id
inv = {
'name': repair.name,
'origin': repair.name,
'type': 'out_invoice',
'account_id': account_id,
'partner_id': repair.partner_invoice_id.id or repair.partner_id.id,
'currency_id': repair.pricelist_id.currency_id.id,
'comment': repair.quotation_notes,
'fiscal_position': repair.partner_id.property_account_position.id
}
inv_id = inv_obj.create(cr, uid, inv)
invoices_group[repair.partner_invoice_id.id] = inv_id
self.write(cr, uid, repair.id, {'invoiced': True, 'invoice_id': inv_id})
for operation in repair.operations:
if operation.to_invoice:
if group:
name = repair.name + '-' + operation.name
else:
name = operation.name
if operation.product_id.property_account_income:
account_id = operation.product_id.property_account_income.id
elif operation.product_id.categ_id.property_account_income_categ:
account_id = operation.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % operation.product_id.name)
invoice_line_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': operation.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in operation.tax_id])],
'uos_id': operation.product_uom.id,
'price_unit': operation.price_unit,
'price_subtotal': operation.product_uom_qty * operation.price_unit,
'product_id': operation.product_id and operation.product_id.id or False
})
repair_line_obj.write(cr, uid, [operation.id], {'invoiced': True, 'invoice_line_id': invoice_line_id})
for fee in repair.fees_lines:
if fee.to_invoice:
if group:
name = repair.name + '-' + fee.name
else:
name = fee.name
if not fee.product_id:
raise osv.except_osv(_('Warning!'), _('No product defined on Fees!'))
if fee.product_id.property_account_income:
account_id = fee.product_id.property_account_income.id
elif fee.product_id.categ_id.property_account_income_categ:
account_id = fee.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % fee.product_id.name)
invoice_fee_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': fee.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in fee.tax_id])],
'uos_id': fee.product_uom.id,
'product_id': fee.product_id and fee.product_id.id or False,
'price_unit': fee.price_unit,
'price_subtotal': fee.product_uom_qty * fee.price_unit
})
repair_fee_obj.write(cr, uid, [fee.id], {'invoiced': True, 'invoice_line_id': invoice_fee_id})
inv_obj.button_reset_taxes(cr, uid, inv_id, context=context)
res[repair.id] = inv_id
return res
def action_repair_ready(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Ready'
@return: True
"""
for repair in self.browse(cr, uid, ids, context=context):
self.pool.get('mrp.repair.line').write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
self.write(cr, uid, [repair.id], {'state': 'ready'})
return True
def action_repair_start(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Under Repair'
@return: True
"""
repair_line = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
repair_line.write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
repair.write({'state': 'under_repair'})
return True
def action_repair_end(self, cr, uid, ids, context=None):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
val = {}
val['repaired'] = True
if (not order.invoiced and order.invoice_method == 'after_repair'):
val['state'] = '2binvoiced'
elif (not order.invoiced and order.invoice_method == 'b4repair'):
val['state'] = 'ready'
else:
pass
self.write(cr, uid, [order.id], val)
return True
def wkf_repair_done(self, cr, uid, ids, *args):
self.action_repair_done(cr, uid, ids)
return True
def action_repair_done(self, cr, uid, ids, context=None):
""" Creates stock move for operation and stock move for final product of repair order.
@return: Move ids of final products
"""
res = {}
move_obj = self.pool.get('stock.move')
repair_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
move_ids = []
for move in repair.operations:
move_id = move_obj.create(cr, uid, {
'name': move.name,
'product_id': move.product_id.id,
'restrict_lot_id': move.lot_id.id,
'product_uom_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
})
move_ids.append(move_id)
repair_line_obj.write(cr, uid, [move.id], {'move_id': move_id, 'state': 'done'}, context=context)
move_id = move_obj.create(cr, uid, {
'name': repair.name,
'product_id': repair.product_id.id,
'product_uom': repair.product_uom.id or repair.product_id.uom_id.id,
'product_uom_qty': repair.product_qty,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': repair.location_id.id,
'location_dest_id': repair.location_dest_id.id,
'restrict_lot_id': repair.lot_id.id,
})
move_ids.append(move_id)
move_obj.action_done(cr, uid, move_ids, context=context)
self.write(cr, uid, [repair.id], {'state': 'done', 'move_id': move_id}, context=context)
res[repair.id] = move_id
return res
class ProductChangeMixin(object):
def product_id_change(self, cr, uid, ids, pricelist, product, uom=False,
product_uom_qty=0, partner_id=False, guarantee_limit=False, context=None):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal.
@param pricelist: Pricelist of current record.
@param product: Changed id of product.
@param uom: UoM of current record.
@param product_uom_qty: Quantity of current record.
@param partner_id: Partner of current record.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values and warning message.
"""
result = {}
warning = {}
ctx = context and context.copy() or {}
ctx['uom'] = uom
if not product_uom_qty:
product_uom_qty = 1
result['product_uom_qty'] = product_uom_qty
if product:
product_obj = self.pool.get('product.product').browse(cr, uid, product, context=ctx)
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, product_obj.taxes_id, context=ctx)
result['name'] = product_obj.display_name
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id or False
if not pricelist:
warning = {
'title': _('No Pricelist!'),
'message':
_('You have to select a pricelist in the Repair form !\n'
'Please set one before choosing a product.')
}
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, product_uom_qty, partner_id, context=ctx)[pricelist]
if price is False:
warning = {
'title': _('No valid pricelist line found !'),
'message':
_("Couldn't find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
}
else:
result.update({'price_unit': price, 'price_subtotal': price * product_uom_qty})
return {'value': result, 'warning': warning}
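# Sketch of the onchange return convention implemented above (field values
# are illustrative): a successful call returns
#   {'value': {'name': 'Product X', 'product_uom': 1, 'price_unit': 20.0,
#              'price_subtotal': 40.0, ...}, 'warning': {}}
# and the client merges the 'value' dict back into the form fields.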
class mrp_repair_line(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.line'
_description = 'Repair Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
if line.to_invoice:
taxes = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, line.repair_id.partner_id)
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
else:
res[line.id] = 0
return res
_columns = {
'name': fields.char('Description', required=True),
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', ondelete='cascade', select=True),
'type': fields.selection([('add', 'Add'), ('remove', 'Remove')], 'Type', required=True),
'to_invoice': fields.boolean('To Invoice'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, select=True),
'move_id': fields.many2one('stock.move', 'Inventory Move', readonly=True, copy=False),
'lot_id': fields.many2one('stock.production.lot', 'Lot'),
'state': fields.selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')], 'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically as draft when repair order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when repair order in confirm status. \
\n* The \'Done\' status is set automatically when repair order is completed.\
\n* The \'Cancelled\' status is set automatically when user cancel repair order.'),
}
_defaults = {
'state': lambda *a: 'draft',
'product_uom_qty': lambda *a: 1,
}
def onchange_operation_type(self, cr, uid, ids, type, guarantee_limit, company_id=False, context=None):
""" On change of operation type it sets source location, destination location
and to invoice field.
@param product: Changed operation type.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values.
"""
if not type:
return {'value': {
'location_id': False,
'location_dest_id': False
}}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
location_id = location_obj.search(cr, uid, [('usage', '=', 'production')], context=context)
location_id = location_id and location_id[0] or False
if type == 'add':
# TOCHECK: Find stock location for user's company warehouse or
# repair order's company's warehouse (company_id field is added in fix of lp:831583)
args = company_id and [('company_id', '=', company_id)] or []
warehouse_ids = warehouse_obj.search(cr, uid, args, context=context)
stock_id = False
if warehouse_ids:
stock_id = warehouse_obj.browse(cr, uid, warehouse_ids[0], context=context).lot_stock_id.id
to_invoice = (guarantee_limit and datetime.strptime(guarantee_limit, '%Y-%m-%d') < datetime.now())
return {'value': {
'to_invoice': to_invoice,
'location_id': stock_id,
'location_dest_id': location_id
}}
scrap_location_ids = location_obj.search(cr, uid, [('scrap_location', '=', True)], context=context)
return {'value': {
'to_invoice': False,
'location_id': location_id,
'location_dest_id': scrap_location_ids and scrap_location_ids[0] or False,
}}
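# Illustrative sketch (not part of the original module): the onchange above
# follows the OpenERP 7 convention of returning {'value': {...}} so that the
# client overwrites the form fields. Assuming a cursor/uid pair is available,
# a call for an 'add' operation inside the guarantee period looks like:
#
#   line_obj = self.pool.get('mrp.repair.line')
#   res = line_obj.onchange_operation_type(cr, uid, [], 'add', '2099-01-01')
#   # res['value']['to_invoice'] is False (guarantee still running) and
#   # res['value']['location_id'] points at the warehouse stock location.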
class mrp_repair_fee(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.fee'
_description = 'Repair Fees Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
if line.to_invoice:
taxes = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, line.repair_id.partner_id)
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
else:
res[line.id] = 0
return res
_columns = {
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', required=True, ondelete='cascade', select=True),
'name': fields.char('Description', select=True, required=True),
'product_id': fields.many2one('product.product', 'Product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'price_unit': fields.float('Unit Price', required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes'),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'to_invoice': fields.boolean('To Invoice'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
}
_defaults = {
'to_invoice': lambda *a: True,
}
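# Sketch (assumption, not in the original file): 'price_subtotal' above is a
# function field backed by _amount_line, so reading it on a browse record
# triggers the tax computation and currency rounding, e.g.
#
#   fee = self.pool.get('mrp.repair.fee').browse(cr, uid, fee_id)
#   fee.price_subtotal   # 0 unless fee.to_invoice is set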
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
9miao/Firefly | gfirefly/server/server.py | 6 | 4947 | #coding:utf8
'''
Created on 2013-8-2
@author: lan (www.9miao.com)
'''
from gfirefly.netconnect.protoc import LiberateFactory
from flask import Flask
from gfirefly.distributed.root import PBRoot,BilateralFactory
from gfirefly.distributed.node import RemoteObject
from gfirefly.dbentrust.dbpool import dbpool
from gfirefly.dbentrust.memclient import mclient
from gfirefly.server.logobj import loogoo
from gfirefly.server.globalobject import GlobalObject
from gtwisted.utils import log
from gtwisted.core import reactor
from gfirefly.utils import services
import os,sys,affinity
reactor = reactor
def serverStop():
"""停止服务进程
"""
log.msg('stop')
if GlobalObject().stophandler:
GlobalObject().stophandler()
reactor.callLater(0.5,reactor.stop)
return True
class FFServer:
"""抽象出的一个服务进程
"""
def __init__(self):
'''
'''
        self.netfactory = None  # net front end
        self.root = None  # distributed root node
        self.webroot = None  # http service
        self.remote = {}  # remote nodes
self.master_remote = None
self.db = None
self.mem = None
self.servername = None
self.remoteportlist = []
def config(self, config, servername=None, dbconfig=None,
memconfig=None, masterconf=None):
        '''Configure the server.
        '''
GlobalObject().json_config = config
        netport = config.get('netport')  # client connection port
        webport = config.get('webport')  # http port
        rootport = config.get('rootport')  # root node configuration
        self.remoteportlist = config.get('remoteport', [])  # remote node configuration list
        if not servername:
            servername = config.get('name')  # server name
        logpath = config.get('log')  # log file path
        hasdb = config.get('db')  # database connection flag
        hasmem = config.get('mem')  # memcached connection flag
        app = config.get('app')  # entry module name
        cpuid = config.get('cpu')  # CPU affinity mask
        mreload = config.get('reload')  # name of the module to reload
self.servername = servername
if netport:
self.netfactory = LiberateFactory()
netservice = services.CommandService("netservice")
self.netfactory.addServiceChannel(netservice)
reactor.listenTCP(netport,self.netfactory)
if webport:
self.webroot = Flask("servername")
GlobalObject().webroot = self.webroot
reactor.listenWSGI(webport, self.webroot)
if rootport:
self.root = PBRoot()
rootservice = services.Service("rootservice")
self.root.addServiceChannel(rootservice)
reactor.listenTCP(rootport, BilateralFactory(self.root))
for cnf in self.remoteportlist:
rname = cnf.get('rootname')
self.remote[rname] = RemoteObject(self.servername)
if hasdb and dbconfig:
log.msg(str(dbconfig))
dbpool.initPool(**dbconfig)
if hasmem and memconfig:
urls = memconfig.get('urls')
hostname = str(memconfig.get('hostname'))
mclient.connect(urls, hostname)
if logpath:
            log.addObserver(loogoo(logpath))  # log handling
log.startLogging(sys.stdout)
if cpuid:
affinity.set_process_affinity_mask(os.getpid(), cpuid)
GlobalObject().config(netfactory = self.netfactory, root=self.root,
remote = self.remote)
if masterconf:
masterport = masterconf.get('rootport')
masterhost = masterconf.get('roothost')
self.master_remote = RemoteObject(servername)
addr = ('localhost',masterport) if not masterhost else (masterhost,masterport)
self.master_remote.connect(addr)
GlobalObject().masterremote = self.master_remote
import admin
if app:
__import__(app)
if mreload:
_path_list = mreload.split(".")
GlobalObject().reloadmodule = __import__(mreload,fromlist=_path_list[:1])
GlobalObject().remote_connect = self.remote_connect
def remote_connect(self, rname, rhost):
"""进行rpc的连接
"""
for cnf in self.remoteportlist:
_rname = cnf.get('rootname')
if rname == _rname:
rport = cnf.get('rootport')
if not rhost:
addr = ('localhost',rport)
else:
addr = (rhost,rport)
self.remote[rname].connect(addr)
break
def start(self):
        '''Start the server.
        '''
log.msg('[%s] started...'%self.servername)
log.msg('[%s] pid: %s'%(self.servername,os.getpid()))
reactor.run()
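# Minimal usage sketch (assumed entry script, not shipped with this module):
#
#   if __name__ == '__main__':
#       config = {'netport': 11009, 'webport': 9090, 'name': 'gateserver'}
#       server = FFServer()
#       server.config(config)
#       server.start()   # blocks inside reactor.run()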
| mit |
RafaelTorrealba/odoo | openerp/addons/test_new_api/models.py | 79 | 9125 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from openerp.exceptions import AccessError
##############################################################################
#
# OLD API
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
#
# add related fields to test them
#
_columns = {
# a regular one
'related_company_partner_id': fields.related(
'company_id', 'partner_id', type='many2one', obj='res.partner'),
# a related field with a single field
'single_related_company_id': fields.related(
'company_id', type='many2one', obj='res.company'),
# a related field with a single field that is also a related field!
'related_related_company_id': fields.related(
'single_related_company_id', type='many2one', obj='res.company'),
}
class TestFunctionCounter(osv.Model):
_name = 'test_old_api.function_counter'
def _compute_cnt(self, cr, uid, ids, fname, arg, context=None):
res = {}
for cnt in self.browse(cr, uid, ids, context=context):
res[cnt.id] = cnt.access and cnt.cnt + 1 or 0
return res
_columns = {
'access': fields.datetime('Datetime Field'),
'cnt': fields.function(
_compute_cnt, type='integer', string='Function Field', store=True),
}
class TestFunctionNoInfiniteRecursion(osv.Model):
_name = 'test_old_api.function_noinfiniterecursion'
def _compute_f1(self, cr, uid, ids, fname, arg, context=None):
res = {}
for tf in self.browse(cr, uid, ids, context=context):
res[tf.id] = 'create' in tf.f0 and 'create' or 'write'
cntobj = self.pool['test_old_api.function_counter']
cnt_id = self.pool['ir.model.data'].xmlid_to_res_id(
cr, uid, 'test_new_api.c1')
cntobj.write(
cr, uid, cnt_id, {'access': datetime.datetime.now()},
context=context)
return res
_columns = {
'f0': fields.char('Char Field'),
'f1': fields.function(
_compute_f1, type='char', string='Function Field', store=True),
}
##############################################################################
#
# NEW API
#
##############################################################################
from openerp import models, fields, api, _
class Category(models.Model):
_name = 'test_new_api.category'
name = fields.Char(required=True)
parent = fields.Many2one('test_new_api.category')
display_name = fields.Char(compute='_compute_display_name', inverse='_inverse_display_name')
discussions = fields.Many2many('test_new_api.discussion', 'test_new_api_discussion_category',
'category', 'discussion')
@api.one
@api.depends('name', 'parent.display_name') # this definition is recursive
def _compute_display_name(self):
if self.parent:
self.display_name = self.parent.display_name + ' / ' + self.name
else:
self.display_name = self.name
@api.one
def _inverse_display_name(self):
names = self.display_name.split('/')
# determine sequence of categories
categories = []
for name in names[:-1]:
category = self.search([('name', 'ilike', name.strip())])
categories.append(category[0])
categories.append(self)
# assign parents following sequence
for parent, child in zip(categories, categories[1:]):
if parent and child:
child.parent = parent
# assign name of last category, and reassign display_name (to normalize it)
self.name = names[-1].strip()
def read(self, fields=None, load='_classic_read'):
if self.search_count([('id', 'in', self._ids), ('name', '=', 'NOACCESS')]):
raise AccessError('Sorry')
return super(Category, self).read(fields, load)
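# Usage sketch (hypothetical data): assigning display_name runs the inverse
# above, which looks up each already-existing ancestor by name and re-parents
# the chain:
#
#   root = env['test_new_api.category'].create({'name': 'Root'})
#   cat = env['test_new_api.category'].create({'name': 'Child'})
#   cat.display_name = 'Root / Child'   # finds 'Root', sets cat.parent
#   cat.display_name                    # recomputed as 'Root / Child'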
class Discussion(models.Model):
_name = 'test_new_api.discussion'
name = fields.Char(string='Title', required=True,
help="General description of what this discussion is about.")
moderator = fields.Many2one('res.users')
categories = fields.Many2many('test_new_api.category',
'test_new_api_discussion_category', 'discussion', 'category')
participants = fields.Many2many('res.users')
messages = fields.One2many('test_new_api.message', 'discussion')
message_changes = fields.Integer(string='Message changes')
@api.onchange('moderator')
def _onchange_moderator(self):
self.participants |= self.moderator
@api.onchange('messages')
def _onchange_messages(self):
self.message_changes = len(self.messages)
class Message(models.Model):
_name = 'test_new_api.message'
discussion = fields.Many2one('test_new_api.discussion', ondelete='cascade')
body = fields.Text()
author = fields.Many2one('res.users', default=lambda self: self.env.user)
name = fields.Char(string='Title', compute='_compute_name', store=True)
display_name = fields.Char(string='Abstract', compute='_compute_display_name')
size = fields.Integer(compute='_compute_size', search='_search_size')
double_size = fields.Integer(compute='_compute_double_size')
discussion_name = fields.Char(related='discussion.name')
@api.one
@api.constrains('author', 'discussion')
def _check_author(self):
if self.discussion and self.author not in self.discussion.participants:
raise ValueError(_("Author must be among the discussion participants."))
@api.one
@api.depends('author.name', 'discussion.name')
def _compute_name(self):
self.name = "[%s] %s" % (self.discussion.name or '', self.author.name or '')
@api.one
@api.depends('author.name', 'discussion.name', 'body')
def _compute_display_name(self):
stuff = "[%s] %s: %s" % (self.author.name, self.discussion.name or '', self.body or '')
self.display_name = stuff[:80]
@api.one
@api.depends('body')
def _compute_size(self):
self.size = len(self.body or '')
def _search_size(self, operator, value):
if operator not in ('=', '!=', '<', '<=', '>', '>=', 'in', 'not in'):
return []
# retrieve all the messages that match with a specific SQL query
query = """SELECT id FROM "%s" WHERE char_length("body") %s %%s""" % \
(self._table, operator)
self.env.cr.execute(query, (value,))
ids = [t[0] for t in self.env.cr.fetchall()]
return [('id', 'in', ids)]
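    # Usage sketch: _search_size above makes the non-stored computed field
    # searchable, e.g.
    #   env['test_new_api.message'].search([('size', '>', 100)])
    # is rewritten into the char_length() SQL query and returned as an
    # id-list domain.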
@api.one
@api.depends('size')
def _compute_double_size(self):
# This illustrates a subtle situation: self.double_size depends on
# self.size. When size is computed, self.size is assigned, which should
# normally invalidate self.double_size. However, this may not happen
# while self.double_size is being computed: the last statement below
# would fail, because self.double_size would be undefined.
self.double_size = 0
size = self.size
self.double_size = self.double_size + size
class MixedModel(models.Model):
_name = 'test_new_api.mixed'
number = fields.Float(digits=(10, 2), default=3.14)
date = fields.Date()
now = fields.Datetime(compute='_compute_now')
lang = fields.Selection(string='Language', selection='_get_lang')
reference = fields.Reference(string='Related Document',
selection='_reference_models')
@api.one
def _compute_now(self):
# this is a non-stored computed field without dependencies
self.now = fields.Datetime.now()
@api.model
def _get_lang(self):
langs = self.env['res.lang'].search([])
return [(lang.code, lang.name) for lang in langs]
@api.model
def _reference_models(self):
models = self.env['ir.model'].search([('state', '!=', 'manual')])
return [(model.model, model.name)
for model in models
if not model.model.startswith('ir.')]
| agpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/django/db/migrations/state.py | 31 | 25662 | from __future__ import unicode_literals
import copy
from collections import OrderedDict
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, six.string_types):
split = model.split('.', 1)
return (tuple(split) if len(split) == 2 else (app_label, split[0]))
else:
return model._meta.app_label, model._meta.model_name
def get_related_models_recursive(model):
"""
Returns all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
def _related_models(m):
return [
f.related_model for f in m._meta.get_fields(include_parents=True, include_hidden=True)
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, six.string_types)
] + [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
seen = set()
queue = _related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
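# Illustrative example (hypothetical models): with a Rider model holding a
# ForeignKey to Pony, and Pony subclassing Animal,
# get_related_models_recursive(Animal) walks relational fields and
# __subclasses__() and returns (app_label, model_name) pairs such as
# {('app', 'pony'), ('app', 'rider')}, excluding the starting model itself.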
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def reload_model(self, app_label, model_name):
if 'apps' in self.__dict__: # hasattr would cache the property
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
related_models = set()
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for name, field in model_state.fields:
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all models states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"Returns an exact copy of this ProjectState"
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
return new_state
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super(AppConfigStub, self).__init__(label, None)
def import_models(self, all_models):
self.models = all_models
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
super(StateApps, self).__init__(app_configs)
self.render_multiple(list(models.values()) + self.real_models)
# There shouldn't be any operations pending at this point.
pending_models = set(self._pending_operations)
if ignore_swappable:
pending_models -= {make_model_tuple(settings.AUTH_USER_MODEL)}
if pending_models:
raise ValueError(self._pending_models_error(pending_models))
def _pending_models_error(self, pending_models):
"""
Almost all internal uses of lazy operations are to resolve string model
references in related fields. We can extract the fields from those
operations and use them to provide a nicer error message.
This will work for any function passed to lazy_related_operation() that
has a keyword argument called 'field'.
"""
def extract_field(operation):
# operation is annotated with the field in
# apps.registry.Apps.lazy_model_operation().
return getattr(operation, 'field', None)
def extract_field_names(operations):
return (str(field) for field in map(extract_field, operations) if field)
get_ops = self._pending_operations.__getitem__
# Ordered list of pairs of the form
# ((app_label, model_name), [field_name_1, field_name_2, ...])
models_fields = sorted(
(model_key, sorted(extract_field_names(get_ops(model_key))))
for model_key in pending_models
)
def model_text(model_key, fields):
field_list = ", ".join(fields)
field_text = " (referred to by fields: %s)" % field_list if fields else ""
return ("%s.%s" % model_key) + field_text
msg = "Unhandled pending operations for models:"
return "\n ".join([msg] + [model_text(*i) for i in models_fields])
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
if not model_states:
return
# Prevent that all model caches are expired for each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""
Return a clone of this registry, mainly used by the migration framework.
"""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].models = OrderedDict()
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
self.managers = managers or []
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
for name, field in fields:
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, OrderWrt):
continue
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = force_text(field.name, strings_only=True)
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
# Force-convert all options to text_type (#23226)
options = cls.force_text_recursive(options)
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
# Constructs all managers on the model
managers_mapping = {}
def reconstruct_manager(mgr):
as_manager, manager_path, qs_path, args, kwargs = mgr.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
instance = qs_class.as_manager()
else:
manager_class = import_string(manager_path)
instance = manager_class(*args, **kwargs)
# We rely on the ordering of the creation_counter of the original
# instance
name = force_text(mgr.name)
managers_mapping[name] = (mgr.creation_counter, instance)
if hasattr(model, "_default_manager"):
default_manager_name = force_text(model._default_manager.name)
# Make sure the default manager is always the first
if model._default_manager.use_in_migrations:
reconstruct_manager(model._default_manager)
else:
# Force this manager to be the first and thus default
managers_mapping[default_manager_name] = (0, models.Manager())
# Sort all managers by their creation counter
for _, manager, _ in sorted(model._meta.managers):
if manager.name == "_base_manager" or not manager.use_in_migrations:
continue
reconstruct_manager(manager)
# Sort all managers by their creation counter but take only name and
# instance for further processing
managers = [
(name, instance) for name, (cc, instance) in
sorted(managers_mapping.items(), key=lambda v: v[1])
]
# If the only manager on the model is the default manager defined
# by Django (`objects = models.Manager()`), this manager will not
# be added to the model state.
if managers == [('objects', models.Manager())]:
managers = []
else:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
@classmethod
def force_text_recursive(cls, value):
if isinstance(value, six.string_types):
return smart_text(value)
elif isinstance(value, list):
return [cls.force_text_recursive(x) for x in value]
elif isinstance(value, tuple):
return tuple(cls.force_text_recursive(x) for x in value)
elif isinstance(value, set):
return set(cls.force_text_recursive(x) for x in value)
elif isinstance(value, dict):
return {
cls.force_text_recursive(k): cls.force_text_recursive(v)
for k, v in value.items()
}
return value
def construct_managers(self):
"Deep-clone the managers using deconstruction"
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
mgr_name = force_text(mgr_name)
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"Returns an exact copy of this ModelState"
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=list(self.fields),
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = {name: field.clone() for name, field in self.fields}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def __repr__(self):
return "<ModelState: '%s.%s'>" % (self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
def __ne__(self, other):
return not (self == other)
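# Sketch (hypothetical model, not part of Django itself): a ModelState can be
# assembled by hand, mirroring what from_model() produces:
#
#   state = ModelState('myapp', 'Pony', [
#       ('id', models.AutoField(primary_key=True)),
#       ('name', models.CharField(max_length=100)),
#   ])
#   project = ProjectState(models={('myapp', 'pony'): state})
#   Pony = project.apps.get_model('myapp', 'pony')   # rendered fake model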
| mit |
dumbbell/virt-manager | src/virtManager/remote.py | 3 | 2157 | #
# Copyright (C) 2006 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import dbus.service
class vmmRemote(dbus.service.Object):
def __init__(self, engine, bus_name, object_path="/com/redhat/virt/manager"):
dbus.service.Object.__init__(self, bus_name, object_path)
self.engine = engine
@dbus.service.method("com.redhat.virt.manager", in_signature="s")
def show_domain_creator(self, uri):
self.engine.show_domain_creator(str(uri))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_editor(self, uri, uuid):
self.engine.show_domain_editor(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_performance(self, uri, uuid):
self.engine.show_domain_performance(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="ss")
def show_domain_console(self, uri, uuid):
self.engine.show_domain_console(str(uri), str(uuid))
@dbus.service.method("com.redhat.virt.manager", in_signature="s")
def show_host_summary(self, uri):
self.engine.show_host_summary(str(uri))
@dbus.service.method("com.redhat.virt.manager", in_signature="")
def show_manager(self):
self.engine.show_manager()
@dbus.service.method("com.redhat.virt.manager")
def show_connect(self):
self.engine.show_connect()
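# Usage sketch (the bus name is an assumption; only the object path above is
# taken from this file): another process could drive a running instance over
# D-Bus along these lines:
#
#   import dbus
#   bus = dbus.SessionBus()
#   proxy = bus.get_object("com.redhat.virt.manager",
#                          "/com/redhat/virt/manager")
#   proxy.show_manager(dbus_interface="com.redhat.virt.manager")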
| gpl-2.0 |
emilopez/pydem | pydem/examples/cross-tile_process_manager_test.py | 3 | 8813 | # -*- coding: utf-8 -*-
"""
Copyright 2015 Creare
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
if __name__ == "__main__":
import numpy as np
import os
from pydem.processing_manager import ProcessManager
from pydem.test_pydem import make_test_files, mk_test_multifile
#%% Make the test case files
NN = [300, 400, 660, 740]
test_num = 32
testdir = 'testtiff'
make_test_files(NN, testnum=test_num, testdir=testdir, plotflag=False)
mk_test_multifile(test_num, NN, testdir, nx_grid=3, ny_grid=4,
nx_overlap=16, ny_overlap=32)
path = r'testtiff\chunks'
# Remove a couple of these files so that we only have 4 tiles, and we
# know where they should drain to
files = os.listdir(path)
files.sort()
for i, fil in enumerate(files):
print i, fil
delete_ids = [0, 1, 2, 3, 4, 5, 6, 9]
for d_id in delete_ids:
os.remove(os.path.join(path, files[d_id]))
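    # Note (illustrative): the 3x4 grid above yields 12 chunk files; removing
    # the eight listed indices leaves a 2x2 block of tiles whose drainage
    # pattern is known in advance, which the assertions below depend on.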
# Create the ProcessManager object
savepath = r'testtiff\processed_data'
pm = ProcessManager(path, savepath)
pm._DEBUG = True # Save out the magnitude and slope
pm.elev_source_files.sort()
esfile = pm.elev_source_files[1] # Start with lower-left tile and go CCW
# Start twi calculation for first tile
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = \
pm.tile_edge.get_edge_init_data(esfile)
    # The only valuable information here is edge_init_todo, which is self-set.
    # In this case the right edge of the tile is the edge that needs
    # information, so the right todo should be True.
np.testing.assert_(np.all(edge_init_todo['right'][1:-1])) #don't look at corners
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1])) #don't look at corners
# Next we check that the right and top neighbors are correctly set also
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(~edge_init_done['bottom'][1:-1])) #don't look at corners
# stop
right = pm.tile_edge.neighbors[esfile]['right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(right)
np.testing.assert_(np.all(~edge_init_done['left'][1:-1])) #don't look at corners
topright = pm.tile_edge.neighbors[esfile]['top-right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(topright)
np.testing.assert_(np.all(~edge_init_done['left'][1:-1])) #don't look at corners
np.testing.assert_(np.all(~edge_init_done['bottom'][1:-1])) #don't look at corners
# pm.tile_edge.visualize_neighbors()
# do the next tile
esfile = pm.elev_source_files[0]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# Next we check that the left and top neighbors are correctly set also
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(edge_init_done['bottom']))
left = pm.tile_edge.neighbors[esfile]['left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
np.testing.assert_(np.all(edge_init_done['right']))
topleft = pm.tile_edge.neighbors[esfile]['top-left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(topleft)
np.testing.assert_(np.any(edge_init_done['right']))
np.testing.assert_(np.any(edge_init_done['bottom']))
# pm.tile_edge.visualize_neighbors()
# Do the third tile
esfile = pm.elev_source_files[2]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# Next we check that the left and top neighbors are correctly set also
left = pm.tile_edge.neighbors[esfile]['left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
np.testing.assert_(np.all(edge_init_done['right']))
bottomleft = pm.tile_edge.neighbors[esfile]['bottom-left']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(bottomleft)
np.testing.assert_(np.any(edge_init_done['right']))
np.testing.assert_(np.any(edge_init_done['top']))
# pm.tile_edge.visualize_neighbors()
# a1 = pm.dem_proc.uca.copy()
# esfile = pm.elev_source_files[2]
# coords1 = parse_fn(esfile)
# imshow(a1, interpolation='none',
# extent=[coords1[1], coords1[3], coords1[0], coords1[2]]);clim(0, a1.max())
# crds = pm.tile_edge.edges[left]['right'].get_coordinates()
# edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(left)
# imshow(edge_init_data['right'][:, None], interpolation='none',
# extent=[crds[:, 1].min(), crds[:, 1].max()+0.3 / a1.shape[0],
# crds[:, 0].min(), crds[:, 0].max()]);clim(0, a1.max())
# xlim(coords1[1], coords1[3])
# ylim(coords1[0], coords1[2])
#%%Do the final tile to complete the first round (non-edge resolving)
esfile = pm.elev_source_files[3]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=False)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.any(~edge_init_todo['bottom'][1:-1])) # mixed on bottom
np.testing.assert_(np.any(edge_init_todo['bottom'][1:-1])) # mixed on bottom
# This one has no neighbors to check (no downstream dependencies)
# a2 = pm.dem_proc.uca.copy()
# esfile = pm.elev_source_files[3]
# coords = parse_fn(esfile)
# imshow(a2, extent=[coords[1], coords[3], coords[0], coords[2]],
# interpolation='none');clim(0, a1.max())
# xlim(coords[1], coords1[3])
# Now let us start the edge resolution round. There are only 2 tiles that
# require edge resolution
# %%
i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
np.testing.assert_(i==1) # should be the first tile
esfile = pm.elev_source_files[i]
fn, status = pm.calculate_twi(esfile,
save_path=pm.save_path, do_edges=True)
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(esfile)
np.testing.assert_(np.all(~edge_init_todo['right'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['left'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['top'][1:-1]))
np.testing.assert_(np.all(~edge_init_todo['bottom'][1:-1]))
# check neihbors
top = pm.tile_edge.neighbors[esfile]['top']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(top)
np.testing.assert_(np.all(edge_init_done['bottom'][1:-1])) #don't look at corners
right = pm.tile_edge.neighbors[esfile]['right']
edge_init_data, edge_init_done, edge_init_todo = pm.tile_edge.get_edge_init_data(right)
np.testing.assert_(np.all(edge_init_done['left'][1:-1])) #don't look at corners
i = pm.tile_edge.find_best_candidate(pm.elev_source_files)
np.testing.assert_(i==3) # should be the last tile
esfile = pm.elev_source_files[i]
fn, status = pm.calculate_twi(esfile,
                                  save_path=pm.save_path, do_edges=True)
| apache-2.0 |
CLVsol/oehealth | oehealth_dispensation/oehealth_dispensation.py | 1 | 9325 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
import time
class oehealth_dispensation(osv.Model):
_name='oehealth.dispensation'
def _compute_create_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_uid = perms[0].get('create_uid', 'n/a')
result[r.id] = create_uid
return result
def _compute_create_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_date = perms[0].get('create_date', 'n/a')
result[r.id] = create_date
return result
def _compute_write_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_uid = perms[0].get('write_uid', 'n/a')
result[r.id] = write_uid
return result
def _compute_write_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_date = perms[0].get('write_date', 'n/a')
result[r.id] = write_date
return result
_columns={
'name': fields.char(size=256, string='Dispensation ID', required=True,
help='Type in the ID of this dispensation'),
'dispensation_date': fields.date(string='Dispensation Date', required=True),
'prescription_date': fields.date(string='Prescription Date', required=True),
'prescriber_id': fields.many2one('oehealth.prescriber', string='Prescriber', required=True),
#'patient_id': fields.many2one('oehealth.patient', string='Patient', required=True),
#'pregnancy_warning': fields.boolean(string='Pregancy Warning', readonly=True),
'notes': fields.text(string='Prescription Notes'),
#'prescription_line': fields.one2many('oehealth.dispensation.line',
# 'pbm_prescription_order_id',
# string='Dispensation line',),
'prescription_line': fields.one2many('oehealth.medicament.template',
'dispensation_id',
string='Prescription lines',),
#'pbm_prescription_warning_ack': fields.boolean(string='Dispensation verified'),
#'user_id': fields.many2one('res.users', string='Prescribing Doctor', required=True),
'active': fields.boolean('Active', help="The active field allows you to hide the dispensation without removing it."),
'state': fields.selection([('new','New'),
('revised','Revised'),
('waiting','Waiting'),
('okay','Okay')], 'Stage', readonly=True),
'create_uid': fields.function(_compute_create_uid, method=True, type='char', string='Create User',),
'create_date': fields.function(_compute_create_date, method=True, type='datetime', string='Create Date',),
'write_uid': fields.function(_compute_write_uid, method=True, type='char', string='Write User',),
'write_date': fields.function(_compute_write_date, method=True, type='datetime', string='Write Date',),
}
_sql_constraints = [
('uniq_name', 'unique(name)', "The Dispensation ID must be unique!"),
]
_defaults={
'name': '/',
'dispensation_date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'active': 1,
'state': 'new',
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not 'name' in vals or vals['name'] == '/':
val = self.pool.get('ir.sequence').get(cr, uid, 'oehealth.dispensation.code')
code = map(int, str(val))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
vals['name'] = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
vals['name'] = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
vals['name'] = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
vals['name'] = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
vals['name'] = code_str[14 - code_len:21]
return super(oehealth_dispensation, self).create(cr, uid, vals, context)
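    # Worked example (assuming the sequence returns '1'): the digits are
    # zero-padded to 14, two mod-11 check digits are appended (9, then 1),
    # and the grouped string becomes '00.000.000.000.001-91'; with
    # code_len == 1 the final slice keeps the tail, so the stored name
    # is '1-91'.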
def oehealth_dispensation_new(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'new'})
return True
def oehealth_dispensation_revised(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'revised'})
return True
def oehealth_dispensation_waiting(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'waiting'})
return True
def oehealth_dispensation_okay(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'okay'})
return True
def get_authorization(self, cr, uid, ids, context={}):
data=ids
test_request_obj = self.pool.get('oehealth.dispensation')
lab_obj = self.pool.get('oehealth.dispensation')
test_report_data={}
test_cases = []
test_obj = test_request_obj.browse(cr, uid, context.get('active_id'), context=context)
        if test_obj.state == 'tested':
            # a report already exists once the request has reached 'tested'
            raise osv.except_osv(('UserError'), ('Test Report already created.'))
test_report_data['test'] = test_obj.name.id
test_report_data['patient'] = test_obj.patient_id.id
#test_report_data['requestor'] = test_obj.doctor_id.id
test_report_data['date_requested'] = test_obj.date
for criterion in test_obj.name.criteria:
test_cases.append((0,0,{'name':criterion.name,
'sequence':criterion.sequence,
'normal_range':criterion.normal_range,
'unit':criterion.unit.id,
}))
test_report_data['criteria'] = test_cases
lab_id = lab_obj.create(cr,uid,test_report_data,context=context)
test_request_obj.write(cr, uid, context.get('active_id'), {'state':'tested'})
return {
'domain': "[('id','=', "+str(lab_id)+")]",
'name': 'Lab Test Report',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'oehealth.lab_test',
'type': 'ir.actions.act_window'
}
oehealth_dispensation()
| agpl-3.0 |
HonzaKral/django | django/conf/locale/nn/formats.py | 504 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
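# Example (illustrative): with the settings above, datetime.date(2006, 10, 25)
# renders as '25.10.2006' under SHORT_DATE_FORMAT, and the string
# '25.10.06 14:30' parses via the '%d.%m.%y %H:%M' input format.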
| bsd-3-clause |
shadowk29/cusumtools | legacy/minimal_psd.py | 1 | 12009 | ## COPYRIGHT
## Copyright (C) 2015 Kyle Briggs (kbrig035<at>uottawa.ca)
##
## This file is part of cusumtools.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import tkinter.filedialog
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import scipy.io as sio
from scipy.signal import bessel, filtfilt, welch
from scikits.samplerate import resample
import pylab as pl
import glob
import os
import time
import pandas as pd
from pandasql import sqldf
import re
def make_format(current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
coords = [ax_coord, (x, y)]
return ('Left: {:<40} Right: {:<}'
.format(*['({:.3f}, {:.3f})'.format(x, y) for x,y in coords]))
return format_coord
class App(tk.Frame):
def __init__(self, parent,file_path):
tk.Frame.__init__(self, parent)
parent.deiconify()
self.events_flag = False
self.baseline_flag = False
self.file_path = file_path
##### Trace plotting widgets #####
self.trace_frame = tk.LabelFrame(parent,text='Current Trace')
self.trace_fig = Figure(figsize=(7,5), dpi=100)
self.trace_canvas = FigureCanvasTkAgg(self.trace_fig, master=self.trace_frame)
self.trace_toolbar_frame = tk.Frame(self.trace_frame)
self.trace_toolbar = NavigationToolbar2TkAgg(self.trace_canvas, self.trace_toolbar_frame)
self.trace_toolbar.update()
self.trace_frame.grid(row=0,column=0,columnspan=6,sticky=tk.N+tk.S)
self.trace_toolbar_frame.grid(row=1,column=0,columnspan=6)
self.trace_canvas.get_tk_widget().grid(row=0,column=0,columnspan=6)
##### PSD plotting widgets #####
self.psd_frame = tk.LabelFrame(parent,text='Power Spectrum')
self.psd_fig = Figure(figsize=(7,5), dpi=100)
self.psd_canvas = FigureCanvasTkAgg(self.psd_fig, master=self.psd_frame)
self.psd_toolbar_frame = tk.Frame(self.psd_frame)
self.psd_toolbar = NavigationToolbar2TkAgg(self.psd_canvas, self.psd_toolbar_frame)
self.psd_toolbar.update()
self.psd_frame.grid(row=0,column=6,columnspan=6,sticky=tk.N+tk.S)
self.psd_toolbar_frame.grid(row=1,column=6,columnspan=6)
self.psd_canvas.get_tk_widget().grid(row=0,column=6,columnspan=6)
##### Control widgets #####
self.control_frame = tk.LabelFrame(parent, text='Controls')
self.control_frame.grid(row=2,column=0,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.start_entry = tk.Entry(self.control_frame)
self.start_entry.insert(0,'0')
self.start_label = tk.Label(self.control_frame, text='Start Time (s)')
self.start_label.grid(row=0,column=0,sticky=tk.E+tk.W)
self.start_entry.grid(row=0,column=1,sticky=tk.E+tk.W)
self.end_entry = tk.Entry(self.control_frame)
self.end_entry.insert(0,'10')
self.end_label = tk.Label(self.control_frame, text='End Time (s)')
self.end_label.grid(row=0,column=2,sticky=tk.E+tk.W)
self.end_entry.grid(row=0,column=3,sticky=tk.E+tk.W)
self.cutoff_entry = tk.Entry(self.control_frame)
self.cutoff_entry.insert(0,'')
self.cutoff_label = tk.Label(self.control_frame, text='Cutoff (Hz)')
self.cutoff_label.grid(row=1,column=0,sticky=tk.E+tk.W)
self.cutoff_entry.grid(row=1,column=1,sticky=tk.E+tk.W)
self.order_entry = tk.Entry(self.control_frame)
self.order_entry.insert(0,'')
self.order_label = tk.Label(self.control_frame, text='Filter Order')
self.order_label.grid(row=1,column=2,sticky=tk.E+tk.W)
self.order_entry.grid(row=1,column=3,sticky=tk.E+tk.W)
self.samplerate_entry = tk.Entry(self.control_frame)
self.samplerate_entry.insert(0,'250000')
self.samplerate_label = tk.Label(self.control_frame, text='Sampling Frequency (Hz)')
self.samplerate_label.grid(row=1,column=4,sticky=tk.E+tk.W)
self.samplerate_entry.grid(row=1,column=5,sticky=tk.E+tk.W)
self.savegain_entry = tk.Entry(self.control_frame)
self.savegain_entry.insert(0,'1')
        self.savegain_label = tk.Label(self.control_frame, text='Save Gain')
self.savegain_label.grid(row=0,column=4,sticky=tk.E+tk.W)
self.savegain_entry.grid(row=0,column=5,sticky=tk.E+tk.W)
self.plot_trace = tk.Button(self.control_frame, text='Update Trace', command=self.update_trace)
self.plot_trace.grid(row=2,column=0,columnspan=2,sticky=tk.E+tk.W)
self.normalize = tk.IntVar()
self.normalize.set(0)
self.normalize_check = tk.Checkbutton(self.control_frame, text='Normalize', variable = self.normalize)
self.normalize_check.grid(row=2,column=2,sticky=tk.E+tk.W)
self.plot_psd = tk.Button(self.control_frame, text='Update PSD', command=self.update_psd)
self.plot_psd.grid(row=2,column=3,sticky=tk.E+tk.W)
##### Feedback Widgets #####
        self.feedback_frame = tk.LabelFrame(parent, text='Status')
        self.feedback_frame.grid(row=2,column=6,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
        # status message written by the export handlers below
        self.wildcard = tk.StringVar()
        self.wildcard_label = tk.Label(self.feedback_frame, textvariable=self.wildcard)
        self.wildcard_label.grid(row=0,column=0,columnspan=6,sticky=tk.E+tk.W)
        # the buttons get distinct attribute names so they do not shadow the
        # export_psd/export_trace methods they are bound to
        self.export_psd_button = tk.Button(self.feedback_frame, text='Export PSD',command=self.export_psd)
        self.export_psd_button.grid(row=1,column=0,columnspan=6,sticky=tk.E+tk.W)
        self.export_trace_button = tk.Button(self.feedback_frame, text='Export Trace',command=self.export_trace)
        self.export_trace_button.grid(row=2,column=0,columnspan=6,sticky=tk.E+tk.W)
self.load_memmap()
self.initialize_samplerate()
def export_psd(self):
try:
            data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir=r'G:\PSDs for Sam')
np.savetxt(data_path,np.c_[self.f, self.Pxx, self.rms],delimiter=',')
except AttributeError:
self.wildcard.set('Plot the PSD first')
def export_trace(self):
try:
            # raw string needed: '\N' in a normal string literal is a syntax error in Python 3
            data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir=r'G:\Analysis\Pores\NPN\PSDs')
np.savetxt(data_path,self.plot_data,delimiter=',')
except AttributeError:
self.wildcard.set('Plot the trace first')
def load_mapped_data(self):
self.total_samples = len(self.map)
self.samplerate = int(self.samplerate_entry.get())
if self.start_entry.get()!='':
self.start_time = float(self.start_entry.get())
start_index = int((float(self.start_entry.get())*self.samplerate))
else:
self.start_time = 0
start_index = 0
        if self.end_entry.get()!='':
            self.end_time = float(self.end_entry.get())
            end_index = int((float(self.end_entry.get())*self.samplerate))
            if end_index > self.total_samples:
                end_index = self.total_samples
        else:
            # default to the full trace when the end-time field is blank
            self.end_time = self.total_samples / float(self.samplerate)
            end_index = self.total_samples
self.data = self.map[start_index:end_index]
self.data = float(self.savegain_entry.get()) * self.data
def load_memmap(self):
columntypes = np.dtype([('current', '>i2'), ('voltage', '>i2')])
self.map = np.memmap(self.file_path, dtype=columntypes, mode='r')['current']
def integrate_noise(self, f, Pxx):
df = f[1]-f[0]
return np.sqrt(np.cumsum(Pxx * df))
def filter_data(self):
cutoff = float(self.cutoff_entry.get())
order = int(self.order_entry.get())
Wn = 2.0 * cutoff/float(self.samplerate)
b, a = bessel(order,Wn,'low')
padding = 1000
padded = np.pad(self.data, pad_width=padding, mode='median')
self.filtered_data = filtfilt(b, a, padded, padtype=None)[padding:-padding]
def initialize_samplerate(self):
self.samplerate = float(self.samplerate_entry.get())
##### Plot Updating functions #####
def update_trace(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
        if self.cutoff_entry.get()!='' and self.order_entry.get()!='':
self.filter_data()
self.plot_data = self.filtered_data
self.trace_fig.clf()
a = self.trace_fig.add_subplot(111)
time = np.linspace(1.0/self.samplerate,len(self.plot_data)/float(self.samplerate),len(self.plot_data))+self.start_time
a.set_xlabel(r'Time ($\mu s$)')
a.set_ylabel('Current (pA)')
self.trace_fig.subplots_adjust(bottom=0.14,left=0.21)
a.plot(time*1e6,self.plot_data,'.',markersize=1)
self.trace_canvas.show()
def update_psd(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
        if self.cutoff_entry.get()!='' and self.order_entry.get()!='':
self.filter_data()
self.plot_data = self.filtered_data
maxf = 2*float(self.cutoff_entry.get())
else:
maxf = 2*float(self.samplerate_entry.get())
length = np.minimum(2**18,len(self.filtered_data))
end_index = int(np.floor(len(self.filtered_data)/length)*length)
current = np.average(self.filtered_data[:end_index])
f, Pxx = welch(self.filtered_data, plot_samplerate,nperseg=length)
self.rms = self.integrate_noise(f, Pxx)
if self.normalize.get():
Pxx /= current**2
Pxx *= maxf/2.0
self.rms /= np.absolute(current)
self.f = f
self.Pxx = Pxx
minf = 1
BW_index = np.searchsorted(f, maxf/2)
logPxx = np.log10(Pxx[1:BW_index])
minP = 10**np.floor(np.amin(logPxx))
maxP = 10**np.ceil(np.amax(logPxx))
self.psd_fig.clf()
a = self.psd_fig.add_subplot(111)
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(r'Spectral Power ($\mathrm{pA}^2/\mathrm{Hz}$)')
a.set_xlim(minf, maxf)
a.set_ylim(minP, maxP)
self.psd_fig.subplots_adjust(bottom=0.14,left=0.21)
a.loglog(f[1:],Pxx[1:],'b-')
for tick in a.get_yticklabels():
tick.set_color('b')
a2 = a.twinx()
a2.semilogx(f, self.rms, 'r-')
a2.set_ylabel('RMS Noise (pA)')
a2.set_xlim(minf, maxf)
for tick in a2.get_yticklabels():
tick.set_color('r')
a2.format_coord = make_format(a2, a)
self.psd_canvas.show()
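# Standalone sketch (illustrative parameter values, not the app defaults) of
# the PSD pipeline used in update_psd: zero-phase Bessel low-pass filtering
# via filtfilt, Welch PSD estimation, then cumulative integration of the PSD
# to get the RMS noise curve, as in integrate_noise above.
def _demo_psd_pipeline(data, samplerate=250000.0, cutoff=10000.0, order=4):
    Wn = 2.0 * cutoff / samplerate              # normalized cutoff for bessel()
    b, a = bessel(order, Wn, 'low')
    filtered = filtfilt(b, a, data)             # zero-phase filtering
    nperseg = min(2**18, len(filtered))
    f, Pxx = welch(filtered, samplerate, nperseg=nperseg)
    rms = np.sqrt(np.cumsum(Pxx * (f[1] - f[0])))   # integrated noise
    return f, Pxx, rms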
def main():
root=tk.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename(initialdir='C:/Data/')
App(root,file_path).grid(row=0,column=0)
root.mainloop()
if __name__=="__main__":
main()
| gpl-3.0 |
syhpoon/xyzcmd | libxyz/vfs/vfsobj.py | 1 | 8497 | #-*- coding: utf8 -*
#
# Max E. Kuznecov ~syhpoon <[email protected]> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
import os
from libxyz.core.utils import bstring, ustring
from libxyz.vfs import types, util
class VFSObject(object):
"""
Abstract interface for VFS objects
"""
def __init__(self, xyz, path, full_path, ext_path, driver, parent,
enc=None, **kwargs):
self.xyz = xyz
        self.enc = enc or xyzenc  # xyzenc is expected to be injected into builtins at runtime
# Internal VFS path
self.path = bstring(path, self.enc)
# Full VFS path
self.full_path = bstring(full_path, self.enc)
# External VFS path
self.ext_path = bstring(ext_path, self.enc)
self.parent = parent
self.driver = driver
self.kwargs = kwargs
self.fileobj = None
# File name
self.name = os.path.basename(self.path)
# File type
self.ftype = None
# Access time
self.atime = None
# Modified time
self.mtime = None
# Changed time
self.ctime = None
# Size in bytes
self.size = None
# Owner UID
self.uid = None
# Group
self.gid = None
# Mode
self.mode = None
# Inode
self.inode = None
# Visual file type
self.vtype = None
# Visual file representation
self.visual = None
# File info
self.info = None
# Any type-specific data
self.data = None
# List of significant attributes
self.attributes = ()
self.__ni_msg = _(u"Feature not implemented")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_file(self):
"""
Return True if instance is representing regular file
"""
return isinstance(self.ftype, types.VFSTypeFile)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir(self):
"""
Return True if instance is representing directory
"""
return isinstance(self.ftype, types.VFSTypeDir)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir_empty(self):
"""
Return True if instance is representing directory and it is empty
"""
if not self.is_dir():
return False
_, _, objs = self.walk()
return len(objs) == 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_link(self):
"""
Return True if instance is representing soft link
"""
return isinstance(self.ftype, types.VFSTypeLink)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_char(self):
"""
Return True if instance is representing soft char device
"""
return isinstance(self.ftype, types.VFSTypeChar)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_block(self):
"""
Return True if instance is representing block device
"""
return isinstance(self.ftype, types.VFSTypeBlock)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_fifo(self):
"""
Return True if instance is representing FIFO
"""
return isinstance(self.ftype, types.VFSTypeFifo)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_socket(self):
"""
Return True if instance is representing socket
"""
return isinstance(self.ftype, types.VFSTypeSocket)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def copy(self, path, existcb=None, errorcb=None,
save_attrs=True, follow_links=False, cancel=None):
"""
Copy file to specified location
@param path: Local path to copy file to
@param existcb: Callback function to be called if there exists
an object in target directory with the same name.
Callback function receives VFSObject instance as an
argument and must return one of:
'override' - to override this very object
'override all' - to override any future collisions
'skip' - to skip the object
'skip all' - to skip all future collisions
'abort' - to abort the process.
If no existscb provided 'abort' is used as default
@param errorcb: Callback function to be called in case an error occured
during copying. Function receives VFSObject instance
and error string as arguments and must return one of:
'skip' - to continue the process
'skip all' - to skip all future errors
'abort' - to abort the process.
If no errorcb provided 'abort' is used as default
@param save_attrs: Whether to save object attributes
@param follow_links: Whether to follow symlinks
@param cancel: a threading.Event instance, if it is found set - abort
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def move(self, path, existcb=None, errorcb=None, save_attrs=True,
follow_links=False, cancel=None):
"""
Move object
Arguments are the same as for copy()
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def mkdir(self, newdir):
"""
Create new dir inside object (only valid for directory object types)
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def remove(self, recursive=True):
"""
[Recursively] remove object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def walk(self):
"""
Directory tree walker
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open(self, mode='r'):
"""
Open self object in provided mode
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def close(self):
"""
Close self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def read(self, bytes=None):
"""
Read bytes from self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tell(self):
"""
Tell file position
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seek(self, offset, whence=None):
"""
Perform seek() on object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def in_dir(self, d, e):
"""
Filter only those archive entries which exist in the same
directory level
"""
        return (e.startswith(d.lstrip(os.sep)) and
                len(util.split_path(e)) == len(util.split_path(d)) + 1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __repr__(self):
return self.__str__()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __unicode__(self):
return ustring(self.__str__())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __del__(self):
if self.fileobj:
try:
self.close()
except Exception:
pass
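#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hedged sketch (not part of the original module) of the callback protocol
# documented in copy()/move(): existcb resolves name collisions and errorcb
# resolves copy errors, each returning one of the verbs from the docstring.
def example_existcb(vfsobj):
    # always skip objects that already exist at the destination
    return 'skip'
def example_errorcb(vfsobj, errstr):
    # abort the whole operation on the first error
    return 'abort'
# A concrete VFSObject subclass would then be driven as:
#   obj.copy("/tmp/target", existcb=example_existcb, errorcb=example_errorcb)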
| gpl-3.0 |
simobasso/ansible | test/units/parsing/vault/test_vault.py | 60 | 5974 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
def test_methods_exist(self):
v = VaultLib('ansible')
slots = ['is_encrypted',
'encrypt',
'decrypt',
'_format_output',
'_split_header',]
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_is_encrypted(self):
v = VaultLib(None)
assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
assert v.is_encrypted(data), "encryption check on headered text failed"
def test_format_output(self):
v = VaultLib('ansible')
v.cipher_name = "TEST"
sensitive_data = b"ansible"
data = v._format_output(sensitive_data)
lines = data.split(b'\n')
assert len(lines) > 1, "failed to properly add header"
header = to_bytes(lines[0])
        assert header.endswith(b';TEST'), "header does not end with cipher name"
header_parts = header.split(b';')
assert len(header_parts) == 3, "header has the wrong number of parts"
assert header_parts[0] == b'$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
assert header_parts[1] == v.b_version, "header version is incorrect"
        assert header_parts[2] == b'TEST', "cipher name in header is incorrect"
def test_split_header(self):
v = VaultLib('ansible')
data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
rdata = v._split_header(data)
lines = rdata.split(b'\n')
assert lines[0] == b"ansible"
assert v.cipher_name == 'TEST', "cipher name was not set"
assert v.b_version == b"9.9"
def test_encrypt_decrypt_aes(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = u'AES'
# AES encryption code has been removed, so this is old output for
# AES-encrypted 'foobar' with password 'ansible'.
enc_data = b'$ANSIBLE_VAULT;1.1;AES\n53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3\nfe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e\n786a5a15efeb787e1958cbdd480d076c\n'
dec_data = v.decrypt(enc_data)
assert dec_data == b"foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES256'
enc_data = v.encrypt(b"foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != b"foobar", "encryption failed"
assert dec_data == b"foobar", "decryption failed"
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to encrypt data with a header"
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
dec_data = v.decrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert error_hit, "No error was thrown when trying to decrypt data without a header"
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError as e:
error_hit = True
assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
| gpl-3.0 |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/build/pyfits-3.2.2/lib/pyfits/core.py | 3 | 5534 | #!/usr/bin/env python
# $Id$
"""
A module for reading and writing FITS files and manipulating their
contents.
A module for reading and writing Flexible Image Transport System
(FITS) files. This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
For detailed examples of usage, see the `PyFITS User's Manual
<http://stsdas.stsci.edu/download/wikidocs/The_PyFITS_Handbook.pdf>`_.
"""
# The existing unit tests, anyways, only require this in pyfits.hdu.table,
# but we should still leave new division here too in order to avoid any nasty
# surprises
from __future__ import division # confidence high
"""
Do you mean: "Profits"?
- Google Search, when asked for "PyFITS"
"""
import os
import sys
import warnings
import pyfits.py3compat
# Public API compatibility imports
import pyfits.card
import pyfits.column
import pyfits.convenience
import pyfits.diff
import pyfits.hdu
from pyfits.card import *
from pyfits.column import *
from pyfits.convenience import *
from pyfits.diff import *
from pyfits.fitsrec import FITS_record, FITS_rec
from pyfits.hdu import *
from pyfits.hdu.hdulist import fitsopen as open
from pyfits.hdu.image import Section
from pyfits.hdu.table import new_table
from pyfits.header import Header
# Additional imports used by the documentation (some of which should be
# restructured at some point)
from pyfits.verify import VerifyError
# Set module-global boolean variables--these variables can also get their
# values from environment variables
GLOBALS = [
# Variable name # Default
('ENABLE_RECORD_VALUED_KEYWORD_CARDS', True),
('EXTENSION_NAME_CASE_SENSITIVE', False),
('STRIP_HEADER_WHITESPACE', True),
('USE_MEMMAP', True)
]
for varname, default in GLOBALS:
try:
locals()[varname] = bool(int(os.environ.get('PYFITS_' + varname,
default)))
except ValueError:
locals()[varname] = default
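# Each flag above can be overridden from the environment before import by
# prefixing the variable name with PYFITS_ and supplying 0 or 1, e.g.
# (illustrative shell invocation, relying on the os.environ.get call above):
#   PYFITS_USE_MEMMAP=0 python my_script.py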
__all__ = (pyfits.card.__all__ + pyfits.column.__all__ +
pyfits.convenience.__all__ + pyfits.diff.__all__ +
pyfits.hdu.__all__ +
['FITS_record', 'FITS_rec', 'open', 'Section', 'new_table',
'Header', 'VerifyError', 'TRUE', 'FALSE'] +
[g[0] for g in GLOBALS])
# These are of course deprecated, but a handful of external code still uses
# them
TRUE = True
FALSE = False
# Warnings routines
_formatwarning = warnings.formatwarning
def formatwarning(message, category, filename, lineno, line=None):
if issubclass(category, UserWarning):
return unicode(message) + '\n'
else:
if sys.version_info[:2] < (2, 6):
# Python versions prior to 2.6 don't support the line argument
return _formatwarning(message, category, filename, lineno)
else:
return _formatwarning(message, category, filename, lineno, line)
warnings.formatwarning = formatwarning
warnings.filterwarnings('always', category=UserWarning, append=True)
# This is a workaround for a bug that appears in some versions of Python 2.5
if sys.version_info[:2] < (2, 6):
import urllib
class ErrorURLopener(urllib.FancyURLopener):
"""A class to use with `urlretrieve` to allow `IOError` exceptions to be
raised when a file specified by a URL cannot be accessed.
"""
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise IOError((errcode, errmsg, url))
urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener
# class to the urllibrary
urllib._urlopener.tempcache = {} # Initialize tempcache with an empty
                                     # dictionary to enable file caching
__credits__ = """
Copyright (C) 2004 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
| gpl-3.0 |
317070/kaggle-heart | ira/configurations/gauss_roi10_maxout.py | 1 | 9185 | from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import utils_heart
import nn_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils
import data
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
patch_size = (64, 64)
train_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1 / 1.5, 1.5),
'do_flip': True,
'sequence_shift': False
}
valid_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
}
test_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1., 1.),
'do_flip': True,
'sequence_shift': False
}
data_prep_fun = data.transform_norm_rescale_after
batch_size = 32
nbatches_chunk = 16
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=train_transformation_params,
patient_ids=train_valid_ids['train'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=True, random=True, infinite=True,
data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=valid_transformation_params,
patient_ids=train_valid_ids['valid'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
batch_size=chunk_size,
transform_params=test_transformation_params,
slice2roi_path='pkl_validate_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 150
learning_rate_schedule = {
0: 0.0002,
int(max_nchunks * 0.1): 0.0001,
int(max_nchunks * 0.3): 0.000075,
int(max_nchunks * 0.6): 0.00005,
int(max_nchunks * 0.9): 0.00001
}
validate_every = 2 * nchunks_per_epoch
save_every = 2 * nchunks_per_epoch
conv3 = partial(Conv2DDNNLayer,
stride=(1, 1),
pad="same",
filter_size=(3, 3),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
b=nn.init.Constant(0.1),
W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
pool_size=(2, 2),
stride=(2, 2))
def build_model(l_in=None):
l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
l = conv3(l_in, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d01 = nn.layers.FeaturePoolLayer(l_d01, pool_size=2)
l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d02 = nn.layers.FeaturePoolLayer(l_d02, pool_size=2)
mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(50), nonlinearity=nn_heart.lb_softplus())
sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0, sigma_logscale=False, mu_logscale=False)
# ---------------------------------------------------------------
l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d11 = nn.layers.FeaturePoolLayer(l_d11, pool_size=2)
l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d12 = nn.layers.FeaturePoolLayer(l_d12, pool_size=2)
mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1, sigma_logscale=False, mu_logscale=False)
l_outs = [l_cdf0, l_cdf1]
l_top = nn.layers.MergeLayer(l_outs)
l_target_mu0 = nn.layers.InputLayer((None, 1))
l_target_mu1 = nn.layers.InputLayer((None, 1))
l_targets = [l_target_mu0, l_target_mu1]
    dense_layers = [l_d01, l_d02, l_d11, l_d12, mu0, sigma0, mu1, sigma1]
mu_layers = [mu0, mu1]
sigma_layers = [sigma0, sigma1]
return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers', 'sigma_layers'])(
[l_in], l_outs, l_targets,
l_top, dense_layers, mu_layers, sigma_layers)
def build_objective(model, deterministic=False):
p0 = nn.layers.get_output(model.l_outs[0], deterministic=deterministic)
t0 = nn.layers.get_output(model.l_targets[0])
t0_heaviside = nn_heart.heaviside(t0)
crps0 = T.mean((p0 - t0_heaviside) ** 2)
p1 = nn.layers.get_output(model.l_outs[1], deterministic=deterministic)
t1 = nn.layers.get_output(model.l_targets[1])
t1_heaviside = nn_heart.heaviside(t1)
crps1 = T.mean((p1 - t1_heaviside) ** 2)
return 0.5 * (crps0 + crps1)
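# Worked sketch of the CRPS term from build_objective in plain numpy: the
# predicted CDF over volume thresholds is scored against the Heaviside step
# of the target volume. The numbers here are illustrative, not model output.
def _crps_example():
    thresholds = np.arange(600, dtype='float32')
    target = 110.0
    heaviside = (thresholds >= target).astype('float32')
    predicted_cdf = 1.0 / (1.0 + np.exp(-(thresholds - target) / 10.0))
    return np.mean((predicted_cdf - heaviside) ** 2)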
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_top), learning_rate)
return updates
def get_mean_validation_loss(batch_predictions, batch_targets):
return [0, 0]
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
nbatches = len(batch_predictions)
npredictions = len(batch_predictions[0])
crpss = []
for i in xrange(npredictions):
p, t = [], []
for j in xrange(nbatches):
p.append(batch_predictions[j][i])
t.append(batch_targets[j][i])
p, t = np.vstack(p), np.vstack(t)
target_cdf = utils_heart.heaviside_function(t)
crpss.append(np.mean((p - target_cdf) ** 2))
return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
| mit |
mingwpy/numpy | numpy/lib/__init__.py | 114 | 1146 | from __future__ import division, absolute_import, print_function
import math
from .info import __doc__
from numpy.version import version as __version__
from .type_check import *
from .index_tricks import *
from .function_base import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *
from . import scimath as emath
from .polynomial import *
#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from .financial import *
from .arrayterator import *
from .arraypad import *
from ._version import *
__all__ = ['emath', 'math']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| bsd-3-clause |
chouseknecht/ansible | test/units/modules/network/nxos/test_nxos_nxapi.py | 68 | 3057 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_nxapi
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosNxapiModule(TestNxosModule):
module = nxos_nxapi
def setUp(self):
super(TestNxosNxapiModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_nxapi.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_nxapi.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_nxapi.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'device_info': {'network_os_platform': 'N7K-C7018', 'network_os_version': '8.3(1)'}, 'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosNxapiModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
module_name = self.module.__name__.rsplit('.', 1)[1]
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture(module_name, filename, device))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_nxapi_no_change(self):
set_module_args(dict(http=True, https=False, http_port=80, https_port=443, sandbox=False))
self.execute_module_devices(changed=False, commands=[])
def test_nxos_nxapi_disable(self):
set_module_args(dict(state='absent'))
self.execute_module_devices(changed=True, commands=['no feature nxapi'])
def test_nxos_nxapi_no_http(self):
set_module_args(dict(https=True, http=False, https_port=8443))
self.execute_module_devices(changed=True, commands=['no nxapi http', 'nxapi https port 8443'])
| gpl-3.0 |
ChopChopKodi/pelisalacarta | python/main-classic/channels/piratestreaming.py | 3 | 12423 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for piratestreaming
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urllib2
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
logger.info("[piratestreaming.py] mainlist")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novità" , action="peliculas", url="http://www.piratestreaming.co/film-aggiornamenti.php"))
itemlist.append( Item(channel=item.channel, title="Per genere" , action="categorias", url="http://www.piratestreaming.co/"))
itemlist.append( Item(channel=item.channel, title="Cerca", action="search"))
return itemlist
def search(item,texto):
logger.info("[piratestreaming.py] search "+texto)
itemlist = []
texto = texto.replace(" ","%20")
item.url = "http://www.piratestreaming.co/cerca.php?all="+texto
item.extra = ""
try:
return peliculas(item)
    # Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def categorias(item):
'''
<a href="#">Film</a>
<ul>
<li><a href="http://www.piratestreaming.co/film-aggiornamenti.php">AGGIORNAMENTI</a></li>
<li><a href="http://www.web-streaming-mania.net/" target=_blank><strong><font color="red">«FILM PORNO»</font></a></strong></li>
<li><a href="http://www.piratestreaming.co/categoria/film/animazione.html">ANIMAZIONE</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/avventura.html">AVVENTURA</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/azione.html">AZIONE</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/biografico.html">BIOGRAFICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/comico.html">COMICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/commedia.html">COMMEDIA</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/documentario.html">DOCUMENTARIO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/drammatico.html">DRAMMATICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/erotico.html">EROTICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/fantascienza.html">FANTASCIENZA</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/fantasy.html">FANTASY</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/giallo.html">GIALLO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/grottesco.html">GROTTESCO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/guerra.html">GUERRA</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/horror.html">HORROR</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/musical.html">MUSICAL</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/poliziesco.html">POLIZIESCO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/romantico.html">ROMANTICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/romanzo.html">ROMANZO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/sentimentale.html">SENTIMENTALE</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/storico.html">STORICO</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/thriller.html">THRILLER</a></li>
<li><a href="http://www.piratestreaming.co/categoria/film/western.html">WESTERN</a></li>
</ul>
'''
itemlist = []
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'<a href="#">Film</a>[^<]+<ul>(.*?)</ul>' )
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info("[piratestreaming.py] peliculas")
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
    # Extract the entries (folders)
'''
<div class="featuredItem"> <a href=http://www.imagerip.net/images/ilregnodig.jpg class="featuredImg img" rel="featured"><img src=http://www.imagerip.net/images/ilregnodig.jpg alt="featured item" style="width: 80.8px; height: 109.6px;" /></a>
<div class="featuredText">
<b><a href=http://www.piratestreaming.co/film/il-regno-di-gia-la-leggenda-dei-guardiani-streaming-ita.html>Il Regno di Ga' Hoole La leggenda dei guardiani Ita </a></b> <br /><g:plusone size="small" href=http://www.piratestreaming.co/film/il-regno-di-gia-la-leggenda-dei-guardiani-streaming-ita.html></g:plusone>
<div id="fb-root"></div><fb:like href="http://www.piratestreaming.co/film/il-regno-di-gia-la-leggenda-dei-guardiani-streaming-ita.html" send="false" layout="button_count" show_faces="false" action="like" colorscheme="dark" font=""></fb:like> </b>
</div>
</div>
<div class="featuredItem">
<a href="http://www.piratestreaming.co/film/paris-manhattan.html" class="featuredImg img rounded" rel="featured" style="border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom-right-radius: 4px; border-bottom-left-radius: 4px; ">
<img src="http://www.imagerip.net/images/Of6FN.jpg" alt="Locandina Film" style="width: 80.8px; height: 109.6px;"></a>
<div class="featuredText">
<b> <a href="http://www.piratestreaming.co/film/paris-manhattan.html">Paris Manhattan </a><br><div style="height: 15px; width: 70px; display: inline-block; text-indent: 0px; margin: 0px; padding: 0px; background-color: transparent; border-style: none; float: none; line-height: normal; font-size: 1px; vertical-align: baseline; background-position: initial initial; background-repeat: initial initial; " id="___plusone_0"><iframe allowtransparency="true" frameborder="0" hspace="0" marginheight="0" marginwidth="0" scrolling="no" style="position: static; top: 0px; width: 70px; margin: 0px; border-style: none; left: 0px; visibility: visible; height: 15px; " tabindex="0" vspace="0" width="100%" id="I0_1352901511754" name="I0_1352901511754" src="https://plusone.google.com/_/+1/fastbutton?bsv&size=small&hl=en-US&origin=http%3A%2F%2Fwww.piratestreaming.com&url=http%3A%2F%2Fwww.piratestreaming.com%2Ffilm%2Fparis-manhattan.html&jsh=m%3B%2F_%2Fapps-static%2F_%2Fjs%2Fgapi%2F__features__%2Frt%3Dj%2Fver%3Dmq7ez1ykxXY.it.%2Fsv%3D1%2Fam%3D!9YrXPIrxx2-ITyEIjA%2Fd%3D1%2Frs%3DAItRSTOgKZowsoksby8_wLnRD0d_umAXMQ#_methods=onPlusOne%2C_ready%2C_close%2C_open%2C_resizeMe%2C_renderstart%2Concircled&id=I0_1352901511754&parent=http%3A%2F%2Fwww.piratestreaming.com" title="+1"></iframe></div>
<div id="fb-root"></div><fb:like href="http://www.piratestreaming.co/film/paris-manhattan.html" send="false" layout="button_count" show_faces="false" action="like" colorscheme="dark" font="" fb-xfbml-state="rendered" class="fb_edge_widget_with_comment fb_iframe_widget"><span style="height: 20px; width: 98px; "><iframe id="f2834df314" name="f2e5c9573" scrolling="no" style="border: none; overflow: hidden; height: 20px; width: 98px; " title="Like this content on Facebook." class="fb_ltr" src="http://www.facebook.com/plugins/like.php?api_key=&locale=it_IT&sdk=joey&channel_url=http%3A%2F%2Fstatic.ak.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D17%23cb%3Df2495f47c%26origin%3Dhttp%253A%252F%252Fwww.piratestreaming.com%252Ff153526b2c%26domain%3Dwww.piratestreaming.com%26relation%3Dparent.parent&href=http%3A%2F%2Fwww.piratestreaming.com%2Ffilm%2Fparis-manhattan.html&node_type=link&width=90&layout=button_count&colorscheme=dark&action=like&show_faces=false&send=false&extended_social_context=false"></iframe></span></fb:like> <a href="http://www.piratestreaming.co/video1" target="_blank" rel="nofollow"><img src="http://www.imagerip.net/images/W57R.png"></a> </b>
</div>
</div>
'''
patron = '<div class="featuredItem">\s*'
patron += '<a[^>]*>'
patron += '<img src="([^"]+)"[^<]+</a>[^<]+'
patron += '<div class="featuredText">.*?'
patron += '<a href=([^>]+)>([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
scrapedplot = ""
logger.info("scrapedurl="+scrapedurl)
if scrapedurl.startswith("\""):
scrapedurl=scrapedurl[1:-1]
logger.info("scrapedurl="+scrapedurl)
try:
res = urllib2.urlopen(scrapedurl)
daa = res.read()
da = daa.split('justify;">');
da = da[1].split('</p>')
scrapedplot = scrapertools.htmlclean(da[0]).strip()
except:
scrapedplot= "Trama non disponibile"
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
'''
<div class="featuredItem"> <a href=http://www.piratestreaming.co/film/supercondriaco-ridere-fa-bene-alla-salute.html class="featuredImg img" rel="featured"><img src=http://imagerip.net/images/2014/06/19/Supercondriaco.jpg alt="featured item" style="width: 80.8px; height: 109.6px;" /></a>
<div class="featuredText">
<b><a href=http://www.piratestreaming.co/film/supercondriaco-ridere-fa-bene-alla-salute.html>Supercondriaco - Ridere fa bene alla salute </b><br /><g:plusone size="medium" href=http://www.piratestreaming.co/film/supercondriaco-ridere-fa-bene-alla-salute.html rel="nofollow"></g:plusone>
<div id="fb-root"></div><fb:like href="http://www.piratestreaming.co/film/supercondriaco-ridere-fa-bene-alla-salute.html" send="false" layout="button_count" show_faces="false" action="like" colorscheme="dark" font=""></fb:like>
</div>
</div>
'''
patron = '<div class="featuredItem"[^<]+'
patron += '<a href=(.*?) [^<]+'
patron += '<img src=(.*?) [^<]+</a[^<]+'
patron += '<div class="featuredText"[^<]+'
patron += '<b><a[^>]+>([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
logger.info("scrapedurl="+scrapedurl)
if scrapedurl.startswith("\""):
scrapedurl=scrapedurl[1:-1]
logger.info("scrapedurl="+scrapedurl)
try:
res = urllib2.urlopen(scrapedurl)
daa = res.read()
da = daa.split('justify;">');
da = da[1].split('</p>')
scrapedplot = scrapertools.htmlclean(da[0]).strip()
except:
scrapedplot= "Trama non disponibile"
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], plot=["+scrapedplot+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    # Extract the pagination link
patronvideos = '<td align="center">[^<]+</td>[^<]+<td align="center">\s*<a href="([^"]+)">[^<]+</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedurl = urlparse.urljoin(item.url,matches[0])
itemlist.append( Item(channel=item.channel, action="peliculas", title="Next Page >>" , url=scrapedurl , folder=True) )
return itemlist
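# Hedged mini-example (never called by the channel) of the regex scraping
# approach used above: a re.DOTALL pattern applied to a small HTML snippet.
def _demo_scrape():
    html = '<div class="featuredItem"><a href=http://example.com/film.html>Titolo</a></div>'
    patron = '<a href=([^>]+)>([^<]+)</a>'
    return re.compile(patron, re.DOTALL).findall(html)
    # -> [('http://example.com/film.html', 'Titolo')]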
| gpl-3.0 |
SafeW3rd/Ciphers | primeSieve.py | 1 | 1139 | # Prime Number Sieve
# http://inventwithpython.com/hacking (BSD Licensed)
import math
def isPrime(num):
# Returns True if num is a prime number, otherwise False.
# Note: Generally, isPrime() is slower than primeSieve().
# all numbers less than 2 are not prime
if num < 2:
return False
# see if num is divisible by any number up to the square root of num
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
return True
def primeSieve(sieveSize):
# Returns a list of prime numbers calculated using
# the Sieve of Eratosthenes algorithm.
sieve = [True] * sieveSize
sieve[0] = False # zero and one are not prime numbers
sieve[1] = False
# create the sieve
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i * 2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
# compile the list of primes
primes = []
for i in range(sieveSize):
if sieve[i] == True:
primes.append(i)
return primes
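# Example usage (assumed entry point, not in the original file): print the
# primes below 100 and cross-check the sieve against isPrime().
if __name__ == '__main__':
    primes = primeSieve(100)
    print(primes)
    assert all(isPrime(p) for p in primes)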
| mit |
wdv4758h/ZipPy | lib-python/3/tkinter/test/test_tkinter/test_loadtk.py | 162 | 1503 | import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError
test_support.requires('gui')
class TkLoadTest(unittest.TestCase):
@unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
def testLoadTk(self):
tcl = Tcl()
self.assertRaises(TclError,tcl.winfo_geometry)
tcl.loadtk()
self.assertEqual('1x1+0+0', tcl.winfo_geometry())
tcl.destroy()
def testLoadTkFailure(self):
old_display = None
if sys.platform.startswith(('win', 'darwin', 'cygwin')):
# no failure possible on windows?
# XXX Maybe on tk older than 8.4.13 it would be possible,
# see tkinter.h.
return
with test_support.EnvironmentVarGuard() as env:
if 'DISPLAY' in os.environ:
del env['DISPLAY']
# on some platforms, deleting environment variables
# doesn't actually carry through to the process level
# because they don't support unsetenv
# If that's the case, abort.
with os.popen('echo $DISPLAY') as pipe:
display = pipe.read().strip()
if display:
return
tcl = Tcl()
self.assertRaises(TclError, tcl.winfo_geometry)
self.assertRaises(TclError, tcl.loadtk)
tests_gui = (TkLoadTest, )
if __name__ == "__main__":
test_support.run_unittest(*tests_gui)
| bsd-3-clause |
pku9104038/edx-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py | 17 | 5131 | # This class gives a common interface for logging into the grading controller
import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric
from lxml import etree
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
"""
Exception for grading service. Shown when Open Response Assessment servers cannot be reached.
"""
pass
class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
self.session = requests.Session()
self.system = config['system']
def _login(self):
"""
Log into the staff grading service.
Raises requests.exceptions.HTTPError if something goes wrong.
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
response.raise_for_status()
return response.json()
def post(self, url, data, allow_redirects=False):
"""
Make a post request to the grading controller
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
log.error(error_string)
raise GradingServiceError(error_string)
return r.text
def get(self, url, params, allow_redirects=False):
"""
Make a get request to the grading controller
"""
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
try:
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
log.error(error_string)
raise GradingServiceError(error_string)
return r.text
def _try_with_login(self, operation):
"""
Call operation(), which should return a requests response object. If
the request fails with a 'login_required' error, call _login() and try
the operation again.
Returns the result of operation(). Does not catch exceptions.
"""
response = operation()
resp_json = response.json()
if (resp_json
and resp_json.get('success') is False
and resp_json.get('error') == 'login_required'):
# apparrently we aren't logged in. Try to fix that.
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into staff_grading backend. Response: %s",
r)
# try again
response = operation()
response.raise_for_status()
return response
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
response_json = json.loads(response)
except:
response_json = response
try:
if 'rubric' in response_json:
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, view_only)
rubric_dict = rubric_renderer.render_rubric(rubric)
success = rubric_dict['success']
rubric_html = rubric_dict['html']
response_json['rubric'] = rubric_html
return response_json
        # if we can't parse the rubric into HTML
        except etree.XMLSyntaxError:
#This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
#This is a dev_facing_error
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
| agpl-3.0 |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_shlex.py | 179 | 5300 | # -*- coding: iso-8859-1 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
bufferapp/buffer-django-nonrel | django/contrib/sitemaps/tests/basic.py | 155 | 7620 | import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
class SitemapTests(TestCase):
urls = 'django.contrib.sitemaps.tests.urls'
def setUp(self):
if Site._meta.installed:
self.base_url = 'http://example.com'
else:
self.base_url = 'http://testserver'
self.old_USE_L10N = settings.USE_L10N
self.old_Site_meta_installed = Site._meta.installed
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
# Create a user that will double as sitemap content
User.objects.create_user('testuser', '[email protected]', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
Site._meta.installed = self.old_Site_meta_installed
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
# Localization should be active
settings.USE_L10N = True
activate('fr')
self.assertEqual(u'0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
deactivate()
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/generic/sitemap.xml')
expected = ''
for username in User.objects.values_list("username", flat=True):
expected += "<url><loc>%s/users/%s/</loc></url>" % (self.base_url, username)
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>
""" % expected)
@skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS, "django.contrib.flatpages app not installed.")
def test_flatpage_sitemap(self):
"Basic FlatPage sitemap test"
# Import FlatPage inside the test so that when django.contrib.flatpages
# is not installed we don't get problems trying to delete Site
# objects (FlatPage has an M2M to Site, Site.delete() tries to
        # delete related objects, but the M2M table doesn't exist).
from django.contrib.flatpages.models import FlatPage
public = FlatPage.objects.create(
url=u'/public/',
title=u'Public Page',
enable_comments=True,
registration_required=False,
)
public.sites.add(settings.SITE_ID)
private = FlatPage.objects.create(
url=u'/private/',
title=u'Private Page',
enable_comments=True,
registration_required=True
)
private.sites.add(settings.SITE_ID)
response = self.client.get('/flatpages/sitemap.xml')
# Public flatpage should be in the sitemap
self.assertContains(response, '<loc>%s%s</loc>' % (self.base_url, public.url))
# Private flatpage should not be in the sitemap
self.assertNotContains(response, '<loc>%s%s</loc>' % (self.base_url, private.url))
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS, "django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
        Sitemap.get_urls if Site objects exist, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
| bsd-3-clause |
bslatkin/8-bits | appengine-mapreduce/python/test/testlib/testutil.py | 2 | 4505 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for mapreduce framework.
"""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
# os_compat must be first to ensure timezones are UTC.
# Disable "unused import" and "invalid import order"
# pylint: disable-msg=W0611
from google.appengine.tools import os_compat
# pylint: enable-msg=W0611
from testlib import mox
import os
import shutil
import sys
import tempfile
import unittest
import urllib
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.files import file_service_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api import datastore_file_stub
from google.appengine.api import queueinfo
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.taskqueue import taskqueue_stub
class MatchesDatastoreConfig(mox.Comparator):
"""Mox comparator for MatchesDatastoreConfig objects."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def equals(self, config):
"""Check to see if config matches arguments."""
if self.kwargs.get("deadline", None) != config.deadline:
return False
if self.kwargs.get("force_writes", None) != config.force_writes:
return False
return True
def __repr__(self):
return "MatchesDatastoreConfig(%s)" % self.kwargs
class MatchesUserRPC(mox.Comparator):
"""Mox comparator for UserRPC objects."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def equals(self, rpc):
"""Check to see if rpc matches arguments."""
if self.kwargs.get("deadline", None) != rpc.deadline:
return False
return True
def __repr__(self):
return "MatchesUserRPC(%s)" % self.kwargs
class HandlerTestBase(unittest.TestCase):
"""Base class for all webapp.RequestHandler tests."""
MAPREDUCE_URL = "/_ah/mapreduce/kickoffjob_callback"
def setUp(self):
unittest.TestCase.setUp(self)
self.mox = mox.Mox()
self.appid = "testapp"
self.version_id = "1.23456789"
os.environ["APPLICATION_ID"] = self.appid
os.environ["CURRENT_VERSION_ID"] = self.version_id
os.environ["HTTP_HOST"] = "localhost"
self.memcache = memcache_stub.MemcacheServiceStub()
self.taskqueue = taskqueue_stub.TaskQueueServiceStub()
self.taskqueue.queue_yaml_parser = (
lambda x: queueinfo.LoadSingleQueue(
"queue:\n"
"- name: default\n"
" rate: 10/s\n"
"- name: crazy-queue\n"
" rate: 2000/d\n"
" bucket_size: 10\n"))
self.datastore = datastore_file_stub.DatastoreFileStub(
self.appid, "/dev/null", "/dev/null")
self.blob_storage_directory = tempfile.mkdtemp()
blob_storage = file_blob_storage.FileBlobStorage(
self.blob_storage_directory, self.appid)
self.blobstore_stub = blobstore_stub.BlobstoreServiceStub(blob_storage)
self.file_service = self.createFileServiceStub(blob_storage)
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub("taskqueue", self.taskqueue)
apiproxy_stub_map.apiproxy.RegisterStub("memcache", self.memcache)
apiproxy_stub_map.apiproxy.RegisterStub("datastore_v3", self.datastore)
apiproxy_stub_map.apiproxy.RegisterStub("blobstore", self.blobstore_stub)
apiproxy_stub_map.apiproxy.RegisterStub("file", self.file_service)
def createFileServiceStub(self, blob_storage):
return file_service_stub.FileServiceStub(blob_storage)
def tearDown(self):
try:
self.mox.VerifyAll()
finally:
self.mox.UnsetStubs()
shutil.rmtree(self.blob_storage_directory)
unittest.TestCase.tearDown(self)
def assertTaskStarted(self, queue="default"):
tasks = self.taskqueue.GetTasks(queue)
self.assertEquals(1, len(tasks))
self.assertEquals(tasks[0]["url"], self.MAPREDUCE_URL)
| apache-2.0 |
pedro2d10/SickRage-FR | lib/unidecode/x05f.py | 252 | 4660 | data = (
'Kai ', # 0x00
'Bian ', # 0x01
'Yi ', # 0x02
'Qi ', # 0x03
'Nong ', # 0x04
'Fen ', # 0x05
'Ju ', # 0x06
'Yan ', # 0x07
'Yi ', # 0x08
'Zang ', # 0x09
'Bi ', # 0x0a
'Yi ', # 0x0b
'Yi ', # 0x0c
'Er ', # 0x0d
'San ', # 0x0e
'Shi ', # 0x0f
'Er ', # 0x10
'Shi ', # 0x11
'Shi ', # 0x12
'Gong ', # 0x13
'Diao ', # 0x14
'Yin ', # 0x15
'Hu ', # 0x16
'Fu ', # 0x17
'Hong ', # 0x18
'Wu ', # 0x19
'Tui ', # 0x1a
'Chi ', # 0x1b
'Jiang ', # 0x1c
'Ba ', # 0x1d
'Shen ', # 0x1e
'Di ', # 0x1f
'Zhang ', # 0x20
'Jue ', # 0x21
'Tao ', # 0x22
'Fu ', # 0x23
'Di ', # 0x24
'Mi ', # 0x25
'Xian ', # 0x26
'Hu ', # 0x27
'Chao ', # 0x28
'Nu ', # 0x29
'Jing ', # 0x2a
'Zhen ', # 0x2b
'Yi ', # 0x2c
'Mi ', # 0x2d
'Quan ', # 0x2e
'Wan ', # 0x2f
'Shao ', # 0x30
'Ruo ', # 0x31
'Xuan ', # 0x32
'Jing ', # 0x33
'Dun ', # 0x34
'Zhang ', # 0x35
'Jiang ', # 0x36
'Qiang ', # 0x37
'Peng ', # 0x38
'Dan ', # 0x39
'Qiang ', # 0x3a
'Bi ', # 0x3b
'Bi ', # 0x3c
'She ', # 0x3d
'Dan ', # 0x3e
'Jian ', # 0x3f
'Gou ', # 0x40
'Sei ', # 0x41
'Fa ', # 0x42
'Bi ', # 0x43
'Kou ', # 0x44
'Nagi ', # 0x45
'Bie ', # 0x46
'Xiao ', # 0x47
'Dan ', # 0x48
'Kuo ', # 0x49
'Qiang ', # 0x4a
'Hong ', # 0x4b
'Mi ', # 0x4c
'Kuo ', # 0x4d
'Wan ', # 0x4e
'Jue ', # 0x4f
'Ji ', # 0x50
'Ji ', # 0x51
'Gui ', # 0x52
'Dang ', # 0x53
'Lu ', # 0x54
'Lu ', # 0x55
'Tuan ', # 0x56
'Hui ', # 0x57
'Zhi ', # 0x58
'Hui ', # 0x59
'Hui ', # 0x5a
'Yi ', # 0x5b
'Yi ', # 0x5c
'Yi ', # 0x5d
'Yi ', # 0x5e
'Huo ', # 0x5f
'Huo ', # 0x60
'Shan ', # 0x61
'Xing ', # 0x62
'Wen ', # 0x63
'Tong ', # 0x64
'Yan ', # 0x65
'Yan ', # 0x66
'Yu ', # 0x67
'Chi ', # 0x68
'Cai ', # 0x69
'Biao ', # 0x6a
'Diao ', # 0x6b
'Bin ', # 0x6c
'Peng ', # 0x6d
'Yong ', # 0x6e
'Piao ', # 0x6f
'Zhang ', # 0x70
'Ying ', # 0x71
'Chi ', # 0x72
'Chi ', # 0x73
'Zhuo ', # 0x74
'Tuo ', # 0x75
'Ji ', # 0x76
'Pang ', # 0x77
'Zhong ', # 0x78
'Yi ', # 0x79
'Wang ', # 0x7a
'Che ', # 0x7b
'Bi ', # 0x7c
'Chi ', # 0x7d
'Ling ', # 0x7e
'Fu ', # 0x7f
'Wang ', # 0x80
'Zheng ', # 0x81
'Cu ', # 0x82
'Wang ', # 0x83
'Jing ', # 0x84
'Dai ', # 0x85
'Xi ', # 0x86
'Xun ', # 0x87
'Hen ', # 0x88
'Yang ', # 0x89
'Huai ', # 0x8a
'Lu ', # 0x8b
'Hou ', # 0x8c
'Wa ', # 0x8d
'Cheng ', # 0x8e
'Zhi ', # 0x8f
'Xu ', # 0x90
'Jing ', # 0x91
'Tu ', # 0x92
'Cong ', # 0x93
'[?] ', # 0x94
'Lai ', # 0x95
'Cong ', # 0x96
'De ', # 0x97
'Pai ', # 0x98
'Xi ', # 0x99
'[?] ', # 0x9a
'Qi ', # 0x9b
'Chang ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Zhou ', # 0x9f
'Lai ', # 0xa0
'Yu ', # 0xa1
'Xie ', # 0xa2
'Jie ', # 0xa3
'Jian ', # 0xa4
'Chi ', # 0xa5
'Jia ', # 0xa6
'Bian ', # 0xa7
'Huang ', # 0xa8
'Fu ', # 0xa9
'Xun ', # 0xaa
'Wei ', # 0xab
'Pang ', # 0xac
'Yao ', # 0xad
'Wei ', # 0xae
'Xi ', # 0xaf
'Zheng ', # 0xb0
'Piao ', # 0xb1
'Chi ', # 0xb2
'De ', # 0xb3
'Zheng ', # 0xb4
'Zheng ', # 0xb5
'Bie ', # 0xb6
'De ', # 0xb7
'Chong ', # 0xb8
'Che ', # 0xb9
'Jiao ', # 0xba
'Wei ', # 0xbb
'Jiao ', # 0xbc
'Hui ', # 0xbd
'Mei ', # 0xbe
'Long ', # 0xbf
'Xiang ', # 0xc0
'Bao ', # 0xc1
'Qu ', # 0xc2
'Xin ', # 0xc3
'Shu ', # 0xc4
'Bi ', # 0xc5
'Yi ', # 0xc6
'Le ', # 0xc7
'Ren ', # 0xc8
'Dao ', # 0xc9
'Ding ', # 0xca
'Gai ', # 0xcb
'Ji ', # 0xcc
'Ren ', # 0xcd
'Ren ', # 0xce
'Chan ', # 0xcf
'Tan ', # 0xd0
'Te ', # 0xd1
'Te ', # 0xd2
'Gan ', # 0xd3
'Qi ', # 0xd4
'Shi ', # 0xd5
'Cun ', # 0xd6
'Zhi ', # 0xd7
'Wang ', # 0xd8
'Mang ', # 0xd9
'Xi ', # 0xda
'Fan ', # 0xdb
'Ying ', # 0xdc
'Tian ', # 0xdd
'Min ', # 0xde
'Min ', # 0xdf
'Zhong ', # 0xe0
'Chong ', # 0xe1
'Wu ', # 0xe2
'Ji ', # 0xe3
'Wu ', # 0xe4
'Xi ', # 0xe5
'Ye ', # 0xe6
'You ', # 0xe7
'Wan ', # 0xe8
'Cong ', # 0xe9
'Zhong ', # 0xea
'Kuai ', # 0xeb
'Yu ', # 0xec
'Bian ', # 0xed
'Zhi ', # 0xee
'Qi ', # 0xef
'Cui ', # 0xf0
'Chen ', # 0xf1
'Tai ', # 0xf2
'Tun ', # 0xf3
'Qian ', # 0xf4
'Nian ', # 0xf5
'Hun ', # 0xf6
'Xiong ', # 0xf7
'Niu ', # 0xf8
'Wang ', # 0xf9
'Xian ', # 0xfa
'Xin ', # 0xfb
'Kang ', # 0xfc
'Hu ', # 0xfd
'Kai ', # 0xfe
'Fen ', # 0xff
)
| gpl-3.0 |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/psycopg2/tests/test_bug_gc.py | 16 | 1723 | #!/usr/bin/env python
# bug_gc.py - test for refcounting/GC bug
#
# Copyright (C) 2010-2011 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions
import unittest
import gc
from .testutils import ConnectingTestCase, skip_if_no_uuid
class StolenReferenceTestCase(ConnectingTestCase):
@skip_if_no_uuid
def test_stolen_reference_bug(self):
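        # fish() runs a full garbage collection while psycopg2 is converting the
        # query result value; this used to expose the stolen-reference bug this
        # test guards against.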
def fish(val, cur):
gc.collect()
return 42
UUID = psycopg2.extensions.new_type((2950,), "UUID", fish)
psycopg2.extensions.register_type(UUID, self.conn)
curs = self.conn.cursor()
curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid")
curs.fetchone()
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit |
ual/urbansim | urbansim/utils/tests/test_misc.py | 5 | 3159 | import os
import shutil
import numpy as np
import pandas as pd
import pytest
from .. import misc
class _FakeTable(object):
def __init__(self, name, columns):
self.name = name
self.columns = columns
@pytest.fixture
def fta():
return _FakeTable('a', ['aa', 'ab', 'ac'])
@pytest.fixture
def ftb():
return _FakeTable('b', ['bx', 'by', 'bz'])
@pytest.fixture
def clean_fake_data_home(request):
def fin():
if os.path.isdir('fake_data_home'):
shutil.rmtree('fake_data_home')
request.addfinalizer(fin)
def test_column_map_raises(fta, ftb):
with pytest.raises(RuntimeError):
misc.column_map([fta, ftb], ['aa', 'by', 'bz', 'cw'])
def test_column_map_none(fta, ftb):
assert misc.column_map([fta, ftb], None) == {'a': None, 'b': None}
def test_column_map(fta, ftb):
assert misc.column_map([fta, ftb], ['aa', 'by', 'bz']) == \
{'a': ['aa'], 'b': ['by', 'bz']}
assert misc.column_map([fta, ftb], ['by', 'bz']) == \
{'a': [], 'b': ['by', 'bz']}
def test_dirs(clean_fake_data_home):
misc._mkifnotexists("fake_data_home")
os.environ["DATA_HOME"] = "fake_data_home"
misc.get_run_number()
misc.get_run_number()
misc.data_dir()
misc.configs_dir()
misc.models_dir()
misc.charts_dir()
misc.maps_dir()
misc.simulations_dir()
misc.reports_dir()
misc.runs_dir()
misc.config("test")
@pytest.fixture
def range_df():
df = pd.DataFrame({'to_zone_id': [2, 3, 4],
'from_zone_id': [1, 1, 1],
'distance': [.1, .2, .9]})
df = df.set_index(['from_zone_id', 'to_zone_id'])
return df
@pytest.fixture
def range_series():
return pd.Series([10, 150, 75, 275], index=[1, 2, 3, 4])
def test_compute_range(range_df, range_series):
assert misc.compute_range(range_df, range_series, "distance", .5).loc[1] == 225
def test_reindex():
s = pd.Series([.5, 1.0, 1.5], index=[2, 1, 3])
s2 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
assert list(misc.reindex(s, s2).values) == [1.0, .5, 1.5]
def test_naics():
assert misc.naicsname(54) == "Professional"
def test_signif():
assert misc.signif(4.0) == '***'
assert misc.signif(3.0) == '**'
assert misc.signif(2.0) == '*'
assert misc.signif(1.5) == '.'
assert misc.signif(1.0) == ''
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_cost': [1000000, 2000000, 3000000],
'parcel_size': [10000, 20000, 30000],
'max_far': [2.0, 3.0, 4.0],
'names': ['a', 'b', 'c'],
'max_height': [40, 60, 80]},
index=['a', 'b', 'c'])
def test_misc_dffunctions(simple_dev_inputs):
misc.df64bitto32bit(simple_dev_inputs)
misc.pandasdfsummarytojson(simple_dev_inputs[['land_cost', 'parcel_size']])
misc.numpymat2df(np.array([[1, 2], [3, 4]]))
def test_column_list(fta, ftb):
assert misc.column_list([fta, ftb], ['aa', 'by', 'bz', 'c']) == \
['aa', 'by', 'bz']
| bsd-3-clause |
tienjunhsu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
    app.exec_()
| bsd-3-clause |
hopeall/odoo | addons/project_timesheet/__openerp__.py | 260 | 2151 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bill Time on Tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module transfers the work entries recorded under Project Management tasks
to Timesheet line entries for a given date and user. Creating, editing, and
deleting entries is synchronized in both directions.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/project-management',
'depends': ['resource', 'project', 'hr_timesheet_sheet', 'hr_timesheet_invoice', 'account_analytic_analysis', 'procurement'],
'data': [
'security/ir.model.access.csv',
'security/project_timesheet_security.xml',
'report/task_report_view.xml',
'project_timesheet_view.xml',
],
'demo': ['project_timesheet_demo.xml'],
'test': [
'test/worktask_entry_to_timesheetline_entry.yml',
'test/work_timesheet.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
home-assistant/home-assistant | homeassistant/components/sighthound/image_processing.py | 4 | 5695 | """Person detection using Sighthound cloud service."""
import io
import logging
from pathlib import Path
from PIL import Image, ImageDraw, UnidentifiedImageError
import simplehound.core as hound
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_API_KEY
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.util.pil import draw_box
_LOGGER = logging.getLogger(__name__)
EVENT_PERSON_DETECTED = "sighthound.person_detected"
ATTR_BOUNDING_BOX = "bounding_box"
ATTR_PEOPLE = "people"
CONF_ACCOUNT_TYPE = "account_type"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPED_FILE = "save_timestamped_file"
DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
DEV = "dev"
PROD = "prod"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ACCOUNT_TYPE, default=DEV): vol.In([DEV, PROD]),
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_TIMESTAMPED_FILE, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform."""
# Validate credentials by processing image.
api_key = config[CONF_API_KEY]
account_type = config[CONF_ACCOUNT_TYPE]
api = hound.cloud(api_key, account_type)
try:
api.detect(b"Test")
except hound.SimplehoundException as exc:
_LOGGER.error("Sighthound error %s setup aborted", exc)
return
save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
if save_file_folder:
save_file_folder = Path(save_file_folder)
entities = []
for camera in config[CONF_SOURCE]:
sighthound = SighthoundEntity(
api,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
save_file_folder,
            config[CONF_SAVE_TIMESTAMPED_FILE],
)
entities.append(sighthound)
add_entities(entities)
class SighthoundEntity(ImageProcessingEntity):
"""Create a sighthound entity."""
def __init__(
self, api, camera_entity, name, save_file_folder, save_timestamped_file
):
"""Init."""
self._api = api
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = f"sighthound_{camera_name}"
self._state = None
self._last_detection = None
self._image_width = None
self._image_height = None
self._save_file_folder = save_file_folder
self._save_timestamped_file = save_timestamped_file
def process_image(self, image):
"""Process an image."""
detections = self._api.detect(image)
people = hound.get_people(detections)
self._state = len(people)
if self._state > 0:
self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
metadata = hound.get_metadata(detections)
self._image_width = metadata["image_width"]
self._image_height = metadata["image_height"]
for person in people:
self.fire_person_detected_event(person)
if self._save_file_folder and self._state > 0:
self.save_image(image, people, self._save_file_folder)
def fire_person_detected_event(self, person):
"""Send event with detected total_persons."""
self.hass.bus.fire(
EVENT_PERSON_DETECTED,
{
ATTR_ENTITY_ID: self.entity_id,
ATTR_BOUNDING_BOX: hound.bbox_to_tf_style(
person["boundingBox"], self._image_width, self._image_height
),
},
)
def save_image(self, image, people, directory):
"""Save a timestamped image with bounding boxes around targets."""
try:
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Sighthound unable to process image, bad data")
return
draw = ImageDraw.Draw(img)
for person in people:
box = hound.bbox_to_tf_style(
person["boundingBox"], self._image_width, self._image_height
)
draw_box(draw, box, self._image_width, self._image_height)
latest_save_path = directory / f"{self._name}_latest.jpg"
img.save(latest_save_path)
if self._save_timestamped_file:
timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
img.save(timestamp_save_path)
_LOGGER.info("Sighthound saved file %s", timestamp_save_path)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return ATTR_PEOPLE
@property
def extra_state_attributes(self):
"""Return the attributes."""
if not self._last_detection:
return {}
return {"last_person": self._last_detection}
| apache-2.0 |
BeegorMif/HTPC-Manager | lib/guessit/transfo/guess_release_group.py | 21 | 3682 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import prop_multi, compute_canonical_form, _dash, _psep
import re
import logging
log = logging.getLogger(__name__)
def get_patterns(property_name):
return [ p.replace(_dash, _psep) for patterns in prop_multi[property_name].values() for p in patterns ]
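# For reference, get_patterns('videoCodec') yields the known codec regexes
# (presumably entries like 'x264') with the dash placeholder generalized to
# the _psep separator pattern.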
CODECS = get_patterns('videoCodec')
FORMATS = get_patterns('format')
VAPIS = get_patterns('videoApi')
# RG names following a codec or format, with a potential space or dash inside the name
GROUP_NAMES = [ r'(?P<videoCodec>' + codec + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for codec in CODECS ]
GROUP_NAMES += [ r'(?P<format>' + fmt + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for fmt in FORMATS ]
GROUP_NAMES += [ r'(?P<videoApi>' + api + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for api in VAPIS ]
GROUP_NAMES2 = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for codec in CODECS ]
GROUP_NAMES2 += [ r'\.(?P<format>' + fmt + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for fmt in FORMATS ]
GROUP_NAMES2 += [ r'\.(?P<videoApi>' + vapi + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for vapi in VAPIS ]
GROUP_NAMES = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES ]
GROUP_NAMES2 = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES2 ]
def adjust_metadata(md):
return dict((property_name, compute_canonical_form(property_name, value) or value)
for property_name, value in md.items())
def guess_release_group(string):
# first try to see whether we have both a known codec and a known release group
for rexp in GROUP_NAMES:
match = rexp.search(string)
while match:
metadata = match.groupdict()
# make sure this is an actual release group we caught
release_group = (compute_canonical_form('releaseGroup', metadata['releaseGroup']) or
compute_canonical_form('weakReleaseGroup', metadata['releaseGroup']))
if release_group:
return adjust_metadata(metadata), (match.start(1), match.end(2))
# we didn't find anything conclusive, keep searching
match = rexp.search(string, match.span()[0]+1)
# pick anything as releaseGroup as long as we have a codec in front
# this doesn't include a potential dash ('-') ending the release group
# eg: [...].X264-HiS@SiLUHD-English.[...]
for rexp in GROUP_NAMES2:
match = rexp.search(string)
if match:
return adjust_metadata(match.groupdict()), (match.start(1), match.end(2))
return None, None
def process(mtree):
SingleNodeGuesser(guess_release_group, 0.8, log).process(mtree)
| gpl-3.0 |
iamaris/CMUAnalysis | Common/generateObjectTree.py | 1 | 11728 | import re
import os
objects = ['Photon', 'Electron', 'Muon', 'Jet', 'Vertex']
susyObjects = {'Photon': 'Photon', 'Electron': 'Electron', 'Muon': 'Muon', 'Jet': 'PFJet', 'Vertex': 'Vertex'}
objectVars = file('ObjectVars.h')
classPat = re.compile('^[ ]*class[ ]+([a-zA-Z0-9]+)Vars[ ]*{')
cTorPat = re.compile('^[ ]*[a-zA-Z0-9]+Vars\([^,]+(,[ ]+Event.*|)\);')
varPat = re.compile('^[ ]*((?:unsigned[ ]|)(?:bool|char|short|int|unsigned|long|float|double))[ ]+([a-zA-Z_][a-zA-Z0-9_]*);')
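# e.g. a member declaration line "  float pt;" inside a recognized class
# matches varPat and yields the tuple ('float', 'pt')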
useEvent = dict()
varList = dict()
obj = ''
for line in objectVars:
if '};' in line:
obj = ''
if obj:
cTorMatch = cTorPat.match(line)
if cTorMatch:
useEvent[obj] = len(cTorMatch.group(1)) != 0
varMatch = varPat.match(line)
if varMatch:
varList[obj].append((varMatch.group(1), varMatch.group(2)))
lineMatch = classPat.match(line)
if lineMatch and lineMatch.group(1) in objects:
obj = lineMatch.group(1)
varList[obj] = []
objectVars.close()
# GENERATE HEADER
headerContent = '''/* Auto-generated header file */
#ifndef ObjectTree_h
#define ObjectTree_h
#include "ObjectVars.h"
#include "TTree.h"
#include "TString.h"
namespace susy {
unsigned const NMAX(512);
'''
for obj in objects:
headerContent += '''
class ''' + obj + '''VarsArray {
public:
''' + obj + '''VarsArray() {}
~''' + obj + '''VarsArray() {}
void setBranches(TTree&);
void setAddress(TTree&);
void push_back(''' + obj + '''Vars const&);
void clear() { size = 0; }
''' + obj + '''Vars at(unsigned) const;
unsigned size;
'''
for (type, name) in varList[obj]:
headerContent += '''
''' + type + ' ' + name + '[NMAX];'
headerContent += '''
};
'''
headerContent += '''
class ObjectTree {
public:
ObjectTree();
~ObjectTree();
void setOutput(TString const&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void setOutput(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
static void setBranchStatus(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void initEvent(Event const&);
void fill() { output_->Fill(); }'''
for obj in objects:
lowerName = obj.lower()
headerContent += '''
void save(''' + obj + 'Vars const& _vars) { ' + lowerName + 'Array_.push_back(_vars); }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
unsigned get''' + obj + 'Size() const { return ' + lowerName + 'Array_.size; }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + 'VarsArray const& get' + obj + 'Array() const { return ' + lowerName + 'Array_; }'
headerContent += '''
private:
void setBranches_('''
for i in range(len(objects)):
headerContent += 'bool'
if i != len(objects) - 1:
headerContent += ', '
else:
headerContent += ');'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + '''VarsArray ''' + lowerName + '''Array_;'''
headerContent += '''
unsigned runNumber_;
unsigned lumiNumber_;
unsigned eventNumber_;
TTree* output_;
bool ownOutput_;
};
}
#endif
'''
headerFile = file('ObjectTree.h', 'w')
headerFile.write(headerContent)
headerFile.close()
# GENERATE SRC
cTors = dict()
setBranches = dict()
setAddress = dict()
pushBack = dict()
at = dict()
for obj in objects:
lowerName = obj.lower()
cTorText = '''
''' + obj + 'Vars::' + obj + '''Vars() :'''
initList = ''
for (type, name) in varList[obj]:
initList += '''
''' + name + '('
if type == 'float' or type == 'double':
initList += '0.'
elif type == 'bool':
initList += 'false'
else:
initList += '0'
initList += '),'
initList = initList.rstrip(',')
cTorText += initList
cTorText += '''
{
}
'''
cTors[obj] = cTorText
setBranchText = '''
void
''' + obj + '''VarsArray::setBranches(TTree& _tree)
{
_tree.Branch("''' + lowerName + '.size", &size, "' + lowerName + '.size/i");'
for (type, name) in varList[obj]:
branch = '''
_tree.Branch("''' + lowerName + '.' + name + '", ' + name + ', "' + name + '[' + lowerName + '.size]/'
if type == 'char':
branch += 'B'
elif type == 'unsigned char':
branch += 'b'
elif type == 'short':
branch += 'S'
elif type == 'unsigned short':
branch += 's'
elif type == 'int':
branch += 'I'
elif type == 'unsigned' or type == 'unsigned int':
branch += 'i'
elif type == 'long':
branch += 'L'
elif type == 'unsigned long':
branch += 'l'
elif type == 'float':
branch += 'F'
elif type == 'double':
branch += 'D'
elif type == 'bool':
branch += 'O'
branch += '");'
setBranchText += branch
setBranchText += '''
}
'''
setBranches[obj] = setBranchText
setAddressText = '''
void
''' + obj + '''VarsArray::setAddress(TTree& _tree)
{
std::vector<TString> notFound;
_tree.SetBranchAddress("''' + lowerName + '.size", &size);'
for (type, name) in varList[obj]:
bName = lowerName + '.' + name
setAddressText += '''
if(_tree.GetBranch("''' + bName + '")) _tree.SetBranchAddress("' + bName + '", ' + name + ''');
else notFound.push_back("''' + bName + '");'
setAddressText += '''
for(unsigned iN(0); iN != notFound.size(); ++iN)
std::cerr << "Branch " << notFound[iN] << " not found in input" << std::endl;
}
'''
setAddress[obj] = setAddressText
pushBackText = '''
void
''' + obj + 'VarsArray::push_back(' + obj + '''Vars const& _vars)
{
if(size == NMAX - 1)
throw std::runtime_error("Too many ''' + obj + '''s");
'''
for (type, name) in varList[obj]:
pushBackText += '''
''' + name + '[size] = _vars.' + name + ';'
pushBackText += '''
++size;
}
'''
pushBack[obj] = pushBackText
atText = '''
''' + obj + '''Vars
''' + obj + '''VarsArray::at(unsigned _pos) const
{
if(_pos >= size)
throw std::runtime_error("''' + obj + '''Vars out-of-bounds");
''' + obj + '''Vars vars;
'''
for (type, name) in varList[obj]:
atText += '''
vars.''' + name + ' = ' + name + '[_pos];'
atText += '''
return vars;
}
'''
at[obj] = atText
preamble = '#include "ObjectVars.h"\n'
try:
originalSrc = file('ObjectVars.cc', 'r')
userDef = ''
copy = False
namespace = False
for line in originalSrc:
if 'namespace susy' in line:
namespace = True
if not namespace and 'ObjectVars.h' not in line and not re.match('^[ ]*/\*.*\*/[ ]*$', line):
preamble += line
if '/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = True
if copy:
userDef += line
if '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = False
originalSrc.close()
except:
userDef = '\n/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
for obj in objects:
userDef += '''
void
''' + obj + '''Vars::set(''' + susyObjects[obj] + ' const&'
if useEvent[obj]:
userDef += ', Event const&'
userDef += ''')
{
}
/*static*/
''' + obj + '''Vars::setBranchStatus(TTree&)
{
}
'''
userDef += '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
# ObjectTree.cc
objTreeContent = '''/* Auto-generated source file */
#include "ObjectTree.h"
#include "TFile.h"
#include <stdexcept>
#include <iostream>
namespace susy {
'''
for obj in objects:
objTreeContent += setBranches[obj]
objTreeContent += setAddress[obj]
objTreeContent += pushBack[obj]
objTreeContent += at[obj]
objTreeContent += '''
ObjectTree::ObjectTree() :'''
for obj in objects:
lowerName = obj.lower()
objTreeContent += '''
''' + lowerName + '''Array_(),'''
objTreeContent += '''
runNumber_(0),
lumiNumber_(0),
eventNumber_(0),
output_(0),
ownOutput_(false)
{
}
ObjectTree::~ObjectTree()
{
if(ownOutput_ && output_){
TFile* outFile(output_->GetCurrentFile());
outFile->cd();
output_->Write();
delete outFile;
}
}
void
ObjectTree::setOutput(TString const& _fileName'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
ownOutput_ = true;
TFile::Open(_fileName, "recreate");
output_ = new TTree("objectVars", "Object ID variables");
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
void
ObjectTree::setOutput(TTree& _tree'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
output_ = &_tree;
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
/*static*/
void
ObjectTree::setBranchStatus(TTree& _input'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
_input.SetBranchStatus("runNumber", 1);
_input.SetBranchStatus("luminosityBlockNumber", 1);
_input.SetBranchStatus("eventNumber", 1);
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj + 'Vars::setBranchStatus(_input);'
objTreeContent += '''
}
#ifdef STANDALONE
void
ObjectTree::initEvent(Event const&)
{
runNumber_ = 0;
lumiNumber_ = 0;
eventNumber_ = 0;
#else
void
ObjectTree::initEvent(Event const& _event)
{
runNumber_ = _event.runNumber;
lumiNumber_ = _event.luminosityBlockNumber;
eventNumber_ = _event.eventNumber;
#endif'''
for obj in objects:
objTreeContent += '''
''' + obj.lower() + 'Array_.clear();'
objTreeContent += '''
}
void
ObjectTree::setBranches_('''
for obj in objects:
objTreeContent += 'bool _set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ') + ')'
objTreeContent += '''
{
output_->Branch("runNumber", &runNumber_, "runNumber/i");
output_->Branch("lumiNumber", &lumiNumber_, "lumiNumber/i");
output_->Branch("eventNumber", &eventNumber_, "eventNumber/i");
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj.lower() + 'Array_.setBranches(*output_);'
objTreeContent += '''
}
'''
objTreeContent += '}\n'
objTreeFile = file('ObjectTree.cc', 'w')
objTreeFile.write(objTreeContent)
objTreeFile.close()
# ObjectVars.cc
objVarsContent = '''/* Partially auto-generated source file - edit where indicated */
/* Add necessary inclusions below */
''' + preamble + '''
namespace susy {
'''
for obj in objects:
objVarsContent += cTors[obj]
objVarsContent += '\n'
objVarsContent += userDef
objVarsContent += '''
}
'''
objVarsFile = file('ObjectVars.cc', 'w')
objVarsFile.write(objVarsContent)
objVarsFile.close()
| apache-2.0 |
riyer15/python_koans | python3/koans/about_scoring_project.py | 107 | 2207 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
# You need to write this method
pass
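# A minimal reference sketch of one possible solution (hypothetical helper, not
# part of the koan -- you are still meant to fill in score() above). Triples are
# scored first, then any leftover 1s and 5s.
def _score_reference(dice):
    counts = {}
    for die in dice:
        counts[die] = counts.get(die, 0) + 1
    total = 0
    for value, count in counts.items():
        if count >= 3:
            # a set of three ones is 1000; other triples are 100x the number
            total += 1000 if value == 1 else value * 100
            count -= 3
        if value == 1:
            total += count * 100
        elif value == 5:
            total += count * 50
    return total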
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
self.assertEqual(1150, score([1,1,1,5,1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1,2,2,2]))
        self.assertEqual(350, score([1,5,2,2,2]))
| mit |
victorbriz/rethinkdb | scripts/ui-tests.py | 50 | 3779 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import os, sys, subprocess, argparse
from termcolor import colored, cprint
import time
tests = [
'add-a-namespace',
'add-a-datacenter',
'view-dashboard',
]
git_root = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip('\r\n')
test_file_dir = os.path.join(git_root, 'test/ui_test/')
cwd = os.getcwd()
# Define and parse command-line arguments
parser = argparse.ArgumentParser(description='Run a set of UI tests using CasperJS / PhantomJS.')
parser.add_argument('tests', nargs='*', help='List of tests to run. Specify \'all\' to run all tests.')
parser.add_argument('-p','--rdb-port', nargs='?', dest='rdb_port', default='6001', help='Port of the RethinkDB server to connect to (default is 6001).')
parser.add_argument('-i','--output-images', nargs='?', dest='image_output_directory', const='./casper-results', help='Include if images should be scraped and saved. Optionally specify the output directory (default is ./casper-results/).')
parser.add_argument('-l','--list-tests', action='store_true', help='List available tests to run.')
parser.add_argument('-r','--output-results', nargs='?', dest='result_output_directory', const='./casper-results', help='Include if test results should be saved. Optionally specify the output directory (default is ./casper-results/).')
args = parser.parse_args()
def print_available_tests():
print 'Available tests:'
print '\t- all: run all of the following tests'
for test in tests:
print '\t- ' + test
if args.list_tests:
print_available_tests()
exit(0)
if len(args.tests) < 1:
parser.print_usage()
print '\nNo test specified.',
print_available_tests()
exit(1)
# Prepare the list of tests to process; if 'all' was one of the specified tests then process all tests
if 'all' in args.tests:
test_list = tests
else:
test_list = args.tests
# Process each test name specified on the command line
successful_tests = 0
os.chdir(test_file_dir)
for test_name in test_list:
# Look for a matching test among known tests
casper_script = os.path.join(test_file_dir, test_name + '.coffee')
try:
with open(casper_script) as f: pass
except IOError as e:
print "No test script found for CasperJS test '%s'." % test_name
continue
# Build command with arguments for casperjs test
cl = ['casperjs', '--rdb-server=http://localhost:' + args.rdb_port + '/', casper_script]
# If the option to scrape images was specified, add it to the casperjs argument list
if args.image_output_directory:
image_dir = os.path.abspath(args.image_output_directory)
cl.extend(['--images=' + image_dir])
# Execute casperjs and pretty-print its output
process = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = process.stdout.readlines()
for i, line in enumerate(stdout):
cprint('[%s]' % test_name, attrs=['bold'], end=' ')
print line.rstrip('\n')
# If the option to save results was specified, save stdout to a file
if args.result_output_directory:
result_dir = os.path.abspath(args.result_output_directory)
result_filename = "casper-result_%s" % test_name
result_file = open(os.path.join(result_dir, result_filename), 'w')
for line in stdout:
result_file.write(line)
result_file.close()
# Check the exit code of the process
# 0: casper test passed
# 1: casper test failed
process.poll()
if process.returncode == 0:
successful_tests += 1
print
# Print test suite summary
cprint(" %d of %d tests ran successfully. " % (successful_tests, len(test_list)), attrs=['reverse'])
| agpl-3.0 |
jakesyl/androguard | androguard/core/analysis/sign.py | 38 | 13670 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core.analysis.analysis import TAINTED_PACKAGE_CREATE, TAINTED_PACKAGE_CALL
from androguard.core.bytecodes import dvm
TAINTED_PACKAGE_INTERNAL_CALL = 2
FIELD_ACCESS = { "R" : 0, "W" : 1 }
PACKAGE_ACCESS = { TAINTED_PACKAGE_CREATE : 0, TAINTED_PACKAGE_CALL : 1, TAINTED_PACKAGE_INTERNAL_CALL : 2 }
class Sign :
def __init__(self) :
self.levels = {}
self.hlevels = []
def add(self, level, value) :
self.levels[ level ] = value
self.hlevels.append( level )
def get_level(self, l) :
return self.levels[ "L%d" % l ]
def get_string(self) :
buff = ""
for i in self.hlevels :
buff += self.levels[ i ]
return buff
def get_list(self) :
return self.levels[ "sequencebb" ]
class Signature :
def __init__(self, vmx) :
self.vmx = vmx
self.tainted_packages = self.vmx.get_tainted_packages()
self.tainted_variables = self.vmx.get_tainted_variables()
self._cached_signatures = {}
self._cached_fields = {}
self._cached_packages = {}
self._global_cached = {}
self.levels = {
# Classical method signature with basic blocks, strings, fields, packages
"L0" : {
0 : ( "_get_strings_a", "_get_fields_a", "_get_packages_a" ),
1 : ( "_get_strings_pa", "_get_fields_a", "_get_packages_a" ),
2 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_1" ),
3 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_2" ),
},
# strings
"L1" : [ "_get_strings_a1" ],
# exceptions
"L2" : [ "_get_exceptions" ],
# fill array data
"L3" : [ "_get_fill_array_data" ],
}
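        # get_method() consumes a colon-separated signature_type string (e.g.
        # "L0:L2" or "hex"); for "L0", signature_arguments["L0"] must supply a
        # {"type": ..., "arguments": [...]} mapping selecting one of the tuples above.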
self.classes_names = None
self._init_caches()
def _get_method_info(self, m) :
m1 = m.get_method()
return "%s-%s-%s" % (m1.get_class_name(), m1.get_name(), m1.get_descriptor())
def _get_sequence_bb(self, analysis_method) :
l = []
for i in analysis_method.basic_blocks.get() :
buff = ""
instructions = [j for j in i.get_instructions()]
if len(instructions) > 5 :
for ins in instructions :
buff += ins.get_name()
if buff != "" :
l.append( buff )
return l
def _get_hex(self, analysis_method) :
code = analysis_method.get_method().get_code()
if code == None :
return ""
buff = ""
for i in code.get_bc().get_instructions() :
buff += dvm.clean_name_instruction( i )
buff += dvm.static_operand_instruction( i )
return buff
def _get_bb(self, analysis_method, functions, options) :
bbs = []
for b in analysis_method.basic_blocks.get() :
l = []
l.append( (b.start, "B") )
l.append( (b.start, "[") )
internal = []
op_value = b.get_last().get_op_value()
# return
if op_value >= 0x0e and op_value <= 0x11 :
internal.append( (b.end-1, "R") )
# if
elif op_value >= 0x32 and op_value <= 0x3d :
internal.append( (b.end-1, "I") )
# goto
elif op_value >= 0x28 and op_value <= 0x2a :
internal.append( (b.end-1, "G") )
# sparse or packed switch
elif op_value >= 0x2b and op_value <= 0x2c :
internal.append( (b.end-1, "G") )
for f in functions :
try :
internal.extend( getattr( self, f )( analysis_method, options ) )
except TypeError :
internal.extend( getattr( self, f )( analysis_method ) )
internal.sort()
for i in internal :
if i[0] >= b.start and i[0] < b.end :
l.append( i )
del internal
l.append( (b.end, "]") )
bbs.append( ''.join(i[1] for i in l) )
return bbs
def _init_caches(self) :
if self._cached_fields == {} :
for f_t, f in self.tainted_variables.get_fields() :
self._cached_fields[ f ] = f_t.get_paths_length()
n = 0
for f in sorted( self._cached_fields ) :
self._cached_fields[ f ] = n
n += 1
if self._cached_packages == {} :
for m_t, m in self.tainted_packages.get_packages() :
self._cached_packages[ m ] = m_t.get_paths_length()
n = 0
for m in sorted( self._cached_packages ) :
self._cached_packages[ m ] = n
n += 1
def _get_fill_array_data(self, analysis_method) :
buff = ""
for b in analysis_method.basic_blocks.get() :
for i in b.get_instructions() :
if i.get_name() == "FILL-ARRAY-DATA" :
buff_tmp = i.get_operands()
for j in range(0, len(buff_tmp)) :
buff += "\\x%02x" % ord( buff_tmp[j] )
return buff
def _get_exceptions(self, analysis_method) :
buff = ""
method = analysis_method.get_method()
code = method.get_code()
if code == None or code.get_tries_size() <= 0 :
return buff
handler_catch_list = code.get_handlers()
for handler_catch in handler_catch_list.get_list() :
for handler in handler_catch.get_handlers() :
buff += analysis_method.get_vm().get_cm_type( handler.get_type_idx() )
return buff
def _get_strings_a1(self, analysis_method) :
buff = ""
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
buff += s.replace('\n', ' ')
return buff
def _get_strings_pa(self, analysis_method) :
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S%d" % len(s) ) )
return l
def _get_strings_a(self, analysis_method) :
key = "SA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
l = []
strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
for s in strings_method :
for path in strings_method[s] :
l.append( ( path[1], "S") )
self._global_cached[ key ] = l
return l
def _get_fields_a(self, analysis_method) :
key = "FA-%s" % self._get_method_info(analysis_method)
if key in self._global_cached :
return self._global_cached[ key ]
fields_method = self.tainted_variables.get_fields_by_method( analysis_method.get_method() )
l = []
for f in fields_method :
for path in fields_method[ f ] :
l.append( (path[1], "F%d" % FIELD_ACCESS[ path[0] ]) )
self._global_cached[ key ] = l
return l
def _get_packages_a(self, analysis_method) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
return l
def _get_packages(self, analysis_method, include_packages) :
l = self._get_packages_pa_1( analysis_method, include_packages )
return "".join([ i[1] for i in l ])
def _get_packages_pa_1(self, analysis_method, include_packages) :
key = "PA1-%s-%s" % (self._get_method_info(analysis_method), include_packages)
if key in self._global_cached :
return self._global_cached[ key ]
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
if self.classes_names == None :
self.classes_names = analysis_method.get_vm().get_classes_names()
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
if dst_class_name in self.classes_names :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ 2 ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
else :
if present == True :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
else :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
self._global_cached[ key ] = l
return l
def _get_packages_pa_2(self, analysis_method, include_packages) :
packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
l = []
for m in packages_method :
for path in packages_method[ m ] :
present = False
for i in include_packages :
if m.find(i) == 0 :
present = True
break
if present == True :
l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
continue
if path.get_access_flag() == 1 :
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
else :
l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
return l
def get_method(self, analysis_method, signature_type, signature_arguments={}) :
key = "%s-%s-%s" % (self._get_method_info(analysis_method), signature_type, signature_arguments)
if key in self._cached_signatures :
return self._cached_signatures[ key ]
s = Sign()
#print signature_type, signature_arguments
for i in signature_type.split(":") :
# print i, signature_arguments[ i ]
if i == "L0" :
_type = self.levels[ i ][ signature_arguments[ i ][ "type" ] ]
try :
_arguments = signature_arguments[ i ][ "arguments" ]
except KeyError :
_arguments = []
value = self._get_bb( analysis_method, _type, _arguments )
s.add( i, ''.join(z for z in value) )
elif i == "L4" :
try :
_arguments = signature_arguments[ i ][ "arguments" ]
except KeyError :
_arguments = []
value = self._get_packages( analysis_method, _arguments )
s.add( i , value )
elif i == "hex" :
value = self._get_hex( analysis_method )
s.add( i, value )
elif i == "sequencebb" :
_type = ('_get_strings_a', '_get_fields_a', '_get_packages_pa_1')
_arguments = ['Landroid', 'Ljava']
#value = self._get_bb( analysis_method, _type, _arguments )
#s.add( i, value )
value = self._get_sequence_bb( analysis_method )
s.add( i, value )
else :
for f in self.levels[ i ] :
value = getattr( self, f )( analysis_method )
s.add( i, value )
self._cached_signatures[ key ] = s
return s
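# A minimal sketch of the call convention inferred from the parsing above
# (`sig` and the "type"/"arguments" keys are assumptions, not a documented
# API): `signature_type` is a colon-separated list of levels, and
# `signature_arguments` supplies per-level options, e.g.
#
#   s = sig.get_method(analysis_method, "L0:L4",
#                      {"L0": {"type": 0, "arguments": ["Landroid"]},
#                       "L4": {"arguments": ["Landroid", "Ljava"]}})
#
# where "type" indexes an entry in self.levels["L0"] and the "arguments"
# lists are package-name prefixes to match.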
| apache-2.0 |
dya2/python-for-android | python-modules/twisted/twisted/test/test_text.py | 49 | 5450 |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.python import text
import string
from cStringIO import StringIO
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
lineWidth = 72
def set_lineWidth(n):
global lineWidth
lineWidth = n
class WrapTest(unittest.TestCase):
def setUp(self):
self.sampleSplitText = string.split(sampleText)
self.output = text.wordWrap(sampleText, lineWidth)
def test_wordCount(self):
"""Compare the number of words."""
words = []
for line in self.output:
words.extend(string.split(line))
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.failUnlessEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
"""Compare the lists of words."""
words = []
for line in self.output:
words.extend(string.split(line))
# Using failUnlessEqual here prints out some
# rather too long lists.
self.failUnless(self.sampleSplitText == words)
def test_lineLength(self):
"""Check the length of the lines."""
failures = []
for line in self.output:
if not len(line) <= lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
lineWidth, failures))
class SplitTest(unittest.TestCase):
"""Tests for text.splitQuoted()"""
def test_oneWord(self):
"""Splitting strings with one-word phrases."""
s = 'This code "works."'
r = text.splitQuoted(s)
self.failUnlessEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.failUnlessEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
# Some of the many tests that would fail:
#def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.failUnlessEqual(['With', phrase, 'between.'], r)
#def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.failUnlessEqual(["One Phrase"], r)
class StrFileTest(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertEquals(False, text.strFile("x", self.io))
def test_1_1(self):
self.assertEquals(True, text.strFile("t", self.io))
def test_1_2(self):
self.assertEquals(True, text.strFile("h", self.io))
def test_1_3(self):
self.assertEquals(True, text.strFile("i", self.io))
def test_1_4(self):
self.assertEquals(True, text.strFile("s", self.io))
def test_1_5(self):
self.assertEquals(True, text.strFile("n", self.io))
def test_1_6(self):
self.assertEquals(True, text.strFile("g", self.io))
def test_3_1(self):
self.assertEquals(True, text.strFile("thi", self.io))
def test_3_2(self):
self.assertEquals(True, text.strFile("his", self.io))
def test_3_3(self):
self.assertEquals(True, text.strFile("is ", self.io))
def test_3_4(self):
self.assertEquals(True, text.strFile("ing", self.io))
def test_3_f(self):
self.assertEquals(False, text.strFile("bla", self.io))
def test_large_1(self):
self.assertEquals(True, text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertEquals(True, text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertEquals(False, text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertEquals(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
def test_self(self):
self.assertEquals(True, text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertEquals(True, text.strFile("ThIs is A test STRING", self.io, False))
class DeprecationTest(unittest.TestCase):
"""
Tests for deprecations in L{twisted.python.text}
"""
def test_docstringLStrip(self):
"""
L{docstringLStrip} is deprecated as of 10.2.0
"""
text.docstringLStrip("")
warningsShown = self.flushWarnings([self.test_docstringLStrip])
self.assertEquals(1, len(warningsShown))
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEquals(warningsShown[0]['message'],
"twisted.python.text.docstringLStrip was "
"deprecated in Twisted 10.2.0: Please use "
"inspect.getdoc instead.")
testCases = [WrapTest, SplitTest, StrFileTest]
| apache-2.0 |
kambysese/mne-python | mne/connectivity/utils.py | 15 | 2957 | # Authors: Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
def check_indices(indices):
"""Check indices parameter."""
if not isinstance(indices, tuple) or len(indices) != 2:
raise ValueError('indices must be a tuple of length 2')
if len(indices[0]) != len(indices[1]):
raise ValueError('Index arrays indices[0] and indices[1] must '
'have the same length')
return indices
def seed_target_indices(seeds, targets):
"""Generate indices parameter for seed based connectivity analysis.
Parameters
----------
seeds : array of int | int
Seed indices.
targets : array of int | int
Indices of signals for which to compute connectivity.
Returns
-------
indices : tuple of array
The indices parameter used for connectivity computation.
"""
# make them arrays
seeds = np.asarray((seeds,)).ravel()
targets = np.asarray((targets,)).ravel()
n_seeds = len(seeds)
n_targets = len(targets)
indices = (np.concatenate([np.tile(i, n_targets) for i in seeds]),
np.tile(targets, n_seeds))
return indices
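# A minimal doctest-style sketch of the expansion: two seeds and three
# targets yield all six seed-target pairs.
#
#   >>> seed_target_indices([0, 1], [2, 3, 4])
#   (array([0, 0, 0, 1, 1, 1]), array([2, 3, 4, 2, 3, 4]))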
def degree(connectivity, threshold_prop=0.2):
"""Compute the undirected degree of a connectivity matrix.
Parameters
----------
connectivity : ndarray, shape (n_nodes, n_nodes)
The connectivity matrix.
threshold_prop : float
The proportion of edges to keep in the graph before
computing the degree. The value should be between 0
and 1.
Returns
-------
degree : ndarray, shape (n_nodes,)
The computed degree.
Notes
-----
During thresholding, the symmetry of the connectivity matrix is
auto-detected based on :func:`numpy.allclose` of it with its transpose.
"""
connectivity = np.array(connectivity)
if connectivity.ndim != 2 or \
connectivity.shape[0] != connectivity.shape[1]:
        raise ValueError('connectivity must have shape (n_nodes, n_nodes), '
                         'got %s' % (connectivity.shape,))
n_nodes = len(connectivity)
if np.allclose(connectivity, connectivity.T):
split = 2.
connectivity[np.tril_indices(n_nodes)] = 0
else:
split = 1.
threshold_prop = float(threshold_prop)
if not 0 < threshold_prop <= 1:
        raise ValueError('threshold must be 0 < threshold <= 1, got %s'
                         % (threshold_prop,))
degree = connectivity.ravel() # no need to copy because np.array does
degree[::n_nodes + 1] = 0.
n_keep = int(round((degree.size - len(connectivity)) *
threshold_prop / split))
degree[np.argsort(degree)[:-n_keep]] = 0
degree.shape = connectivity.shape
if split == 2:
degree += degree.T # normally unsafe, but we know where our zeros are
degree = np.sum(degree > 0, axis=0)
return degree
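# A minimal sketch for a small symmetric matrix: with threshold_prop=0.5 the
# two strongest of the three undirected edges survive thresholding, giving
# per-node edge counts.
#
#   >>> conn = np.array([[0., 0.5, 0.1],
#   ...                  [0.5, 0., 0.2],
#   ...                  [0.1, 0.2, 0.]])
#   >>> degree(conn, threshold_prop=0.5)
#   array([1, 2, 1])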
| bsd-3-clause |
irisfeng/CodeScanner | SZQRCodeViewController/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py | 216 | 1427 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that relinking a solib doesn't relink a dependent executable if the
solib's public API hasn't changed.
"""
import os
import sys
import TestCommon
import TestGyp
# NOTE(fischman): This test will not work with other generators because the
# API-hash-based-mtime-preservation optimization is only implemented in
# ninja.py. It could be extended to the make.py generator as well pretty
# easily, probably.
# (also, it tests ninja-specific out paths, which would have to be generalized
# if this was extended to other generators).
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('solibs_avoid_relinking.gyp')
# Build the executable, grab its timestamp, touch the solib's source, rebuild
# executable, ensure timestamp hasn't changed.
test.build('solibs_avoid_relinking.gyp', 'b')
test.built_file_must_exist('b' + TestCommon.exe_suffix)
pre_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
os.utime(os.path.join(test.workdir, 'solib.cc'),
(pre_stat.st_atime, pre_stat.st_mtime + 100))
test.sleep()
test.build('solibs_avoid_relinking.gyp', 'b')
post_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
if pre_stat.st_mtime != post_stat.st_mtime:
test.fail_test()
else:
test.pass_test()
| mit |
spisneha25/django | django/views/i18n.py | 82 | 11102 | import gettext as gettext_module
import importlib
import json
import os
from django import http
from django.apps import apps
from django.conf import settings
from django.core.urlresolvers import translate_url
from django.template import Context, Engine
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import smart_text
from django.utils.formats import get_format, get_format_modules
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language, to_locale,
)
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language')
if lang_code and check_for_language(lang_code):
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = http.HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
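# A minimal sketch of driving this view with the test client (assumes
# django.conf.urls.i18n is included in the URLconf so the view is mounted at
# /i18n/setlang/; the paths are placeholders):
#
#   from django.test import Client
#   c = Client()
#   c.post('/i18n/setlang/', {'language': 'fr', 'next': '/about/'})
#   # -> 302 redirect; the choice persists in the session or language cookie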
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
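# Rough shape of the result (values depend on the active locale and format
# settings; those shown assume the default 'en-us' locale). Note that ints
# pass through smart_text, so they come back as strings:
#
#   >>> get_formats()['DECIMAL_SEPARATOR']
#   u'.'
#   >>> get_formats()['FIRST_DAY_OF_WEEK']
#   u'0'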
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
app_configs = apps.get_app_configs()
allowable_packages = set(app_config.name for app_config in app_configs)
allowable_packages.add('django.conf')
packages = [p for p in packages if p in allowable_packages]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(reversed(settings.LOCALE_PATHS))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
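# For reference, a typical gettext metadata entry parsed above looks like
#
#   Plural-Forms: nplurals=2; plural=(n != 1);
#
# which leaves `plural` holding the string "(n != 1)"; the template embeds it
# verbatim as the body of django.pluralidx.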
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
| bsd-3-clause |
aaronzirbes/ansible | contrib/inventory/jail.py | 132 | 1288 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'jail'})
else:
print "Need an argument, either --list or --host <host>"
| gpl-3.0 |
dsimic/taxsims | ss.py | 1 | 1112 | import pandas as pd
import numpy as np
def ss_calc(
contrib_yearly, inv_gwth_rt, num_years, safe_withdrw_rate, start_age=28
):
"""
    inv_gwth_rt is inflation-adjusted.
    contrib_yearly is in first-year dollars.
"""
tot_years = max(0, 62 - start_age - num_years) + num_years
df = pd.DataFrame({
'contrib_yearly': [contrib_yearly] * num_years + [0.] *
max(0, (62 - num_years - start_age)),
'inv_value': [0] * tot_years,
}, index=range(tot_years))
for year in range(0, tot_years):
print year
multiplier = np.array([
(1. + inv_gwth_rt) ** (year - y_) for y_ in range(year + 1)])
print multiplier
df['inv_value'][year] = np.sum(
np.array(df['contrib_yearly'][0: year + 1]) * multiplier)
df['monthly_inv_income'] = safe_withdrw_rate * df['inv_value'] / 12.
df['monthly_inv_income_w_spouse'] = df['monthly_inv_income'] * 1.5
return df
if __name__ == "__main__":
df = ss_calc(15e3, .03, 10, .03)
ss_benefit_monthly = 939.00
ss_benefit_w_spouse_monthly = 1.5 * ss_benefit_monthly
| gpl-2.0 |
makermade/arm_android-21_arm-linux-androideabi-4.8 | lib/python2.7/distutils/msvc9compiler.py | 148 | 31018 | """distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id$"
import os
import subprocess
import sys
import re
from distutils.errors import (DistutilsExecError, DistutilsPlatformError,
CompileError, LibError, LinkError)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
HKEYS = (_winreg.HKEY_USERS,
_winreg.HKEY_CURRENT_USER,
_winreg.HKEY_LOCAL_MACHINE,
_winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Wow6432Node\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
# the param to cross-compile on x86 targetting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
# trying Express edition
if productdir is None:
vsbase = VSEXPRESS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
log.debug("Unable to find productdir in registry")
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
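# A minimal sketch of a call (requires an actual VS 2008 install, hence shown
# as a doctest-style comment only; the function raises if any expected key is
# missing):
#
#   >>> env = query_vcvarsall(9.0, "x86")
#   >>> sorted(env)
#   ['include', 'lib', 'libpath', 'path']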
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
# take care to only use strings in the environment.
self.__paths = vc_env['path'].encode('mbcs').split(os.pathsep)
os.environ['lib'] = vc_env['lib'].encode('mbcs')
os.environ['include'] = vc_env['include'].encode('mbcs')
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError, msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can)
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
| gpl-2.0 |
pacoqueen/bbinn | PyChart-1.39/demos/linestyles.py | 1 | 1258 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
#
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
import pychart.doc_support
import chartdemo
import re
can = canvas.default_canvas()
x = 100
y = 500
def drawLine(style):
global x, y
name = pychart.doc_support.stringify_value(style)
name = re.sub("line_style\\.", "", name)
name = pychart.doc_support.break_string(name)
can.line(style, x, y, x+40, y)
#print "name=", name
height = font.text_height(name)[0] + 5
tb = text_box.T(text=name, loc=(x, y-height), line_style=None)
x = x + 60
tb.draw()
for style in line_style.standards.list():
drawLine(style)
if x >= chartdemo.MaxWidth:
x=100
y=y-40
| gpl-2.0 |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py | 2 | 4048 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
executable_name = "MockSCM"
def __init__(self, filesystem=None, executive=None):
self.checkout_root = "/mock-checkout/third_party/WebKit"
self.added_paths = set()
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
def add(self, destination_path, return_exit_code=False):
self.add_list([destination_path], return_exit_code)
def add_list(self, destination_paths, return_exit_code=False):
self.added_paths.update(set(destination_paths))
if return_exit_code:
return 0
def has_working_directory_changes(self):
return False
def ensure_cleanly_tracking_remote_master(self):
pass
def current_branch(self):
return "mock-branch-name"
def checkout_branch(self, name):
pass
def create_clean_branch(self, name):
pass
def delete_branch(self, name):
pass
def supports_local_commits(self):
return True
def exists(self, path):
# TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
# We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
return True
def absolute_path(self, *comps):
return self._filesystem.join(self.checkout_root, *comps)
def svn_revision(self, path):
return '5678'
def svn_revision_from_git_commit(self, git_commit):
if git_commit == '6469e754a1':
return 1234
if git_commit == '624c3081c0':
return 5678
if git_commit == '624caaaaaa':
return 10000
return None
def timestamp_of_revision(self, path, revision):
return '2013-02-01 08:48:05 +0000'
def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
pass
def delete(self, path):
return self.delete_list([path])
def delete_list(self, paths):
if not self._filesystem:
return
for path in paths:
if self._filesystem.exists(path):
self._filesystem.remove(path)
def move(self, origin, destination):
if self._filesystem:
self._filesystem.move(self.absolute_path(origin), self.absolute_path(destination))
| gpl-3.0 |
kookie424/googletest | test/gtest_test_utils.py | 674 | 10826 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) pair; with stderr
      # redirected above, its first element is the child's combined output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
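# A minimal, hedged usage sketch for the Subprocess wrapper above; 'echo'
# is an assumption (any binary on PATH would do). The helper is defined but
# never called, so importing this module stays side-effect free.
def _ExampleSubprocessUsage():
  """Runs a trivial child and inspects the outcome attributes."""
  p = Subprocess(['echo', 'hello'], capture_stderr=True)
  if p.terminated_by_signal:
    print >> sys.stderr, 'child killed by signal %d' % p.signal
  elif p.exited and p.exit_code == 0:
    print p.output  # the child's combined stdout/stderr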
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause |
40223247/test2 | static/Brython3.1.1-20150328-091302/Lib/fractions.py | 722 | 23203 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, a%b
return a
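# Hedged examples of the helper above: gcd(120, 84) == 12, and because the
# sign follows b, gcd(4, -6) == -2; Fraction's constructor relies on this
# so that the reduced denominator always comes out positive.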
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None):
"""Constructs a Rational.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if isinstance(numerator, numbers.Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, float):
# Exact conversion from float
value = Fraction.from_float(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, Decimal):
value = Fraction.from_decimal(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, str):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif (isinstance(numerator, numbers.Rational) and
isinstance(denominator, numbers.Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
g = gcd(numerator, denominator)
self._numerator = numerator // g
self._denominator = denominator // g
return self
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
Beware that Fraction.from_float(0.3) != Fraction(3, 10).
"""
if isinstance(f, numbers.Integral):
return cls(f)
elif not isinstance(f, float):
raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
(cls.__name__, f, type(f).__name__))
if math.isnan(f):
raise ValueError("Cannot convert %r to %s." % (f, cls.__name__))
if math.isinf(f):
raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__))
return cls(*f.as_integer_ratio())
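    # Hedged example for the method above: Fraction.from_float(0.3) yields
    # Fraction(5404319552844595, 18014398509481984), the exact binary value
    # of the float, which is why it differs from Fraction(3, 10).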
@classmethod
def from_decimal(cls, dec):
"""Converts a finite Decimal instance to a rational number, exactly."""
from decimal import Decimal
if isinstance(dec, numbers.Integral):
dec = Decimal(int(dec))
elif not isinstance(dec, Decimal):
raise TypeError(
"%s.from_decimal() only takes Decimals, not %r (%s)" %
(cls.__name__, dec, type(dec).__name__))
if dec.is_infinite():
raise OverflowError(
"Cannot convert %s to %s." % (dec, cls.__name__))
if dec.is_nan():
raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__))
sign, digits, exp = dec.as_tuple()
digits = int(''.join(map(str, digits)))
if sign:
digits = -digits
if exp >= 0:
return cls(digits * 10 ** exp)
else:
return cls(digits, 10 ** -exp)
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
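    # Hedged worked example of the loop above for pi with
    # max_denominator=10: the candidate bounds come out as 25/8 and 22/7,
    # and 22/7 is closer to pi, matching the doctest's Fraction(22, 7).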
@property
def numerator(a):
return a._numerator
@property
def denominator(a):
return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, numbers.Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
        4. If B <: Rational, then __radd__ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
           base ABC, and falls back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, numbers.Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
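    # Hedged note: each operator pair below is produced by the generator
    # above, e.g. __add__, __radd__ = _operator_fallbacks(_add, operator.add),
    # so Fraction(1, 2) + Fraction(1, 3) stays exact as Fraction(5, 6) while
    # Fraction(1, 2) + 0.5 falls back to float addition and gives 1.0.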
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
def __floordiv__(a, b):
"""a // b"""
return math.floor(a / b)
def __rfloordiv__(b, a):
"""a // b"""
return math.floor(a / b)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
def __rpow__(b, a):
"""a ** b"""
if b._denominator == 1 and b._numerator >= 0:
# If a is an int, keep it that way if possible.
return a ** b._numerator
if isinstance(a, numbers.Rational):
return Fraction(a.numerator, a.denominator) ** b
if b._denominator == 1:
return a ** b._numerator
return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
def __floor__(a):
"""Will be math.floor(a) in 3.0."""
return a.numerator // a.denominator
def __ceil__(a):
"""Will be math.ceil(a) in 3.0."""
# The negations cleverly convince floordiv to return the ceiling.
return -(-a.numerator // a.denominator)
def __round__(self, ndigits=None):
"""Will be round(self, ndigits) in 3.0.
Rounds half toward even.
"""
if ndigits is None:
floor, remainder = divmod(self.numerator, self.denominator)
if remainder * 2 < self.denominator:
return floor
elif remainder * 2 > self.denominator:
return floor + 1
# Deal with the half case:
elif floor % 2 == 0:
return floor
else:
return floor + 1
shift = 10**abs(ndigits)
# See _operator_fallbacks.forward to check that the results of
# these operations will always be Fraction and therefore have
# round().
if ndigits > 0:
return Fraction(round(self * shift), shift)
else:
return Fraction(round(self / shift) * shift)
def __hash__(self):
"""hash(self)"""
# XXX since this method is expensive, consider caching the result
# In order to make sure that the hash of a Fraction agrees
# with the hash of a numerically equal integer, float or
# Decimal instance, we follow the rules for numeric hashes
# outlined in the documentation. (See library docs, 'Built-in
# Types').
# dinv is the inverse of self._denominator modulo the prime
# _PyHASH_MODULUS, or 0 if self._denominator is divisible by
# _PyHASH_MODULUS.
dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
if not dinv:
hash_ = _PyHASH_INF
else:
hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
result = hash_ if self >= 0 else -hash_
return -2 if result == -1 else result
def __eq__(a, b):
"""a == b"""
if isinstance(b, numbers.Rational):
return (a._numerator == b.numerator and
a._denominator == b.denominator)
if isinstance(b, numbers.Complex) and b.imag == 0:
b = b.real
if isinstance(b, float):
if math.isnan(b) or math.isinf(b):
# comparisons with an infinity or nan should behave in
# the same way for any finite a, so treat a as zero.
return 0.0 == b
else:
return a == a.from_float(b)
else:
# Since a doesn't know how to compare with b, let's give b
# a chance to compare itself with a.
return NotImplemented
def _richcmp(self, other, op):
"""Helper for comparison operators, for internal use only.
Implement comparison between a Rational instance `self`, and
either another Rational instance or a float `other`. If
`other` is not a Rational instance or a float, return
NotImplemented. `op` should be one of the six standard
comparison operators.
"""
# convert other to a Rational instance where reasonable.
if isinstance(other, numbers.Rational):
return op(self._numerator * other.denominator,
self._denominator * other.numerator)
if isinstance(other, float):
if math.isnan(other) or math.isinf(other):
return op(0.0, other)
else:
return op(self, self.from_float(other))
else:
return NotImplemented
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __bool__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
| gpl-3.0 |
Psycojoker/wanawana | wanawana/settings.py | 1 | 2687 | """
Django settings for wanawana project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w2=4yi@cyc@vsio@$tvz$%&_po6si@533=cwh5kr2dk#pd69)v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'django_extensions',
'debug_toolbar',
'django_pdb',
'wanawana',
'users',
'events',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
)
ROOT_URLCONF = 'wanawana.urls'
TEMPLATE_LOADERS = (
'hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
WSGI_APPLICATION = 'wanawana.wsgi.application'
# Email configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from settings_local import *
except ImportError:
pass
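# Hedged sketch of the override hook above: a settings_local.py placed next
# to this module might contain, for example,
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['wanawana.example.org']  # hypothetical hostname
#
# and anything it defines replaces the values above at import time.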
| gpl-3.0 |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/protocols/sip.py | 8 | 42262 | # -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol.
Documented in RFC 2543.
[Superseded by 3261]
This module contains a deprecated implementation of HTTP Digest authentication.
See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
"""
# system imports
import socket, time, sys, random, warnings
from hashlib import md5
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, util
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.internet import protocol, defer, reactor
from twisted import cred
from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
# sibling imports
from twisted.protocols import basic
PORT = 5060
# SIP headers have short forms
shortHeaders = {"call-id": "i",
"contact": "m",
"content-encoding": "e",
"content-length": "l",
"content-type": "c",
"from": "f",
"subject": "s",
"to": "t",
"via": "v",
}
longHeaders = {}
for k, v in shortHeaders.items():
longHeaders[v] = k
del k, v
statusCodes = {
100: "Trying",
180: "Ringing",
181: "Call Is Being Forwarded",
182: "Queued",
183: "Session Progress",
200: "OK",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
305: "Use Proxy",
380: "Alternative Service",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict", # Not in RFC3261
410: "Gone",
411: "Length Required", # Not in RFC3261
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
416: "Unsupported URI Scheme",
420: "Bad Extension",
421: "Extension Required",
423: "Interval Too Brief",
480: "Temporarily Unavailable",
481: "Call/Transaction Does Not Exist",
482: "Loop Detected",
483: "Too Many Hops",
484: "Address Incomplete",
485: "Ambiguous",
486: "Busy Here",
487: "Request Terminated",
488: "Not Acceptable Here",
491: "Request Pending",
493: "Undecipherable",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway", # no donut
503: "Service Unavailable",
504: "Server Time-out",
505: "SIP Version not supported",
513: "Message Too Large",
600: "Busy Everywhere",
603: "Decline",
604: "Does not exist anywhere",
606: "Not Acceptable",
}
specialCases = {
'cseq': 'CSeq',
'call-id': 'Call-ID',
'www-authenticate': 'WWW-Authenticate',
}
def dashCapitalize(s):
''' Capitalize a string, making sure to treat - as a word separator '''
return '-'.join([ x.capitalize() for x in s.split('-')])
def unq(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def DigestCalcHA1(
pszAlg,
pszUserName,
pszRealm,
pszPassword,
pszNonce,
pszCNonce,
):
m = md5()
m.update(pszUserName)
m.update(":")
m.update(pszRealm)
m.update(":")
m.update(pszPassword)
HA1 = m.digest()
if pszAlg == "md5-sess":
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
m.update(pszCNonce)
HA1 = m.digest()
return HA1.encode('hex')
DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
def DigestCalcResponse(
HA1,
pszNonce,
pszNonceCount,
pszCNonce,
pszQop,
pszMethod,
pszDigestUri,
pszHEntity,
):
m = md5()
m.update(pszMethod)
m.update(":")
m.update(pszDigestUri)
if pszQop == "auth-int":
m.update(":")
m.update(pszHEntity)
HA2 = m.digest().encode('hex')
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
if pszNonceCount and pszCNonce: # pszQop:
m.update(pszNonceCount)
m.update(":")
m.update(pszCNonce)
m.update(":")
m.update(pszQop)
m.update(":")
m.update(HA2)
hash = m.digest().encode('hex')
return hash
DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
_absent = object()
class Via(object):
"""
A L{Via} is a SIP Via header, representing a segment of the path taken by
the request.
See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
@ivar transport: Network protocol used for this leg. (Probably either "TCP"
or "UDP".)
@type transport: C{str}
@ivar branch: Unique identifier for this request.
@type branch: C{str}
@ivar host: Hostname or IP for this leg.
@type host: C{str}
@ivar port: Port used for this leg.
    @type port: C{int}, or None.
@ivar rportRequested: Whether to request RFC 3581 client processing or not.
@type rportRequested: C{bool}
@ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
should set this parameter to the source port the request was received
from.
@type rportValue: C{int}, or None.
@ivar ttl: Time-to-live for requests on multicast paths.
@type ttl: C{int}, or None.
@ivar maddr: The destination multicast address, if any.
@type maddr: C{str}, or None.
@ivar hidden: Obsolete in SIP 2.0.
@type hidden: C{bool}
@ivar otherParams: Any other parameters in the header.
@type otherParams: C{dict}
"""
def __init__(self, host, port=PORT, transport="UDP", ttl=None,
hidden=False, received=None, rport=_absent, branch=None,
maddr=None, **kw):
"""
Set parameters of this Via header. All arguments correspond to
attributes of the same name.
To maintain compatibility with old SIP
code, the 'rport' argument is used to determine the values of
C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
to True. (The deprecated method for doing this is to pass True.) If an
integer, C{rportValue} is set to the given value.
Any arguments not explicitly named here are collected into the
C{otherParams} dict.
"""
self.transport = transport
self.host = host
self.port = port
self.ttl = ttl
self.hidden = hidden
self.received = received
if rport is True:
warnings.warn(
"rport=True is deprecated since Twisted 9.0.",
DeprecationWarning,
stacklevel=2)
self.rportValue = None
self.rportRequested = True
elif rport is None:
self.rportValue = None
self.rportRequested = True
elif rport is _absent:
self.rportValue = None
self.rportRequested = False
else:
self.rportValue = rport
self.rportRequested = False
self.branch = branch
self.maddr = maddr
self.otherParams = kw
def _getrport(self):
"""
Returns the rport value expected by the old SIP code.
"""
if self.rportRequested == True:
return True
elif self.rportValue is not None:
return self.rportValue
else:
return None
def _setrport(self, newRPort):
"""
L{Base._fixupNAT} sets C{rport} directly, so this method sets
C{rportValue} based on that.
@param newRPort: The new rport value.
@type newRPort: C{int}
"""
self.rportValue = newRPort
self.rportRequested = False
rport = property(_getrport, _setrport)
def toString(self):
"""
Serialize this header for use in a request or response.
"""
s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port)
if self.hidden:
s += ";hidden"
for n in "ttl", "branch", "maddr", "received":
value = getattr(self, n)
if value is not None:
s += ";%s=%s" % (n, value)
if self.rportRequested:
s += ";rport"
elif self.rportValue is not None:
s += ";rport=%s" % (self.rport,)
etc = self.otherParams.items()
etc.sort()
for k, v in etc:
if v is None:
s += ";" + k
else:
s += ";%s=%s" % (k, v)
return s
def parseViaHeader(value):
"""
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via}
"""
parts = value.split(";")
sent, params = parts[0], parts[1:]
protocolinfo, by = sent.split(" ", 1)
by = by.strip()
result = {}
pname, pversion, transport = protocolinfo.split("/")
if pname != "SIP" or pversion != "2.0":
raise ValueError, "wrong protocol or version: %r" % value
result["transport"] = transport
if ":" in by:
host, port = by.split(":")
result["port"] = int(port)
result["host"] = host
else:
result["host"] = by
for p in params:
        # it's the comment-stripping dance!
p = p.strip().split(" ", 1)
if len(p) == 1:
p, comment = p[0], ""
else:
p, comment = p
if p == "hidden":
result["hidden"] = True
continue
parts = p.split("=", 1)
if len(parts) == 1:
name, value = parts[0], None
else:
name, value = parts
if name in ("rport", "ttl"):
value = int(value)
result[name] = value
return Via(**result)
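# Hedged example for the parser above:
# parseViaHeader('SIP/2.0/UDP example.com:5060;branch=z9hG4bK74bf9')
# yields a Via with transport='UDP', host='example.com', port=5060 and
# branch='z9hG4bK74bf9'; calling toString() on it rebuilds the header.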
class URL:
"""A SIP URL."""
def __init__(self, host, username=None, password=None, port=None,
transport=None, usertype=None, method=None,
ttl=None, maddr=None, tag=None, other=None, headers=None):
self.username = username
self.host = host
self.password = password
self.port = port
self.transport = transport
self.usertype = usertype
self.method = method
self.tag = tag
self.ttl = ttl
self.maddr = maddr
if other == None:
self.other = []
else:
self.other = other
if headers == None:
self.headers = {}
else:
self.headers = headers
def toString(self):
l = []; w = l.append
w("sip:")
if self.username != None:
w(self.username)
if self.password != None:
w(":%s" % self.password)
w("@")
w(self.host)
if self.port != None:
w(":%d" % self.port)
if self.usertype != None:
w(";user=%s" % self.usertype)
for n in ("transport", "ttl", "maddr", "method", "tag"):
v = getattr(self, n)
if v != None:
w(";%s=%s" % (n, v))
for v in self.other:
w(";%s" % v)
if self.headers:
w("?")
w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()]))
return "".join(l)
def __str__(self):
return self.toString()
def __repr__(self):
return '<URL %s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport)
def parseURL(url, host=None, port=None):
"""Return string into URL object.
URIs are of form 'sip:[email protected]'.
"""
d = {}
if not url.startswith("sip:"):
raise ValueError("unsupported scheme: " + url[:4])
parts = url[4:].split(";")
userdomain, params = parts[0], parts[1:]
udparts = userdomain.split("@", 1)
if len(udparts) == 2:
userpass, hostport = udparts
upparts = userpass.split(":", 1)
if len(upparts) == 1:
d["username"] = upparts[0]
else:
d["username"] = upparts[0]
d["password"] = upparts[1]
else:
hostport = udparts[0]
hpparts = hostport.split(":", 1)
if len(hpparts) == 1:
d["host"] = hpparts[0]
else:
d["host"] = hpparts[0]
d["port"] = int(hpparts[1])
if host != None:
d["host"] = host
if port != None:
d["port"] = port
for p in params:
if p == params[-1] and "?" in p:
d["headers"] = h = {}
p, headers = p.split("?", 1)
for header in headers.split("&"):
k, v = header.split("=")
h[k] = v
nv = p.split("=", 1)
if len(nv) == 1:
d.setdefault("other", []).append(p)
continue
name, value = nv
if name == "user":
d["usertype"] = value
elif name in ("transport", "ttl", "maddr", "method", "tag"):
if name == "ttl":
value = int(value)
d[name] = value
else:
d.setdefault("other", []).append(p)
return URL(**d)
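# Hedged example: parseURL('sip:[email protected]:5061;transport=udp')
# returns a URL with username='bob', host='proxy.example.com', port=5061
# and transport='udp'; str() of the result reproduces the same URI.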
def cleanRequestURL(url):
"""Clean a URL from a Request line."""
url.transport = None
url.maddr = None
url.ttl = None
url.headers = {}
def parseAddress(address, host=None, port=None, clean=0):
"""Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers.
"""
address = address.strip()
# simple 'sip:foo' case
if address.startswith("sip:"):
return "", parseURL(address, host=host, port=port), {}
params = {}
name, url = address.split("<", 1)
name = name.strip()
if name.startswith('"'):
name = name[1:]
if name.endswith('"'):
name = name[:-1]
url, paramstring = url.split(">", 1)
url = parseURL(url, host=host, port=port)
paramstring = paramstring.strip()
if paramstring:
for l in paramstring.split(";"):
if not l:
continue
k, v = l.split("=")
params[k] = v
if clean:
# rfc 2543 6.21
url.ttl = None
url.headers = {}
url.transport = None
url.maddr = None
return name, url, params
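# Hedged example: parseAddress('"Bob" <sip:[email protected]>;tag=456248')
# returns ('Bob', <URL for sip:[email protected]>, {'tag': '456248'});
# with clean=1 the URL's transport, maddr, ttl and headers are also
# blanked, per RFC 2543 6.21.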
class SIPError(Exception):
def __init__(self, code, phrase=None):
if phrase is None:
phrase = statusCodes[code]
Exception.__init__(self, "SIP error (%d): %s" % (code, phrase))
self.code = code
self.phrase = phrase
class RegistrationError(SIPError):
"""Registration was not possible."""
class Message:
"""A SIP message."""
length = None
def __init__(self):
self.headers = util.OrderedDict() # map name to list of values
self.body = ""
self.finished = 0
def addHeader(self, name, value):
name = name.lower()
name = longHeaders.get(name, name)
if name == "content-length":
self.length = int(value)
self.headers.setdefault(name,[]).append(value)
def bodyDataReceived(self, data):
self.body += data
def creationFinished(self):
if (self.length != None) and (self.length != len(self.body)):
raise ValueError, "wrong body length"
self.finished = 1
def toString(self):
s = "%s\r\n" % self._getHeaderLine()
for n, vs in self.headers.items():
for v in vs:
s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v)
s += "\r\n"
s += self.body
return s
def _getHeaderLine(self):
raise NotImplementedError
class Request(Message):
"""A Request for a URI"""
def __init__(self, method, uri, version="SIP/2.0"):
Message.__init__(self)
self.method = method
if isinstance(uri, URL):
self.uri = uri
else:
self.uri = parseURL(uri)
cleanRequestURL(self.uri)
def __repr__(self):
return "<SIP Request %d:%s %s>" % (id(self), self.method, self.uri.toString())
def _getHeaderLine(self):
return "%s %s SIP/2.0" % (self.method, self.uri.toString())
class Response(Message):
"""A Response to a URI Request"""
def __init__(self, code, phrase=None, version="SIP/2.0"):
Message.__init__(self)
self.code = code
if phrase == None:
phrase = statusCodes[code]
self.phrase = phrase
def __repr__(self):
return "<SIP Response %d:%s>" % (id(self), self.code)
def _getHeaderLine(self):
return "SIP/2.0 %s %s" % (self.code, self.phrase)
class MessagesParser(basic.LineReceiver):
"""A SIP messages parser.
Expects dataReceived, dataDone repeatedly,
    in that order. Shouldn't be connected to an actual transport.
"""
version = "SIP/2.0"
acceptResponses = 1
acceptRequests = 1
state = "firstline" # or "headers", "body" or "invalid"
debug = 0
def __init__(self, messageReceivedCallback):
self.messageReceived = messageReceivedCallback
self.reset()
def reset(self, remainingData=""):
self.state = "firstline"
self.length = None # body length
self.bodyReceived = 0 # how much of the body we received
self.message = None
self.header = None
self.setLineMode(remainingData)
def invalidMessage(self):
self.state = "invalid"
self.setRawMode()
def dataDone(self):
# clear out any buffered data that may be hanging around
self.clearLineBuffer()
if self.state == "firstline":
return
if self.state != "body":
self.reset()
return
if self.length == None:
# no content-length header, so end of data signals message done
self.messageDone()
elif self.length < self.bodyReceived:
# aborted in the middle
self.reset()
else:
# we have enough data and message wasn't finished? something is wrong
raise RuntimeError, "this should never happen"
def dataReceived(self, data):
try:
basic.LineReceiver.dataReceived(self, data)
except:
log.err()
self.invalidMessage()
def handleFirstLine(self, line):
"""Expected to create self.message."""
raise NotImplementedError
def lineLengthExceeded(self, line):
self.invalidMessage()
def lineReceived(self, line):
if self.state == "firstline":
while line.startswith("\n") or line.startswith("\r"):
line = line[1:]
if not line:
return
try:
a, b, c = line.split(" ", 2)
except ValueError:
self.invalidMessage()
return
if a == "SIP/2.0" and self.acceptResponses:
# response
try:
code = int(b)
except ValueError:
self.invalidMessage()
return
self.message = Response(code, c)
elif c == "SIP/2.0" and self.acceptRequests:
self.message = Request(a, b)
else:
self.invalidMessage()
return
self.state = "headers"
return
else:
assert self.state == "headers"
if line:
# multiline header
if line.startswith(" ") or line.startswith("\t"):
name, value = self.header
self.header = name, (value + line.lstrip())
else:
# new header
if self.header:
self.message.addHeader(*self.header)
self.header = None
try:
name, value = line.split(":", 1)
except ValueError:
self.invalidMessage()
return
self.header = name, value.lstrip()
# XXX we assume content-length won't be multiline
if name.lower() == "content-length":
try:
self.length = int(value.lstrip())
except ValueError:
self.invalidMessage()
return
else:
# CRLF, we now have message body until self.length bytes,
# or if no length was given, until there is no more data
# from the connection sending us data.
self.state = "body"
if self.header:
self.message.addHeader(*self.header)
self.header = None
if self.length == 0:
self.messageDone()
return
self.setRawMode()
def messageDone(self, remainingData=""):
assert self.state == "body"
self.message.creationFinished()
self.messageReceived(self.message)
self.reset(remainingData)
def rawDataReceived(self, data):
assert self.state in ("body", "invalid")
if self.state == "invalid":
return
if self.length == None:
self.message.bodyDataReceived(data)
else:
dataLen = len(data)
expectedLen = self.length - self.bodyReceived
if dataLen > expectedLen:
self.message.bodyDataReceived(data[:expectedLen])
self.messageDone(data[expectedLen:])
return
else:
self.bodyReceived += dataLen
self.message.bodyDataReceived(data)
if self.bodyReceived == self.length:
self.messageDone()
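# A hedged usage sketch for the parser above; the OPTIONS request is made
# up. The helper is defined but never called, so the module is unchanged
# on import.
def _exampleParseMessage():
    """Feeds one datagram's worth of data and returns the parsed messages."""
    received = []
    parser = MessagesParser(received.append)
    parser.dataReceived('OPTIONS sip:[email protected] SIP/2.0\r\n'
                        'Content-Length: 0\r\n\r\n')
    parser.dataDone()
    return received  # holds a single Request instance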
class Base(protocol.DatagramProtocol):
"""Base class for SIP clients and servers."""
PORT = PORT
debug = False
def __init__(self):
self.messages = []
self.parser = MessagesParser(self.addMessage)
def addMessage(self, msg):
self.messages.append(msg)
def datagramReceived(self, data, addr):
self.parser.dataReceived(data)
self.parser.dataDone()
for m in self.messages:
self._fixupNAT(m, addr)
if self.debug:
log.msg("Received %r from %r" % (m.toString(), addr))
if isinstance(m, Request):
self.handle_request(m, addr)
else:
self.handle_response(m, addr)
self.messages[:] = []
def _fixupNAT(self, message, (srcHost, srcPort)):
# RFC 2543 6.40.2,
senderVia = parseViaHeader(message.headers["via"][0])
if senderVia.host != srcHost:
senderVia.received = srcHost
if senderVia.port != srcPort:
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
elif senderVia.rport == True:
senderVia.received = srcHost
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def sendMessage(self, destURL, message):
"""Send a message.
@param destURL: C{URL}. This should be a *physical* URL, not a logical one.
@param message: The message to send.
"""
if destURL.transport not in ("udp", None):
raise RuntimeError, "only UDP currently supported"
if self.debug:
log.msg("Sending %r to %r" % (message.toString(), destURL))
self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT))
def handle_request(self, message, addr):
"""Override to define behavior for requests received
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
def handle_response(self, message, addr):
"""Override to define behavior for responses received.
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
class IContact(Interface):
"""A user of a registrar or proxy"""
class Registration:
def __init__(self, secondsToExpiry, contactURL):
self.secondsToExpiry = secondsToExpiry
self.contactURL = contactURL
class IRegistry(Interface):
"""Allows registration of logical->physical URL mapping."""
def registerAddress(domainURL, logicalURL, physicalURL):
"""Register the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def unregisterAddress(domainURL, logicalURL, physicalURL):
"""Unregister the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def getRegistrationInfo(logicalURL):
"""Get registration info for logical URL.
@return: Deferred of C{Registration} object or failure of LookupError.
"""
class ILocator(Interface):
"""Allow looking up physical address for logical URL."""
def getAddress(logicalURL):
"""Return physical URL of server for logical URL of user.
@param logicalURL: a logical C{URL}.
@return: Deferred which becomes URL or fails with LookupError.
"""
class Proxy(Base):
"""SIP proxy."""
PORT = PORT
locator = None # object implementing ILocator
def __init__(self, host=None, port=PORT):
"""Create new instance.
@param host: our hostname/IP as set in Via headers.
@param port: our port as set in Via headers.
"""
self.host = host or socket.getfqdn()
self.port = port
Base.__init__(self)
def getVia(self):
"""Return value of Via header for this proxy."""
return Via(host=self.host, port=self.port)
def handle_request(self, message, addr):
# send immediate 100/trying message before processing
#self.deliverResponse(self.responseFromRequest(100, message))
f = getattr(self, "handle_%s_request" % message.method, None)
if f is None:
f = self.handle_request_default
try:
d = f(message, addr)
except SIPError, e:
self.deliverResponse(self.responseFromRequest(e.code, message))
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
if d is not None:
d.addErrback(lambda e:
self.deliverResponse(self.responseFromRequest(e.code, message))
)
def handle_request_default(self, message, (srcHost, srcPort)):
"""Default request handler.
Default behaviour for OPTIONS and unknown methods for proxies
        is to forward the message on to the client.
        Since at the moment we are a stateless proxy, that's basically
everything.
"""
def _mungContactHeader(uri, message):
message.headers['contact'][0] = uri.toString()
return self.sendMessage(uri, message)
viaHeader = self.getVia()
if viaHeader.toString() in message.headers["via"]:
# must be a loop, so drop message
log.msg("Dropping looped message.")
return
message.headers["via"].insert(0, viaHeader.toString())
name, uri, tags = parseAddress(message.headers["to"][0], clean=1)
# this is broken and needs refactoring to use cred
d = self.locator.getAddress(uri)
d.addCallback(self.sendMessage, message)
d.addErrback(self._cantForwardRequest, message)
def _cantForwardRequest(self, error, message):
error.trap(LookupError)
del message.headers["via"][0] # this'll be us
self.deliverResponse(self.responseFromRequest(404, message))
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def handle_response(self, message, addr):
"""Default response handler."""
v = parseViaHeader(message.headers["via"][0])
if (v.host, v.port) != (self.host, self.port):
# we got a message not intended for us?
# XXX note this check breaks if we have multiple external IPs
# yay for suck protocols
log.msg("Dropping incorrectly addressed message")
return
del message.headers["via"][0]
if not message.headers["via"]:
# this message is addressed to us
self.gotResponse(message, addr)
return
self.deliverResponse(message)
def gotResponse(self, message, addr):
"""Called with responses that are addressed at this server."""
pass
class IAuthorizer(Interface):
def getChallenge(peer):
"""Generate a challenge the client may respond to.
@type peer: C{tuple}
@param peer: The client's address
@rtype: C{str}
@return: The challenge string
"""
def decode(response):
"""Create a credentials object from the given response.
@type response: C{str}
"""
class BasicAuthorizer:
"""Authorizer for insecure Basic (base64-encoded plaintext) authentication.
This form of authentication is broken and insecure. Do not use it.
"""
implements(IAuthorizer)
def __init__(self):
"""
This method exists solely to issue a deprecation warning.
"""
warnings.warn(
"twisted.protocols.sip.BasicAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
def getChallenge(self, peer):
return None
def decode(self, response):
# At least one SIP client improperly pads its Base64 encoded messages
for i in range(3):
try:
creds = (response + ('=' * i)).decode('base64')
except:
pass
else:
break
else:
# Totally bogus
raise SIPError(400)
p = creds.split(':', 1)
if len(p) == 2:
return UsernamePassword(*p)
raise SIPError(400)
class DigestedCredentials(UsernameHashedPassword):
"""Yet Another Simple Digest-MD5 authentication scheme"""
def __init__(self, username, fields, challenges):
warnings.warn(
"twisted.protocols.sip.DigestedCredentials was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.username = username
self.fields = fields
self.challenges = challenges
def checkPassword(self, password):
method = 'REGISTER'
response = self.fields.get('response')
uri = self.fields.get('uri')
nonce = self.fields.get('nonce')
cnonce = self.fields.get('cnonce')
nc = self.fields.get('nc')
algo = self.fields.get('algorithm', 'MD5')
qop = self.fields.get('qop-options', 'auth')
opaque = self.fields.get('opaque')
if opaque not in self.challenges:
return False
del self.challenges[opaque]
user, domain = self.username.split('@', 1)
if uri is None:
uri = 'sip:' + domain
expected = DigestCalcResponse(
DigestCalcHA1(algo, user, domain, password, nonce, cnonce),
nonce, nc, cnonce, qop, method, uri, None,
)
return expected == response
class DigestAuthorizer:
CHALLENGE_LIFETIME = 15
implements(IAuthorizer)
def __init__(self):
warnings.warn(
"twisted.protocols.sip.DigestAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.outstanding = {}
def generateNonce(self):
c = tuple([random.randrange(sys.maxint) for _ in range(3)])
c = '%d%d%d' % c
return c
def generateOpaque(self):
return str(random.randrange(sys.maxint))
def getChallenge(self, peer):
c = self.generateNonce()
o = self.generateOpaque()
self.outstanding[o] = c
return ','.join((
'nonce="%s"' % c,
'opaque="%s"' % o,
'qop-options="auth"',
'algorithm="MD5"',
))
def decode(self, response):
response = ' '.join(response.splitlines())
parts = response.split(',')
auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]])
try:
username = auth['username']
except KeyError:
raise SIPError(401)
try:
return DigestedCredentials(username, auth, self.outstanding)
except:
raise SIPError(400)
class RegisterProxy(Proxy):
"""A proxy that allows registration for a specific domain.
Unregistered users won't be handled.
"""
portal = None
registry = None # should implement IRegistry
authorizers = {}
def __init__(self, *args, **kw):
Proxy.__init__(self, *args, **kw)
self.liveChallenges = {}
if "digest" not in self.authorizers:
self.authorizers["digest"] = DigestAuthorizer()
def handle_ACK_request(self, message, (host, port)):
# XXX
# ACKs are a client's way of indicating they got the last message
# Responding to them is not a good idea.
# However, we should keep track of terminal messages and re-transmit
# if no ACK is received.
pass
def handle_REGISTER_request(self, message, (host, port)):
"""Handle a registration request.
Currently registration is not proxied.
"""
if self.portal is None:
# There is no portal. Let anyone in.
self.register(message, host, port)
else:
# There is a portal. Check for credentials.
if not message.headers.has_key("authorization"):
return self.unauthorized(message, host, port)
else:
return self.login(message, host, port)
def unauthorized(self, message, host, port):
m = self.responseFromRequest(401, message)
for (scheme, auth) in self.authorizers.iteritems():
chal = auth.getChallenge((host, port))
if chal is None:
value = '%s realm="%s"' % (scheme.title(), self.host)
else:
value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host)
m.headers.setdefault('www-authenticate', []).append(value)
self.deliverResponse(m)
def login(self, message, host, port):
parts = message.headers['authorization'][0].split(None, 1)
a = self.authorizers.get(parts[0].lower())
if a:
try:
c = a.decode(parts[1])
except SIPError:
raise
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
c.username += '@' + self.host
self.portal.login(c, None, IContact
).addCallback(self._cbLogin, message, host, port
).addErrback(self._ebLogin, message, host, port
).addErrback(log.err
)
else:
self.deliverResponse(self.responseFromRequest(501, message))
def _cbLogin(self, (i, a, l), message, host, port):
# It's stateless, matey. What a joke.
self.register(message, host, port)
def _ebLogin(self, failure, message, host, port):
failure.trap(cred.error.UnauthorizedLogin)
self.unauthorized(message, host, port)
def register(self, message, host, port):
"""Allow all users to register"""
name, toURL, params = parseAddress(message.headers["to"][0], clean=1)
contact = None
if message.headers.has_key("contact"):
contact = message.headers["contact"][0]
if message.headers.get("expires", [None])[0] == "0":
self.unregister(message, toURL, contact)
else:
# XXX Check expires on appropriate URL, and pass it to registry
# instead of having registry hardcode it.
if contact is not None:
name, contactURL, params = parseAddress(contact, host=host, port=port)
d = self.registry.registerAddress(message.uri, toURL, contactURL)
else:
d = self.registry.getRegistrationInfo(toURL)
d.addCallbacks(self._cbRegister, self._ebRegister,
callbackArgs=(message,),
errbackArgs=(message,)
)
def _cbRegister(self, registration, message):
response = self.responseFromRequest(200, message)
if registration.contactURL != None:
response.addHeader("contact", registration.contactURL.toString())
response.addHeader("expires", "%d" % registration.secondsToExpiry)
response.addHeader("content-length", "0")
self.deliverResponse(response)
def _ebRegister(self, error, message):
error.trap(RegistrationError, LookupError)
# XXX return error message, and alter tests to deal with
# this, currently tests assume no message sent on failure
def unregister(self, message, toURL, contact):
try:
expires = int(message.headers["expires"][0])
except ValueError:
self.deliverResponse(self.responseFromRequest(400, message))
else:
if expires == 0:
if contact == "*":
contactURL = "*"
else:
name, contactURL, params = parseAddress(contact)
d = self.registry.unregisterAddress(message.uri, toURL, contactURL)
d.addCallback(self._cbUnregister, message
).addErrback(self._ebUnregister, message
)
def _cbUnregister(self, registration, message):
msg = self.responseFromRequest(200, message)
msg.headers.setdefault('contact', []).append(registration.contactURL.toString())
msg.addHeader("expires", "0")
self.deliverResponse(msg)
def _ebUnregister(self, registration, message):
pass
class InMemoryRegistry:
"""A simplistic registry for a specific domain."""
implements(IRegistry, ILocator)
def __init__(self, domain):
self.domain = domain # the domain we handle registration for
self.users = {} # map username to (IDelayedCall for expiry, address URI)
def getAddress(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if userURI.username in self.users:
dc, url = self.users[userURI.username]
return defer.succeed(url)
else:
return defer.fail(LookupError("no such user"))
def getRegistrationInfo(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(Registration(int(dc.getTime() - time.time()), url))
else:
return defer.fail(LookupError("no such user"))
def _expireRegistration(self, username):
try:
dc, url = self.users[username]
except KeyError:
return defer.fail(LookupError("no such user"))
else:
dc.cancel()
del self.users[username]
return defer.succeed(Registration(0, url))
def registerAddress(self, domainURL, logicalURL, physicalURL):
if domainURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.username in self.users:
dc, old = self.users[logicalURL.username]
dc.reset(3600)
else:
dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username)
log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString()))
self.users[logicalURL.username] = (dc, physicalURL)
return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL))
def unregisterAddress(self, domainURL, logicalURL, physicalURL):
return self._expireRegistration(logicalURL.username)
| mit |
pombredanne/tahoe-lafs | src/allmydata/mutable/filenode.py | 2 | 46275 |
import random
from zope.interface import implements
from twisted.internet import defer, reactor
from foolscap.api import eventually
from allmydata.interfaces import IMutableFileNode, ICheckable, ICheckResults, \
NotEnoughSharesError, MDMF_VERSION, SDMF_VERSION, IMutableUploadable, \
IMutableFileVersion, IWriteable
from allmydata.util import hashutil, log, consumer, deferredutil, mathutil
from allmydata.util.assertutil import precondition
from allmydata.uri import WriteableSSKFileURI, ReadonlySSKFileURI, \
WriteableMDMFFileURI, ReadonlyMDMFFileURI
from allmydata.monitor import Monitor
from pycryptopp.cipher.aes import AES
from allmydata.mutable.publish import Publish, MutableData,\
TransformingUploadable
from allmydata.mutable.common import MODE_READ, MODE_WRITE, MODE_CHECK, UnrecoverableFileError, \
UncoordinatedWriteError
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer
from allmydata.mutable.repairer import Repairer
class BackoffAgent:
# these parameters are copied from foolscap.reconnector, which gets them
# from twisted.internet.protocol.ReconnectingClientFactory
initialDelay = 1.0
factor = 2.7182818284590451 # (math.e)
jitter = 0.11962656492 # molar Planck constant times c, Joule meter/mole
maxRetries = 4
def __init__(self):
self._delay = self.initialDelay
self._count = 0
def delay(self, node, f):
self._count += 1
if self._count >= self.maxRetries:
return f
self._delay = self._delay * self.factor
self._delay = random.normalvariate(self._delay,
self._delay * self.jitter)
d = defer.Deferred()
reactor.callLater(self._delay, d.callback, None)
return d
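# Rough sketch of the resulting behavior (illustrative, not part of the API
# contract): each call to delay() multiplies the pause by ~e and jitters it,
# so successive waits are roughly 2.7s, 7.4s, 20s; the fourth call returns
# the Failure instead of a Deferred, ending the retry loop.
#   agent = BackoffAgent()
#   d = agent.delay(node, failure)          # a Deferred, or the Failure
#   d.addCallback(lambda _: try_again())    # hypothetical retry hook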
# use nodemaker.create_mutable_file() to make one of these
class MutableFileNode:
implements(IMutableFileNode, ICheckable)
def __init__(self, storage_broker, secret_holder,
default_encoding_parameters, history):
self._storage_broker = storage_broker
self._secret_holder = secret_holder
self._default_encoding_parameters = default_encoding_parameters
self._history = history
self._pubkey = None # filled in upon first read
self._privkey = None # filled in if we're mutable
# we keep track of the last encoding parameters that we use. These
# are updated upon retrieve, and used by publish. If we publish
# without ever reading (i.e. overwrite()), then we use these values.
self._required_shares = default_encoding_parameters["k"]
self._total_shares = default_encoding_parameters["n"]
self._sharemap = {} # known shares, shnum-to-[nodeids]
self._most_recent_size = None
# filled in after __init__ if we're being created for the first time;
# filled in by the servermap updater before publishing, otherwise.
# set to this default value in case neither of those things happen,
# or in case the servermap can't find any shares to tell us what
# to publish as.
self._protocol_version = None
# all users of this MutableFileNode go through the serializer. This
# takes advantage of the fact that Deferreds discard the callbacks
# that they're done with, so we can keep using the same Deferred
# forever without consuming more and more memory.
self._serializer = defer.succeed(None)
# Starting with MDMF, we can get these from caps if they're
# there. Leave them alone for now; they'll be filled in by my
# init_from_cap method if necessary.
self._downloader_hints = {}
def __repr__(self):
if hasattr(self, '_uri'):
return "<%s %x %s %s>" % (self.__class__.__name__, id(self), self.is_readonly() and 'RO' or 'RW', self._uri.abbrev())
else:
return "<%s %x %s %s>" % (self.__class__.__name__, id(self), None, None)
def init_from_cap(self, filecap):
# we have the URI, but we have not yet retrieved the public
# verification key, nor things like 'k' or 'N'. If and when someone
# wants to get our contents, we'll pull from shares and fill those
# in.
if isinstance(filecap, (WriteableMDMFFileURI, ReadonlyMDMFFileURI)):
self._protocol_version = MDMF_VERSION
elif isinstance(filecap, (ReadonlySSKFileURI, WriteableSSKFileURI)):
self._protocol_version = SDMF_VERSION
self._uri = filecap
self._writekey = None
if not filecap.is_readonly() and filecap.is_mutable():
self._writekey = self._uri.writekey
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
self._fingerprint = self._uri.fingerprint
# the following values are learned during Retrieval
# self._pubkey
# self._required_shares
# self._total_shares
# and these are needed for Publish. They are filled in by Retrieval
# if possible, otherwise by the first peer that Publish talks to.
self._privkey = None
self._encprivkey = None
return self
def create_with_keys(self, (pubkey, privkey), contents,
version=SDMF_VERSION):
"""Call this to create a brand-new mutable file. It will create the
shares, find homes for them, and upload the initial contents (created
with the same rules as IClient.create_mutable_file() ). Returns a
Deferred that fires (with the MutableFileNode instance you should
use) when it completes.
"""
self._pubkey, self._privkey = pubkey, privkey
pubkey_s = self._pubkey.serialize()
privkey_s = self._privkey.serialize()
self._writekey = hashutil.ssk_writekey_hash(privkey_s)
self._encprivkey = self._encrypt_privkey(self._writekey, privkey_s)
self._fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
if version == MDMF_VERSION:
self._uri = WriteableMDMFFileURI(self._writekey, self._fingerprint)
self._protocol_version = version
elif version == SDMF_VERSION:
self._uri = WriteableSSKFileURI(self._writekey, self._fingerprint)
self._protocol_version = version
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
initial_contents = self._get_initial_contents(contents)
return self._upload(initial_contents, None)
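# Hypothetical call sequence for the method above (the key pair comes from
# the caller's key generator; names here are illustrative):
#   n = MutableFileNode(storage_broker, secret_holder, params, history)
#   d = n.create_with_keys((pubkey, privkey), MutableData("hello"))
#   d.addCallback(lambda res: n.get_uri())  # cap is available once created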
def _get_initial_contents(self, contents):
if contents is None:
return MutableData("")
if isinstance(contents, str):
return MutableData(contents)
if IMutableUploadable.providedBy(contents):
return contents
assert callable(contents), "%s should be callable, not %s" % \
(contents, type(contents))
return contents(self)
def _encrypt_privkey(self, writekey, privkey):
enc = AES(writekey)
crypttext = enc.process(privkey)
return crypttext
def _decrypt_privkey(self, enc_privkey):
enc = AES(self._writekey)
privkey = enc.process(enc_privkey)
return privkey
def _populate_pubkey(self, pubkey):
self._pubkey = pubkey
def _populate_required_shares(self, required_shares):
self._required_shares = required_shares
def _populate_total_shares(self, total_shares):
self._total_shares = total_shares
def _populate_privkey(self, privkey):
self._privkey = privkey
def _populate_encprivkey(self, encprivkey):
self._encprivkey = encprivkey
def get_write_enabler(self, server):
seed = server.get_foolscap_write_enabler_seed()
assert len(seed) == 20
return hashutil.ssk_write_enabler_hash(self._writekey, seed)
def get_renewal_secret(self, server):
crs = self._secret_holder.get_renewal_secret()
frs = hashutil.file_renewal_secret_hash(crs, self._storage_index)
lease_seed = server.get_lease_seed()
assert len(lease_seed) == 20
return hashutil.bucket_renewal_secret_hash(frs, lease_seed)
def get_cancel_secret(self, server):
ccs = self._secret_holder.get_cancel_secret()
fcs = hashutil.file_cancel_secret_hash(ccs, self._storage_index)
lease_seed = server.get_lease_seed()
assert len(lease_seed) == 20
return hashutil.bucket_cancel_secret_hash(fcs, lease_seed)
def get_writekey(self):
return self._writekey
def get_readkey(self):
return self._readkey
def get_storage_index(self):
return self._storage_index
def get_fingerprint(self):
return self._fingerprint
def get_privkey(self):
return self._privkey
def get_encprivkey(self):
return self._encprivkey
def get_pubkey(self):
return self._pubkey
def get_required_shares(self):
return self._required_shares
def get_total_shares(self):
return self._total_shares
####################################
# IFilesystemNode
def get_size(self):
return self._most_recent_size
def get_current_size(self):
d = self.get_size_of_best_version()
d.addCallback(self._stash_size)
return d
def _stash_size(self, size):
self._most_recent_size = size
return size
def get_cap(self):
return self._uri
def get_readcap(self):
return self._uri.get_readonly()
def get_verify_cap(self):
return self._uri.get_verify_cap()
def get_repair_cap(self):
if self._uri.is_readonly():
return None
return self._uri
def get_uri(self):
return self._uri.to_string()
def get_write_uri(self):
if self.is_readonly():
return None
return self._uri.to_string()
def get_readonly_uri(self):
return self._uri.get_readonly().to_string()
def get_readonly(self):
if self.is_readonly():
return self
ro = MutableFileNode(self._storage_broker, self._secret_holder,
self._default_encoding_parameters, self._history)
ro.init_from_cap(self._uri.get_readonly())
return ro
def is_mutable(self):
return self._uri.is_mutable()
def is_readonly(self):
return self._uri.is_readonly()
def is_unknown(self):
return False
def is_allowed_in_immutable_directory(self):
return not self._uri.is_mutable()
def raise_error(self):
pass
def __hash__(self):
return hash((self.__class__, self._uri))
def __cmp__(self, them):
if cmp(type(self), type(them)):
return cmp(type(self), type(them))
if cmp(self.__class__, them.__class__):
return cmp(self.__class__, them.__class__)
return cmp(self._uri, them._uri)
#################################
# ICheckable
def check(self, monitor, verify=False, add_lease=False):
checker = MutableChecker(self, self._storage_broker,
self._history, monitor)
return checker.check(verify, add_lease)
def check_and_repair(self, monitor, verify=False, add_lease=False):
checker = MutableCheckAndRepairer(self, self._storage_broker,
self._history, monitor)
return checker.check(verify, add_lease)
#################################
# IRepairable
def repair(self, check_results, force=False, monitor=None):
assert ICheckResults(check_results)
r = Repairer(self, check_results, self._storage_broker,
self._history, monitor)
d = r.start(force)
return d
#################################
# IFileNode
def get_best_readable_version(self):
"""
I return a Deferred that fires with a MutableFileVersion
representing the best readable version of the file that I
represent
"""
return self.get_readable_version()
def get_readable_version(self, servermap=None, version=None):
"""
I return a Deferred that fires with a MutableFileVersion for my
version argument, if there is a recoverable file of that version
on the grid. If there is no recoverable version, I fire with an
UnrecoverableFileError.
If a servermap is provided, I look in there for the requested
version. If no servermap is provided, I create and update a new
one.
If no version is provided, then I return a MutableFileVersion
representing the best recoverable version of the file.
"""
d = self._get_version_from_servermap(MODE_READ, servermap, version)
def _build_version((servermap, their_version)):
assert their_version in servermap.recoverable_versions()
assert their_version in servermap.make_versionmap()
mfv = MutableFileVersion(self,
servermap,
their_version,
self._storage_index,
self._storage_broker,
self._readkey,
history=self._history)
assert mfv.is_readonly()
mfv.set_downloader_hints(self._downloader_hints)
# our caller can use this to download the contents of the
# mutable file.
return mfv
return d.addCallback(_build_version)
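# Illustrative use of the method above (smap and verinfo are assumed to come
# from an earlier MODE_READ servermap update):
#   d = node.get_readable_version(servermap=smap, version=verinfo)
#   d.addCallback(lambda mfv: mfv.download_to_data())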
def _get_version_from_servermap(self,
mode,
servermap=None,
version=None):
"""
I return a Deferred that fires with (servermap, version).
This function performs validation and a servermap update. If it
returns (servermap, version), the caller can assume that:
- servermap was last updated in mode.
- version is recoverable, and corresponds to the servermap.
If version and servermap are provided to me, I will validate
that version exists in the servermap, and that the servermap was
updated correctly.
If version is not provided, but servermap is, I will validate
the servermap and return the best recoverable version that I can
find in the servermap.
If the version is provided but the servermap isn't, I will
obtain a servermap that has been updated in the correct mode and
validate that version is found and recoverable.
If neither servermap nor version are provided, I will obtain a
servermap updated in the correct mode, and return the best
recoverable version that I can find in there.
"""
# XXX: wording ^^^^
if servermap and servermap.get_last_update()[0] == mode:
d = defer.succeed(servermap)
else:
d = self._get_servermap(mode)
def _get_version(servermap, v):
if v and v not in servermap.recoverable_versions():
v = None
elif not v:
v = servermap.best_recoverable_version()
if not v:
raise UnrecoverableFileError("no recoverable versions")
return (servermap, v)
return d.addCallback(_get_version, version)
def download_best_version(self, progress=None):
"""
I return a Deferred that fires with the contents of the best
version of this mutable file.
"""
return self._do_serialized(self._download_best_version, progress=progress)
def _download_best_version(self, progress=None):
"""
I am the serialized sibling of download_best_version.
"""
d = self.get_best_readable_version()
d.addCallback(self._record_size)
d.addCallback(lambda version: version.download_to_data(progress=progress))
# It is possible that the download will fail because there
# aren't enough shares to be had. If so, we will try again after
# updating the servermap in MODE_WRITE, which may find more
# shares than updating in MODE_READ, as we just did. We can do
# this by getting the best mutable version and downloading from
# that -- the best mutable version will be a MutableFileVersion
# with a servermap that was last updated in MODE_WRITE, as we
# want. If this fails, then we give up.
def _maybe_retry(failure):
failure.trap(NotEnoughSharesError)
d = self.get_best_mutable_version()
d.addCallback(self._record_size)
d.addCallback(lambda version: version.download_to_data(progress=progress))
return d
d.addErrback(_maybe_retry)
return d
def _record_size(self, mfv):
"""
I record the size of a mutable file version.
"""
self._most_recent_size = mfv.get_size()
return mfv
def get_size_of_best_version(self):
"""
I return the size of the best version of this mutable file.
This is equivalent to calling get_size() on the result of
get_best_readable_version().
"""
d = self.get_best_readable_version()
return d.addCallback(lambda mfv: mfv.get_size())
#################################
# IMutableFileNode
def get_best_mutable_version(self, servermap=None):
"""
I return a Deferred that fires with a MutableFileVersion
representing the best readable version of the file that I
represent. I am like get_best_readable_version, except that I
will try to make a writeable version if I can.
"""
return self.get_mutable_version(servermap=servermap)
def get_mutable_version(self, servermap=None, version=None):
"""
I return a version of this mutable file. I return a Deferred
that fires with a MutableFileVersion
If version is provided, the Deferred will fire with a
MutableFileVersion initialized with that version. Otherwise, it
will fire with the best version that I can recover.
If servermap is provided, I will use that to find versions
instead of performing my own servermap update.
"""
if self.is_readonly():
return self.get_readable_version(servermap=servermap,
version=version)
# get_mutable_version => write intent, so we require that the
# servermap is updated in MODE_WRITE
d = self._get_version_from_servermap(MODE_WRITE, servermap, version)
def _build_version((servermap, smap_version)):
# these should have been set by the servermap update.
assert self._secret_holder
assert self._writekey
mfv = MutableFileVersion(self,
servermap,
smap_version,
self._storage_index,
self._storage_broker,
self._readkey,
self._writekey,
self._secret_holder,
history=self._history)
assert not mfv.is_readonly()
mfv.set_downloader_hints(self._downloader_hints)
return mfv
return d.addCallback(_build_version)
# XXX: I'm uncomfortable with the difference between upload and
# overwrite, which, FWICT, is basically that you don't have to
# do a servermap update before you overwrite. We split them up
# that way anyway, so I guess there's no real difficulty in
# offering both ways to callers, but it also makes the
# public-facing API cluttery, and makes it hard to discern the
# right way of doing things.
# In general, we leave it to callers to ensure that they aren't
# going to cause UncoordinatedWriteErrors when working with
# MutableFileVersions. We know that the next three operations
# (upload, overwrite, and modify) will all operate on the same
# version, so we say that only one of them can be going on at once,
# and serialize them to ensure that that actually happens, since as
# the caller in this situation it is our job to do that.
def overwrite(self, new_contents):
"""
I overwrite the contents of the best recoverable version of this
mutable file with new_contents. This is equivalent to calling
overwrite on the result of get_best_mutable_version with
new_contents as an argument. I return a Deferred that eventually
fires with the results of my replacement process.
"""
# TODO: Update downloader hints.
return self._do_serialized(self._overwrite, new_contents)
def _overwrite(self, new_contents):
"""
I am the serialized sibling of overwrite.
"""
d = self.get_best_mutable_version()
d.addCallback(lambda mfv: mfv.overwrite(new_contents))
d.addCallback(self._did_upload, new_contents.get_size())
return d
def upload(self, new_contents, servermap):
"""
I overwrite the contents of the best recoverable version of this
mutable file with new_contents, using servermap instead of
creating/updating our own servermap. I return a Deferred that
fires with the results of my upload.
"""
# TODO: Update downloader hints
return self._do_serialized(self._upload, new_contents, servermap)
def modify(self, modifier, backoffer=None):
"""
I modify the contents of the best recoverable version of this
mutable file with the modifier. This is equivalent to calling
modify on the result of get_best_mutable_version. I return a
Deferred that eventually fires with an UploadResults instance
describing this process.
"""
# TODO: Update downloader hints.
return self._do_serialized(self._modify, modifier, backoffer)
def _modify(self, modifier, backoffer):
"""
I am the serialized sibling of modify.
"""
d = self.get_best_mutable_version()
d.addCallback(lambda mfv: mfv.modify(modifier, backoffer))
return d
def download_version(self, servermap, version, fetch_privkey=False):
"""
Download the specified version of this mutable file. I return a
Deferred that fires with the contents of the specified version
as a bytestring, or errbacks if the file is not recoverable.
"""
d = self.get_readable_version(servermap, version)
return d.addCallback(lambda mfv: mfv.download_to_data(fetch_privkey))
def get_servermap(self, mode):
"""
I return a servermap that has been updated in mode.
mode should be one of MODE_READ, MODE_WRITE, MODE_CHECK or
MODE_ANYTHING. See servermap.py for more on what these mean.
"""
return self._do_serialized(self._get_servermap, mode)
def _get_servermap(self, mode):
"""
I am a serialized twin to get_servermap.
"""
servermap = ServerMap()
d = self._update_servermap(servermap, mode)
# The servermap will tell us about the most recent size of the
# file, so we may as well set that so that callers might get
# more data about us.
if not self._most_recent_size:
d.addCallback(self._get_size_from_servermap)
return d
def _get_size_from_servermap(self, servermap):
"""
I extract the size of the best version of this file and record
it in self._most_recent_size. I return the servermap that I was
given.
"""
if servermap.recoverable_versions():
v = servermap.best_recoverable_version()
size = v[4] # verinfo[4] == size
self._most_recent_size = size
return servermap
def _update_servermap(self, servermap, mode):
u = ServermapUpdater(self, self._storage_broker, Monitor(), servermap,
mode)
if self._history:
self._history.notify_mapupdate(u.get_status())
return u.update()
#def set_version(self, version):
# I can be set in two ways:
# 1. When the node is created.
# 2. (for an existing share) when the Servermap is updated
# before I am read.
# assert version in (MDMF_VERSION, SDMF_VERSION)
# self._protocol_version = version
def get_version(self):
return self._protocol_version
def _do_serialized(self, cb, *args, **kwargs):
# note: to avoid deadlock, this callable is *not* allowed to invoke
# other serialized methods within this (or any other)
# MutableFileNode. The callable should be a bound method of this same
# MFN instance.
d = defer.Deferred()
self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
# we need to put off d.callback until this Deferred is finished being
# processed. Otherwise the caller's subsequent activities (like,
# doing other things with this node) can cause reentrancy problems in
# the Deferred code itself
self._serializer.addBoth(lambda res: eventually(d.callback, res))
# add a log.err just in case something really weird happens, because
# self._serializer stays around forever, therefore we won't see the
# usual Unhandled Error in Deferred that would give us a hint.
self._serializer.addErrback(log.err)
return d
def _upload(self, new_contents, servermap):
"""
A MutableFileNode still has to have some way of getting
published initially, which is what I am here for. After that,
all publishing, updating, modifying and so on happens through
MutableFileVersions.
"""
assert self._pubkey, "update_servermap must be called before publish"
# Define IPublishInvoker with a set_downloader_hints method?
# Then have the publisher call that method when it's done publishing?
p = Publish(self, self._storage_broker, servermap)
if self._history:
self._history.notify_publish(p.get_status(),
new_contents.get_size())
d = p.publish(new_contents)
d.addCallback(self._did_upload, new_contents.get_size())
return d
def set_downloader_hints(self, hints):
self._downloader_hints = hints
def _did_upload(self, res, size):
self._most_recent_size = size
return res
class MutableFileVersion:
"""
I represent a specific version (most likely the best version) of a
mutable file.
Since I implement IReadable, instances which hold a
reference to an instance of me are guaranteed the ability (absent
connection difficulties or unrecoverable versions) to read the file
that I represent. Depending on whether I was initialized with a
write capability or not, I may also provide callers the ability to
overwrite or modify the contents of the mutable file that I
reference.
"""
implements(IMutableFileVersion, IWriteable)
def __init__(self,
node,
servermap,
version,
storage_index,
storage_broker,
readcap,
writekey=None,
write_secrets=None,
history=None):
self._node = node
self._servermap = servermap
self._version = version
self._storage_index = storage_index
self._write_secrets = write_secrets
self._history = history
self._storage_broker = storage_broker
#assert isinstance(readcap, IURI)
self._readcap = readcap
self._writekey = writekey
self._serializer = defer.succeed(None)
def get_sequence_number(self):
"""
Get the sequence number of the mutable version that I represent.
"""
return self._version[0] # verinfo[0] == the sequence number
# TODO: Terminology?
def get_writekey(self):
"""
I return a writekey or None if I don't have a writekey.
"""
return self._writekey
def set_downloader_hints(self, hints):
"""
I set the downloader hints.
"""
assert isinstance(hints, dict)
self._downloader_hints = hints
def get_downloader_hints(self):
"""
I return the downloader hints.
"""
return self._downloader_hints
def overwrite(self, new_contents):
"""
I overwrite the contents of this mutable file version with the
data in new_contents.
"""
assert not self.is_readonly()
return self._do_serialized(self._overwrite, new_contents)
def _overwrite(self, new_contents):
assert IMutableUploadable.providedBy(new_contents)
assert self._servermap.get_last_update()[0] == MODE_WRITE
return self._upload(new_contents)
def modify(self, modifier, backoffer=None):
"""I use a modifier callback to apply a change to the mutable file.
I implement the following pseudocode::
obtain_mutable_filenode_lock()
first_time = True
while True:
update_servermap(MODE_WRITE)
old = retrieve_best_version()
new = modifier(old, servermap, first_time)
first_time = False
if new == old: break
try:
publish(new)
except UncoordinatedWriteError, e:
backoffer(e)
continue
break
release_mutable_filenode_lock()
The idea is that your modifier function can apply a delta of some
sort, and it will be re-run as necessary until it succeeds. The
modifier must inspect the old version to see whether its delta has
already been applied: if so it should return the contents unmodified.
Note that the modifier is required to run synchronously, and must not
invoke any methods on this MutableFileNode instance.
The backoff-er is a callable that is responsible for inserting a
random delay between subsequent attempts, to keep competing updates
from colliding forever. It is also allowed to give up after a while.
The backoffer is given two arguments: this MutableFileNode, and the
Failure object that contains the UncoordinatedWriteError. It should
return a Deferred that will fire when the next attempt should be
made, or return the Failure if the loop should give up. If
backoffer=None, a default one is provided which will perform
exponential backoff, and give up after 4 tries. Note that the
backoffer should not invoke any methods on this MutableFileNode
instance, and it needs to be highly conscious of deadlock issues.
"""
assert not self.is_readonly()
return self._do_serialized(self._modify, modifier, backoffer)
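# A minimal modifier sketch matching the contract above (hypothetical; the
# delta check keeps re-runs idempotent, as the docstring requires):
#   def append_once(old_contents, servermap, first_time):
#       if old_contents.endswith("marker\n"):
#           return None  # no change needed; contents already carry the delta
#       return old_contents + "marker\n"
#   d = version.modify(append_once)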
def _modify(self, modifier, backoffer):
if backoffer is None:
backoffer = BackoffAgent().delay
return self._modify_and_retry(modifier, backoffer, True)
def _modify_and_retry(self, modifier, backoffer, first_time):
"""
I try to apply modifier to the contents of this version of the
mutable file. If I succeed, I return an UploadResults instance
describing my success. If I fail, I try again after waiting for
a little bit.
"""
log.msg("doing modify")
if first_time:
d = self._update_servermap()
else:
# We ran into trouble; do MODE_CHECK so we're a little more
# careful on subsequent tries.
d = self._update_servermap(mode=MODE_CHECK)
d.addCallback(lambda ignored:
self._modify_once(modifier, first_time))
def _retry(f):
f.trap(UncoordinatedWriteError)
# Uh oh, it broke. We're allowed to trust the servermap for our
# first try, but after that we need to update it. It's
# possible that we've failed due to a race with another
# uploader, and if the race is to converge correctly, we
# need to know about that upload.
d2 = defer.maybeDeferred(backoffer, self, f)
d2.addCallback(lambda ignored:
self._modify_and_retry(modifier,
backoffer, False))
return d2
d.addErrback(_retry)
return d
def _modify_once(self, modifier, first_time):
"""
I attempt to apply a modifier to the contents of the mutable
file.
"""
assert self._servermap.get_last_update()[0] != MODE_READ
# download_to_data is serialized, so we have to call this to
# avoid deadlock.
d = self._try_to_download_data()
def _apply(old_contents):
new_contents = modifier(old_contents, self._servermap, first_time)
precondition((isinstance(new_contents, str) or
new_contents is None),
"Modifier function must return a string "
"or None")
if new_contents is None or new_contents == old_contents:
log.msg("no changes")
# no changes need to be made
if first_time:
return
# However, since Publish is not automatically doing a
# recovery when it observes UCWE, we need to do a second
# publish. See #551 for details. We'll basically loop until
# we manage an uncontested publish.
old_uploadable = MutableData(old_contents)
new_contents = old_uploadable
else:
new_contents = MutableData(new_contents)
return self._upload(new_contents)
d.addCallback(_apply)
return d
def is_readonly(self):
"""
I return True if this MutableFileVersion provides no write
access to the file that it encapsulates, and False if it
provides the ability to modify the file.
"""
return self._writekey is None
def is_mutable(self):
"""
I return True, since mutable files are always mutable by
somebody.
"""
return True
def get_storage_index(self):
"""
I return the storage index of the reference that I encapsulate.
"""
return self._storage_index
def get_size(self):
"""
I return the length, in bytes, of this readable object.
"""
return self._servermap.size_of_version(self._version)
def download_to_data(self, fetch_privkey=False, progress=None):
"""
I return a Deferred that fires with the contents of this
readable object as a byte string.
"""
c = consumer.MemoryConsumer(progress=progress)
d = self.read(c, fetch_privkey=fetch_privkey)
d.addCallback(lambda mc: "".join(mc.chunks))
return d
def _try_to_download_data(self):
"""
I am an unserialized cousin of download_to_data; I am called
from the children of modify() to download the data associated
with this mutable version.
"""
c = consumer.MemoryConsumer()
# modify will almost certainly write, so we need the privkey.
d = self._read(c, fetch_privkey=True)
d.addCallback(lambda mc: "".join(mc.chunks))
return d
def read(self, consumer, offset=0, size=None, fetch_privkey=False):
"""
I read a portion (possibly all) of the mutable file that I
reference into consumer.
"""
return self._do_serialized(self._read, consumer, offset, size,
fetch_privkey)
def _read(self, consumer, offset=0, size=None, fetch_privkey=False):
"""
I am the serialized companion of read.
"""
r = Retrieve(self._node, self._storage_broker, self._servermap,
self._version, fetch_privkey)
if self._history:
self._history.notify_retrieve(r.get_status())
d = r.download(consumer, offset, size)
return d
def _do_serialized(self, cb, *args, **kwargs):
# note: to avoid deadlock, this callable is *not* allowed to invoke
# other serialized methods within this (or any other)
# MutableFileNode. The callable should be a bound method of this same
# MFN instance.
d = defer.Deferred()
self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
# we need to put off d.callback until this Deferred is finished being
# processed. Otherwise the caller's subsequent activities (like,
# doing other things with this node) can cause reentrancy problems in
# the Deferred code itself
self._serializer.addBoth(lambda res: eventually(d.callback, res))
# add a log.err just in case something really weird happens, because
# self._serializer stays around forever, therefore we won't see the
# usual Unhandled Error in Deferred that would give us a hint.
self._serializer.addErrback(log.err)
return d
def _upload(self, new_contents):
#assert self._pubkey, "update_servermap must be called before publish"
p = Publish(self._node, self._storage_broker, self._servermap)
if self._history:
self._history.notify_publish(p.get_status(),
new_contents.get_size())
d = p.publish(new_contents)
d.addCallback(self._did_upload, new_contents.get_size())
return d
def _did_upload(self, res, size):
self._most_recent_size = size
return res
def update(self, data, offset):
"""
Do an update of this mutable file version by inserting data at
offset within the file. If offset is the EOF, this is an append
operation. I return a Deferred that fires with the results of
the update operation when it has completed.
In cases where update does not append any data, or where it does
not append so many blocks that the block count crosses a
power-of-two boundary, this operation will use roughly
O(data.get_size()) memory/bandwidth/CPU to perform the update.
Otherwise, it must download, re-encode, and upload the entire
file again, which will use O(filesize) resources.
"""
return self._do_serialized(self._update, data, offset)
def _update(self, data, offset):
"""
I update the mutable file version represented by this particular
IMutableVersion by inserting the data in data at the offset
offset. I return a Deferred that fires when this has been
completed.
"""
new_size = data.get_size() + offset
old_size = self.get_size()
segment_size = self._version[3]
num_old_segments = mathutil.div_ceil(old_size,
segment_size)
num_new_segments = mathutil.div_ceil(new_size,
segment_size)
log.msg("got %d old segments, %d new segments" % \
(num_old_segments, num_new_segments))
# We do a whole file re-encode if the file is an SDMF file.
if self._version[2]: # version[2] == SDMF salt, which MDMF lacks
log.msg("doing re-encode instead of in-place update")
return self._do_modify_update(data, offset)
# Otherwise, we can replace just the parts that are changing.
log.msg("updating in place")
d = self._do_update_update(data, offset)
d.addCallback(self._decode_and_decrypt_segments, data, offset)
d.addCallback(self._build_uploadable_and_finish, data, offset)
return d
def _do_modify_update(self, data, offset):
"""
I perform a file update by modifying the contents of the file
after downloading it, then reuploading it. I am less efficient
than _do_update_update, but am necessary for certain updates.
"""
def m(old, servermap, first_time):
start = offset
rest = offset + data.get_size()
new = old[:start]
new += "".join(data.read(data.get_size()))
new += old[rest:]
return new
return self._modify(m, None)
def _do_update_update(self, data, offset):
"""
I start the Servermap update that gets us the data we need to
continue the update process. I return a Deferred that fires when
the servermap update is done.
"""
assert IMutableUploadable.providedBy(data)
assert self.is_mutable()
# offset == self.get_size() is valid and means that we are
# appending data to the file.
assert offset <= self.get_size()
segsize = self._version[3]
# We'll need the segment that the data starts in, regardless of
# what we'll do later.
start_segment = offset // segsize
# We only need the end segment if the data we append does not go
# beyond the current end-of-file.
end_segment = start_segment
if offset + data.get_size() < self.get_size():
end_data = offset + data.get_size()
# The last byte we touch is the end_data'th byte, which is actually
# byte end_data - 1 because bytes are zero-indexed.
end_data -= 1
end_segment = end_data // segsize
self._start_segment = start_segment
self._end_segment = end_segment
# Now ask for the servermap to be updated in MODE_WRITE with
# this update range.
return self._update_servermap(update_range=(start_segment,
end_segment))
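# Worked example for the arithmetic above (illustrative numbers): with
# segsize = 131072 (128 KiB), offset = 204800 and data.get_size() = 65536,
# start_segment = 204800 // 131072 = 1; if the write ends before EOF,
# end_data = 270335, so end_segment = 270335 // 131072 = 2.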
def _decode_and_decrypt_segments(self, ignored, data, offset):
"""
After the servermap update, I take the encrypted and encoded
data that the servermap fetched while doing its update and
transform it into decoded-and-decrypted plaintext that can be
used by the new uploadable. I return a Deferred that fires with
the segments.
"""
r = Retrieve(self._node, self._storage_broker, self._servermap,
self._version)
# decode: takes in our blocks and salts from the servermap,
# returns a Deferred that fires with the corresponding plaintext
# segments. Does not download -- simply takes advantage of
# existing infrastructure within the Retrieve class to avoid
# duplicating code.
sm = self._servermap
# XXX: If the methods in the servermap don't work as
# abstractions, you should rewrite them instead of going around
# them.
update_data = sm.update_data
start_segments = {} # shnum -> start segment
end_segments = {} # shnum -> end segment
blockhashes = {} # shnum -> blockhash tree
for (shnum, original_data) in update_data.iteritems():
data = [d[1] for d in original_data if d[0] == self._version]
# data is [(blockhashes,start,end)..]
# Every data entry in our list should now be share shnum for
# a particular version of the mutable file, so all of the
# entries should be identical.
datum = data[0]
assert [x for x in data if x != datum] == []
# datum is (blockhashes,start,end)
blockhashes[shnum] = datum[0]
start_segments[shnum] = datum[1] # (block,salt) bytestrings
end_segments[shnum] = datum[2]
d1 = r.decode(start_segments, self._start_segment)
d2 = r.decode(end_segments, self._end_segment)
d3 = defer.succeed(blockhashes)
return deferredutil.gatherResults([d1, d2, d3])
def _build_uploadable_and_finish(self, segments_and_bht, data, offset):
"""
After the process has the plaintext segments, I build the
TransformingUploadable that the publisher will eventually
re-upload to the grid. I then invoke the publisher with that
uploadable, and return a Deferred when the publish operation has
completed without issue.
"""
u = TransformingUploadable(data, offset,
self._version[3],
segments_and_bht[0],
segments_and_bht[1])
p = Publish(self._node, self._storage_broker, self._servermap)
return p.update(u, offset, segments_and_bht[2], self._version)
def _update_servermap(self, mode=MODE_WRITE, update_range=None):
"""
I update the servermap. I return a Deferred that fires when the
servermap update is done.
"""
if update_range:
u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
self._servermap,
mode=mode,
update_range=update_range)
else:
u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
self._servermap,
mode=mode)
return u.update()
| gpl-2.0 |
shinyChen/browserscope | test/test_util.py | 9 | 6660 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Models Unit Tests."""
__author__ = '[email protected] (Lindsey Simon)'
import unittest
import random
import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from django.test.client import Client
from base import util
from categories import all_test_sets
from categories import test_set_params
from models import result
from models.user_agent import UserAgent
import mock_data
import settings
from categories import richtext
class TestHome(unittest.TestCase):
def setUp(self):
self.client = Client()
def testHome(self):
response = self.client.get('/', {}, **mock_data.UNIT_TEST_UA)
self.assertEqual(200, response.status_code)
#def testHomeWithResults(self):
#test_set = mock_data.MockTestSet('cat_home')
#params = {'cat_home_results': 'apple=0,banana=97,coconut=677'}
#response = self.client.get('/', params, **mock_data.UNIT_TEST_UA)
#self.assertEqual(200, response.status_code)
class TestBeacon(unittest.TestCase):
def setUp(self):
self.test_set = mock_data.MockTestSet()
all_test_sets.AddTestSet(self.test_set)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set)
def testBeaconWithoutCsrfToken(self):
params = {}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(403, response.status_code)
def testBeaconWithoutCategory(self):
csrf_token = self.client.get('/get_csrf').content
params = {'results': 'testDisply:200', 'csrf_token': csrf_token}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'Category/Results', response.content)
def testBeacon(self):
csrf_token = self.client.get('/get_csrf').content
params = {
'category': self.test_set.category,
'results': 'apple=1,banana=2,coconut=4',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(204, response.status_code)
# Did a ResultParent get created?
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertNotEqual(result_parent, None)
result_times = result_parent.GetResultTimes()
self.assertEqual(
[('apple', 1, False), ('banana', 2, False), ('coconut', 4, False)],
sorted((x.test, x.score, x.dirty) for x in result_times))
def testBeaconWithChromeFrame(self):
csrf_token = self.client.get('/get_csrf').content
chrome_ua_string = ('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) '
'AppleWebKit/530.1 (KHTML, like Gecko) Chrome/4.0.169.1 Safari/530.1')
chrome_frame_ua_string = ('Mozilla/4.0 '
'(compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; '
'chromeframe; '
'.NET CLR 2.0.50727; .NET CLR 1.1.4322; '
'.NET CLR 3.0.04506.648; .NET CLR 3.5.21022)')
unit_test_ua = mock_data.UNIT_TEST_UA
unit_test_ua['HTTP_USER_AGENT'] = chrome_frame_ua_string
params = {
'category': self.test_set.category,
'results': 'apple=0,banana=0,coconut=1000',
'csrf_token': csrf_token,
'js_ua': chrome_ua_string
}
response = self.client.get('/beacon', params, **unit_test_ua)
self.assertEqual(204, response.status_code)
# Did a ResultParent get created?
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertNotEqual(result_parent, None)
# What UA did the ResultParent get tied to? Chrome Frame (IE 7) I hope.
user_agent = result_parent.user_agent
self.assertEqual('Chrome Frame (IE 7) 4.0.169', user_agent.pretty())
# Were ResultTimes created?
result_times = result_parent.GetResultTimes()
self.assertEqual(
[('apple', 0, False), ('banana', 0, False), ('coconut', 1000, False)],
sorted((x.test, x.score, x.dirty) for x in result_times))
def testBeaconWithBogusTests(self):
csrf_token = self.client.get('/get_csrf').content
params = {
'category': self.test_set.category,
'results': 'testBogus=1,testVisibility=2',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'ResultParent', response.content)
# Did a ResultParent get created? Shouldn't have.
query = db.Query(result.ResultParent)
query.filter('category =', self.test_set.category)
result_parent = query.get()
self.assertEqual(None, result_parent)
def testBeaconWithoutTestSet(self):
category = 'test_beacon_wo_test_set'
csrf_token = self.client.get('/get_csrf').content
params = {
'category': category,
'results': 'testDisplay=1,testVisibility=2',
'csrf_token': csrf_token
}
response = self.client.get('/beacon', params, **mock_data.UNIT_TEST_UA)
self.assertEqual(util.BAD_BEACON_MSG + 'TestSet', response.content)
class TestUtilFunctions(unittest.TestCase):
def testCheckThrottleIpAddress(self):
ip = mock_data.UNIT_TEST_UA['REMOTE_ADDR']
ua_string = mock_data.UNIT_TEST_UA['HTTP_USER_AGENT']
category = 'foo'
for i in range(11):
self.assertTrue(util.CheckThrottleIpAddress(ip, ua_string, category))
# The next one should bomb.
self.assertFalse(util.CheckThrottleIpAddress(ip, ua_string, category))
# But a new category should work fine.
self.assertTrue(util.CheckThrottleIpAddress(ip, ua_string, 'bar'))
class TestClearMemcache(unittest.TestCase):
def setUp(self):
self.client = Client()
def testClearMemcacheRecentTests(self):
memcache.set(util.RECENT_TESTS_MEMCACHE_KEY, 'foo')
params = {'recent': 1}
response = self.client.get('/clear_memcache', params)
recent_tests = memcache.get(util.RECENT_TESTS_MEMCACHE_KEY)
self.assertEqual(None, recent_tests)
self.assertEqual(200, response.status_code)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/calculus/tests/test_finite_diff.py | 18 | 7438 | from sympy import S, symbols, Function
from sympy.calculus.finite_diff import (
apply_finite_diff, finite_diff_weights, as_finite_diff
)
def test_apply_finite_diff():
x, h = symbols('x h')
f = Function('f')
assert (apply_finite_diff(1, [x-h, x+h], [f(x-h), f(x+h)], x) -
(f(x+h)-f(x-h))/(2*h)).simplify() == 0
assert (apply_finite_diff(1, [5, 6, 7], [f(5), f(6), f(7)], 5) -
(-S(3)/2*f(5) + 2*f(6) - S(1)/2*f(7))).simplify() == 0
def test_finite_diff_weights():
d = finite_diff_weights(1, [5, 6, 7], 5)
assert d[1][2] == [-S(3)/2, 2, -S(1)/2]
# Table 1, p. 702 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
# x = [[0], [-1, 0, 1], ...]
xl = [[j for j in range(-i, i+1)] for i in range(0, 5)]
# d holds all coefficients
d = [finite_diff_weights({0: 0, 1: 2, 2: 4, 3: 4, 4: 4}[i],
xl[i], 0) for i in range(5)]
# Zeroth derivative
assert d[0][0][0] == [S(1)]
# First derivative
assert d[1][1][2] == [-S(1)/2, S(0), S(1)/2]
assert d[2][1][4] == [S(1)/12, -S(2)/3, S(0), S(2)/3, -S(1)/12]
assert d[3][1][6] == [-S(1)/60, S(3)/20, -S(3)/4, S(0), S(3)/4, -S(3)/20,
S(1)/60]
assert d[4][1][8] == [S(1)/280, -S(4)/105, S(1)/5, -S(4)/5, S(0), S(4)/5,
-S(1)/5, S(4)/105, -S(1)/280]
# Second derivative
assert d[1][2][2] == [S(1), -S(2), S(1)]
assert d[2][2][4] == [-S(1)/12, S(4)/3, -S(5)/2, S(4)/3, -S(1)/12]
assert d[3][2][6] == [S(1)/90, -S(3)/20, S(3)/2, -S(49)/18, S(3)/2,
-S(3)/20, S(1)/90]
assert d[4][2][8] == [-S(1)/560, S(8)/315, -S(1)/5, S(8)/5, -S(205)/72,
S(8)/5, -S(1)/5, S(8)/315, -S(1)/560]
# Third derivative
assert d[2][3][4] == [-S(1)/2, S(1), S(0), -S(1), S(1)/2]
assert d[3][3][6] == [S(1)/8, -S(1), S(13)/8, S(0), -S(13)/8, S(1),
-S(1)/8]
assert d[4][3][8] == [-S(7)/240, S(3)/10, -S(169)/120, S(61)/30, S(0),
-S(61)/30, S(169)/120, -S(3)/10, S(7)/240]
# Fourth derivative
assert d[2][4][4] == [S(1), -S(4), S(6), -S(4), S(1)]
assert d[3][4][6] == [-S(1)/6, S(2), -S(13)/2, S(28)/3, -S(13)/2, S(2),
-S(1)/6]
assert d[4][4][8] == [S(7)/240, -S(2)/5, S(169)/60, -S(122)/15, S(91)/8,
-S(122)/15, S(169)/60, -S(2)/5, S(7)/240]
# Table 2, p. 703 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
xl = [[j/S(2) for j in list(range(-i*2+1, 0, 2))+list(range(1, i*2+1, 2))]
for i in range(1, 5)]
# d holds all coefficients
d = [finite_diff_weights({0: 1, 1: 2, 2: 4, 3: 4}[i], xl[i], 0) for
i in range(4)]
# Zeroth derivative
assert d[0][0][1] == [S(1)/2, S(1)/2]
assert d[1][0][3] == [-S(1)/16, S(9)/16, S(9)/16, -S(1)/16]
assert d[2][0][5] == [S(3)/256, -S(25)/256, S(75)/128, S(75)/128,
-S(25)/256, S(3)/256]
assert d[3][0][7] == [-S(5)/2048, S(49)/2048, -S(245)/2048, S(1225)/2048,
S(1225)/2048, -S(245)/2048, S(49)/2048, -S(5)/2048]
# First derivative
assert d[0][1][1] == [-S(1), S(1)]
assert d[1][1][3] == [S(1)/24, -S(9)/8, S(9)/8, -S(1)/24]
assert d[2][1][5] == [-S(3)/640, S(25)/384, -S(75)/64, S(75)/64,
-S(25)/384, S(3)/640]
assert d[3][1][7] == [S(5)/7168, -S(49)/5120, S(245)/3072, S(-1225)/1024,
S(1225)/1024, -S(245)/3072, S(49)/5120, -S(5)/7168]
# Reasonably the rest of the table is also correct... (testing of that
# deemed excessive at the moment)
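# Illustrative spot-check outside the tables (the expected values follow
# from the assertions above): the familiar 3-point central stencil for the
# first derivative falls out directly:
#   finite_diff_weights(1, [-1, 0, 1], 0)[1][2] == [-S(1)/2, S(0), S(1)/2]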
def test_as_finite_diff():
x, h = symbols('x h')
f = Function('f')
# Central 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [x-2, x-1, x, x+1, x+2]) -
(S(1)/12*(f(x-2)-f(x+2)) + S(2)/3*(f(x+1)-f(x-1)))).simplify() == 0
# Central 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x)) -
(f(x + S(1)/2)-f(x - S(1)/2))).simplify() == 0
assert (as_finite_diff(f(x).diff(x), h) -
(f(x + h/S(2))-f(x - h/S(2)))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x - 3*h, x-h, x+h, x + 3*h]) -
(S(9)/(8*2*h)*(f(x+h) - f(x-h)) +
S(1)/(24*2*h)*(f(x - 3*h) - f(x + 3*h)))).simplify() == 0
# One sided 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [0, 1, 2], 0) -
(-S(3)/2*f(0) + 2*f(1) - f(2)/2)).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x, x+h], x) -
(f(x+h) - f(x))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x-h, x, x+h], x-h) -
(-S(3)/(2*h)*f(x-h) + 2/h*f(x) -
S(1)/(2*h)*f(x+h))).simplify() == 0
# One sided 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x), [x-h, x+h, x + 3*h, x + 5*h, x + 7*h])
- 1/(2*h)*(-S(11)/(12)*f(x-h) + S(17)/(24)*f(x+h)
+ S(3)/8*f(x + 3*h) - S(5)/24*f(x + 5*h)
+ S(1)/24*f(x + 7*h))).simplify() == 0
# Central 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x, x+h]) -
h**-2 * (f(x-h) + f(x+h) - 2*f(x))).simplify() == 0
assert (as_finite_diff(f(x).diff(x, 2), [x - 2*h, x-h, x, x+h, x + 2*h]) -
h**-2 * (-S(1)/12*(f(x - 2*h) + f(x + 2*h)) +
S(4)/3*(f(x+h) + f(x-h)) - S(5)/2*f(x))).simplify() == 0
# Central 2nd derivative "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-2 * (S(1)/2*(f(x - 3*h) + f(x + 3*h)) -
S(1)/2*(f(x+h) + f(x-h)))).simplify() == 0
# One sided 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x, x+h, x + 2*h, x + 3*h]) -
h**-2 * (2*f(x) - 5*f(x+h) +
4*f(x+2*h) - f(x+3*h))).simplify() == 0
# One sided 2nd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-2 * (S(3)/2*f(x-h) - S(7)/2*f(x+h) + S(5)/2*f(x + 3*h) -
S(1)/2*f(x + 5*h))).simplify() == 0
# Central 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3)) -
(-f(x - 3/S(2)) + 3*f(x - 1/S(2)) -
3*f(x + 1/S(2)) + f(x + 3/S(2)))).simplify() == 0
assert (as_finite_diff(
f(x).diff(x, 3), [x - 3*h, x - 2*h, x-h, x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (S(1)/8*(f(x - 3*h) - f(x + 3*h)) - f(x - 2*h) +
f(x + 2*h) + S(13)/8*(f(x-h) - f(x+h)))).simplify() == 0
# Central 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-3 * (f(x + 3*h)-f(x - 3*h) +
3*(f(x-h)-f(x+h)))).simplify() == 0
# One sided 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3), [x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (f(x + 3*h)-f(x) + 3*(f(x+h)-f(x + 2*h)))).simplify() == 0
# One sided 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-3 * (f(x + 5*h)-f(x-h) +
3*(f(x+h)-f(x + 3*h)))).simplify() == 0
| mit |
TamiaLab/carnetdumaker | apps/bugtracker/tests/test_context_processors.py | 1 | 2920 | """
Test suite for the context processors of the bug tracker app.
"""
from django.test import SimpleTestCase
from django.http import HttpRequest
from ..context_processors import bugtracker
from ..constants import (STATUS_OPEN,
STATUS_NEED_DETAILS,
STATUS_CONFIRMED,
STATUS_WORKING_ON,
STATUS_DEFERRED,
STATUS_DUPLICATE,
STATUS_WONT_FIX,
STATUS_CLOSED,
STATUS_FIXED)
from ..constants import (PRIORITY_GODZILLA,
PRIORITY_CRITICAL,
PRIORITY_MAJOR,
PRIORITY_MINOR,
PRIORITY_TRIVIAL,
PRIORITY_NEED_REVIEW,
PRIORITY_FEATURE,
PRIORITY_WISHLIST,
PRIORITY_INVALID,
PRIORITY_NOT_MY_FAULT)
from ..constants import (DIFFICULTY_DESIGN_ERRORS,
DIFFICULTY_IMPORTANT,
DIFFICULTY_NORMAL,
DIFFICULTY_LOW_IMPACT,
DIFFICULTY_OPTIONAL)
class BugTrackerContextProcessorTestCase(SimpleTestCase):
"""
Test case for the context processor.
"""
def test_bugtracker_context_update(self):
"""
Test if the ``bugtracker`` context processor add the constants into the context.
"""
request = HttpRequest()
result = bugtracker(request)
self.assertEqual(result, {
'BUGTRACKER_STATUS': {
'OPEN': STATUS_OPEN,
'NEED_DETAILS': STATUS_NEED_DETAILS,
'CONFIRMED': STATUS_CONFIRMED,
'WORKING_ON': STATUS_WORKING_ON,
'DEFERRED': STATUS_DEFERRED,
'DUPLICATE': STATUS_DUPLICATE,
'WONT_FIX': STATUS_WONT_FIX,
'CLOSED': STATUS_CLOSED,
'FIXED': STATUS_FIXED,
},
'BUGTRACKER_PRIORITY': {
'GODZILLA': PRIORITY_GODZILLA,
'CRITICAL': PRIORITY_CRITICAL,
'MAJOR': PRIORITY_MAJOR,
'MINOR': PRIORITY_MINOR,
'TRIVIAL': PRIORITY_TRIVIAL,
'NEED_REVIEW': PRIORITY_NEED_REVIEW,
'FEATURE': PRIORITY_FEATURE,
'WISHLIST': PRIORITY_WISHLIST,
'INVALID': PRIORITY_INVALID,
'NOT_MY_FAULT': PRIORITY_NOT_MY_FAULT,
},
'BUGTRACKER_DIFFICULTY': {
'DESIGN_ERRORS': DIFFICULTY_DESIGN_ERRORS,
'IMPORTANT': DIFFICULTY_IMPORTANT,
'NORMAL': DIFFICULTY_NORMAL,
'LOW_IMPACT': DIFFICULTY_LOW_IMPACT,
'OPTIONAL': DIFFICULTY_OPTIONAL,
},
})
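# Note (an assumption about project wiring, not shown in this file): for the
# processor to run in templates it would typically be registered in the
# Django settings of that era, e.g.:
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'apps.bugtracker.context_processors.bugtracker',)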
| agpl-3.0 |
ccarrascal/drupal-commerce | sites/all/libraries/elfinder/src/connectors/python/connector.py | 74 | 2687 | #!/usr/bin/env python
import cgi
try:
import json
except ImportError:
import simplejson as json
import elFinder
# configure connector options
opts = {
#'root': '/home/troex/Sites/git/elfinder/files',
'root': '../git/elfinder/files/',
'URL': 'http://localhost:8001/~troex/git/elfinder/files',
## other options
'debug': True,
'fileURL': True, # download files using connector, no direct urls to files
# 'dirSize': True,
# 'dotFiles': True,
# 'perms': {
# 'backup': {
# 'read': True,
# 'write': False,
# 'rm': False
# },
# '^/pics': {
# 'read': True,
# 'write': False,
# 'rm': False
# }
# },
# 'uploadDeny': ['image', 'application'],
# 'uploadAllow': ['image/png', 'image/jpeg'],
# 'uploadOrder': ['deny', 'allow']
# 'disabled': ['rename', 'quicklook', 'upload']
}
# init connector and pass options
elf = elFinder.connector(opts)
# fetch only needed GET/POST parameters
httpRequest = {}
form = cgi.FieldStorage()
for field in elf.httpAllowedParameters:
if field in form:
httpRequest[field] = form.getvalue(field)
if field == 'upload[]':
upFiles = {}
cgiUploadFiles = form['upload[]']
for up in cgiUploadFiles:
if up.filename:
upFiles[up.filename] = up.file # pack dict(filename: filedescriptor)
httpRequest['upload[]'] = upFiles
# run connector with parameters
status, header, response = elf.run(httpRequest)
# get connector output and print it out
# code below is tested with Apache only (other servers may need a different method)
if status == 200:
print 'Status: 200'
elif status == 403:
print 'Status: 403'
elif status == 404:
print 'Status: 404'
if len(header) >= 1:
for h, v in header.iteritems():
print h + ': ' + v
print
if response is not None and status == 200:
# send file
if 'file' in response and isinstance(response['file'], file):
print response['file'].read()
response['file'].close()
# output json
else:
print json.dumps(response, indent = True)
## logging
#import sys
#log = open('/home/troex/Sites/git/elfinder/files/out.log', 'w')
#print >>log, 'FORM: ', form
#log.close()
## another aproach
## get connector output and print it out
#if elf.httpStatusCode == 200:
# print 'HTTP/1.1 200 OK'
#elif elf.httpStatusCode == 403:
# print 'HTTP/1.x 403 Access Denied'
#elif elf.httpStatusCode == 404:
# print 'HTTP/1.x 404 Not Found'
#
#if len(elf.httpHeader) >= 1:
# for header, value in elf.httpHeader.iteritems():
# print header + ': ' + value
# print
#
#if not elf.httpResponse is None:
# if isinstance(elf.httpResponse['file'], file):
# print elf.httpResponse['file'].read()
# elf.httpResponse['file'].close()
# else:
# print json.dumps(elf.httpResponse, indent = True)
#
| gpl-2.0 |
Ensembles/ert | python/python/ert/enkf/plot_data/ensemble_plot_gen_kw_vector.py | 2 | 1627 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'ensemble_plot_gen_kw_vector.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype
class EnsemblePlotGenKWVector(BaseCClass):
TYPE_NAME = "ensemble_plot_gen_kw_vector"
_size = EnkfPrototype("int enkf_plot_gen_kw_vector_get_size(ensemble_plot_gen_kw_vector)")
_get_value = EnkfPrototype("double enkf_plot_gen_kw_vector_iget(ensemble_plot_gen_kw_vector, int)")
def __init__(self):
raise NotImplementedError("Class cannot be instantiated directly!")
def __len__(self):
""" @rtype: int """
return self._size()
def getValue(self, index):
""" @rtype: float """
return self[index]
def __iter__(self):
cur = 0
while cur < len(self):
yield self[cur]
cur += 1
def __getitem__(self, index):
""" @rtype: float """
return self._get_value(index)
def __repr__(self):
return 'EnsemblePlotGenKWVector(size = %d) %s' % (len(self), self._ad_str())
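# Hypothetical usage sketch (the vector instance is produced by the enkf C
# layer, not constructed directly):
#   for value in vector:      # __iter__ walks __getitem__ / _get_value
#       print(value)
#   n = len(vector)           # backed by enkf_plot_gen_kw_vector_get_size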
| gpl-3.0 |
nrwahl2/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_interface.py | 27 | 15550 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vrf_interface
version_added: "2.4"
short_description: Manages interface-specific VPN configuration on HUAWEI CloudEngine switches.
description:
- Manages interface specific VPN configuration of HUAWEI CloudEngine switches.
author: Zhijin Zhou (@CloudEngine-Ansible)
notes:
- Ensure that a VPN instance has been created and the IPv4 address family has been enabled for the VPN instance.
options:
vrf:
description:
- VPN instance. The VRF name is 1 to 31 characters long, e.g. "test", but cannot be C(_public_).
required: true
vpn_interface:
description:
- An interface that can be bound to a VPN instance, e.g. 40GE1/0/22 or Vlanif10.
Must be fully qualified interface name.
Interface types include 10GE, 40GE, 100GE, LoopBack, MEth, Tunnel, Vlanif, and so on.
required: true
state:
description:
- Manage the state of the resource.
required: false
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: VRF interface test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure a VPN instance for the interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: present
provider: "{{ cli }}"
- name: "Disable the association between a VPN instance and an interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {
"state": "present",
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
existing:
description: k/v pairs of existing attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": null
}
end_state:
description: k/v pairs of end attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip binding vpn-instance jss",
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_INTERFACE = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<l3vpnIfs>
<l3vpnIf>
<ifName></ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_MERGE_VRF_INTERFACE = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="merge">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_DEL_INTF_VPN = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="delete">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
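# Expected behaviour of the helper above (added comment; the sample names are
# hypothetical). Names starting with a digit skip the bare 'GE' branch, so
# the more specific prefixes still win:
#
#     get_interface_type('40GE1/0/22')   # -> '40ge'
#     get_interface_type('Vlanif10')     # -> 'vlanif'
#     get_interface_type('bogus0')       # -> None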
class VrfInterface(object):
"""Manange vpn instance"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# vpn instance info
self.vrf = self.module.params['vrf']
self.vpn_interface = self.module.params['vpn_interface']
self.vpn_interface = self.vpn_interface.upper().replace(' ', '')
self.state = self.module.params['state']
self.intf_info = dict()
self.intf_info['isL2SwitchPort'] = None
self.intf_info['vrfName'] = None
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init_module"""
required_one_of = [("vrf", "vpn_interface")]
self.module = AnsibleModule(
argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_update_cmd(self):
""" get updated command"""
if self.conf_exist:
return
if self.state == 'absent':
self.updates_cmd.append(
"undo ip binding vpn-instance %s" % self.vrf)
return
if self.vrf != self.intf_info['vrfName']:
self.updates_cmd.append("ip binding vpn-instance %s" % self.vrf)
return
def check_params(self):
"""Check all input params"""
if not self.is_vrf_exist():
self.module.fail_json(
                msg='Error: The VPN instance does not exist.')
if self.state == 'absent':
if self.vrf != self.intf_info['vrfName']:
self.module.fail_json(
msg='Error: The VPN instance is not bound to the interface.')
if self.intf_info['isL2SwitchPort'] == 'true':
self.module.fail_json(
                msg='Error: an L2 switch port cannot bind a VPN instance.')
# interface type check
if self.vpn_interface:
intf_type = get_interface_type(self.vpn_interface)
if not intf_type:
self.module.fail_json(
msg='Error: interface name of %s'
                        ' is invalid.' % self.vpn_interface)
# vrf check
if self.vrf == '_public_':
self.module.fail_json(
msg='Error: The vrf name _public_ is reserved.')
if len(self.vrf) < 1 or len(self.vrf) > 31:
self.module.fail_json(
msg='Error: The vrf name length must be between 1 and 31.')
def get_interface_vpn_name(self, vpninfo, vpn_name):
""" get vpn instance name"""
l3vpn_if = vpninfo.findall("l3vpnIf")
for l3vpn_ifinfo in l3vpn_if:
for ele in l3vpn_ifinfo:
if ele.tag in ['ifName']:
if ele.text == self.vpn_interface:
self.intf_info['vrfName'] = vpn_name
def get_interface_vpn(self):
""" get the VPN instance associated with the interface"""
xml_str = CE_NC_GET_VRF_INTERFACE
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get global vrf interface info
root = ElementTree.fromstring(xml_str)
vpns = root.findall(
"data/l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
if vpns:
for vpnele in vpns:
vpn_name = None
for vpninfo in vpnele:
if vpninfo.tag == 'vrfName':
vpn_name = vpninfo.text
if vpninfo.tag == 'l3vpnIfs':
self.get_interface_vpn_name(vpninfo, vpn_name)
return
def is_vrf_exist(self):
""" judge whether the VPN instance is existed"""
conf_str = CE_NC_GET_VRF % self.vrf
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return False
return True
def get_intf_conf_info(self):
""" get related configuration of the interface"""
conf_str = CE_NC_GET_INTF % self.vpn_interface
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return
# get interface base info
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
interface = root.find("data/ifm/interfaces/interface")
        if interface is not None:  # explicit presence test; Element truthiness reflects child count
for eles in interface:
if eles.tag in ["isL2SwitchPort"]:
self.intf_info[eles.tag] = eles.text
self.get_interface_vpn()
return
def get_existing(self):
"""get existing config"""
self.existing = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def get_proposed(self):
"""get_proposed"""
self.proposed = dict(vrf=self.vrf,
vpn_interface=self.vpn_interface,
state=self.state)
def get_end_state(self):
"""get_end_state"""
self.intf_info['vrfName'] = None
self.get_intf_conf_info()
self.end_state = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def show_result(self):
""" show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
""" judge whether configuration has existed"""
if self.state == 'absent':
return False
delta = set(self.proposed.items()).difference(
self.existing.items())
delta = dict(delta)
if len(delta) == 1 and delta['state']:
return True
return False
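    # Walk-through of the set-difference test above (added comment): with
    # proposed = {'vrf': 'test', 'vpn_interface': '40GE1/0/2', 'state': 'present'}
    # and existing = {'vrf': 'test', 'vpn_interface': '40GE1/0/2'}, the
    # difference is only {'state': 'present'}, so the binding already exists
    # and no NETCONF change needs to be pushed.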
def config_interface_vrf(self):
""" configure VPN instance of the interface"""
if not self.conf_exist and self.state == 'present':
xml_str = CE_NC_MERGE_VRF_INTERFACE % (
self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "VRF_INTERFACE_CONFIG")
self.changed = True
elif self.state == 'absent':
xml_str = CE_NC_DEL_INTF_VPN % (self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "DEL_VRF_INTERFACE_CONFIG")
self.changed = True
def work(self):
"""excute task"""
self.get_intf_conf_info()
self.check_params()
self.get_existing()
self.get_proposed()
self.conf_exist = self.judge_if_config_exist()
self.config_interface_vrf()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
"""main"""
argument_spec = dict(
vrf=dict(required=True, type='str'),
vpn_interface=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present', required=False),
)
argument_spec.update(ce_argument_spec)
vrf_intf = VrfInterface(argument_spec)
vrf_intf.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
codedsk/hubcheck-hubzero-tests | hchztests/tests/test_website_support_need_help.py | 1 | 7124 | import pytest
import sys
import os
import re
import hubcheck
pytestmark = [ pytest.mark.website,
pytest.mark.tickets,
pytest.mark.need_help,
pytest.mark.reboot,
pytest.mark.upgrade,
pytest.mark.prod_safe_upgrade
]
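# (Added note) The module-level marker list above lets whole suites be
# selected or skipped from the command line, e.g. a hypothetical run such as
# `py.test -m "tickets and not captcha"` picks ticket tests that do not
# need a human to solve a captcha.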
class TestNeedHelp(hubcheck.testcase.TestCase2):
def setup_method(self,method):
# setup a web browser
self.browser.get(self.https_authority)
# get user account info
self.username,self.password = \
self.testdata.find_account_for('ticketsubmitter')
self.adminuser,self.adminpass = \
self.testdata.find_account_for('ticketmanager')
self.ticket_number = None
def teardown_method(self,method):
# if we created a ticket, delete the ticket
if self.ticket_number is not None \
and (self.adminuser != "") \
and (self.adminpass != ""):
try:
self.utils.account.logout()
except:
pass
self.utils.account.login_as(self.adminuser,self.adminpass)
self.utils.support.close_support_ticket_invalid(self.ticket_number)
def test_link_exists(self):
"""
        click the need help link to see if the widget exists
"""
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
po.close()
@pytest.mark.nt
def test_link_changes_webpage(self):
"""
click the need help link, check if the url changes
"""
po = self.catalog.load_pageobject('GenericPage')
start_url = po.current_url()
po.toggle_needhelp()
end_url = po.current_url()
assert start_url == end_url, "clicking the 'Need Help?' link" \
+ " changed the web page from %s to %s" % (start_url,end_url)
def test_if_link_leads_to_support_url(self):
"""
open the "Need Help?" dialogue to ensure it does not lead to
/support
        The redirect is sometimes seen when javascript is turned off, but
        if javascript is on, clicking this link should not send the user
        to the /support webpage.
"""
# store the start and end page url's for comparison
# click the needhelp link and see if it takes us to /support
po = self.catalog.load_pageobject('SupportNeedHelpPage')
startpageurl = po.current_url()
po.open()
endpageurl = po.current_url()
assert startpageurl == endpageurl, \
"User was redirected to %s\n" % endpageurl
# FIXME: use urlparse here
# create a pattern for a url regular expression
p = re.compile('(([^:]+)://)?([^:/]+)(:([0-9]+))?(/.*)?')
(junk, junk, junk, junk, junk, path) = p.search(endpageurl).groups()
# check that the page we were taken to is not /support
s = "pageurl = %s\npath = %s\n" % (endpageurl,path)
assert path != '/support', s
def test_submit_ticket_logged_in_using_need_help_link(self):
"""
login to the website as the "ticket submitter" and submit a
ticket using the need help link.
"""
problem = 'hubcheck test ticket\n%s' % (self.fnbase)
# login to the website and click the need help link
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username, name, and email fields are
# not accessible while logged in
self.ticket_number = po.submit_ticket({'problem':problem})
# check if the ticket number is a valid number
assert self.ticket_number is not None, "no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.captcha
def test_submit_ticket_logged_out_using_need_help_link(self):
"""
submit a support ticket using the need help link while not
logged into the website.
"""
# data for trouble report
data = {
'name' : 'hubcheck testuser',
'email' : '[email protected]',
'problem' : 'hubcheck test ticket\n%s' % (self.fnbase),
'captcha' : True,
}
# navigate to the SupportNeedHelp Page:
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username is optional
self.ticket_number = po.submit_ticket(data)
# check if the ticket number is a valid number
assert self.ticket_number is not None, \
"no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.tickets_attach_jpg
def test_attaching_jpg_image_to_ticket_submitted_through_need_help(self):
"""
Login to the website and submit a ticket, using the need help
link, with an attached jpeg image.
"""
problem = 'hubcheck test ticket\nattaching jpg image\n%s' \
% (self.fnbase)
uploadfilename = 'app2.jpg'
uploadfilepath = os.path.join(self.datadir,'images',uploadfilename)
data = {
'problem' : problem,
'upload' : uploadfilepath,
}
# login to the website and navigate to the need help form
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
# po.open()
po.needhelplink.click()
# submit a trouble report
# username, name, and email fields are not accessible
self.ticket_number = po.submit_ticket(data)
assert self.ticket_number is not None, "no ticket number returned"
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
po.goto_ticket()
po = self.catalog.load_pageobject('SupportTicketViewPage')
content = po.get_ticket_content()
imgsrc = content.download_image(uploadfilename)
# not sure how to really download image files yet.
# so we assume that as long as opening the image didn't
# cause an error, the test passed.
assert re.search(uploadfilename,imgsrc) is not None, \
"After uploading an image to support ticket" \
+ " #%s, could not download image %s" \
% (self.ticket_number,uploadfilename)
| mit |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/unit/ec2/test_address.py | 109 | 5950 | from tests.compat import mock, unittest
from boto.ec2.address import Address
class AddressTest(unittest.TestCase):
def setUp(self):
self.address = Address()
self.address.connection = mock.Mock()
self.address.public_ip = "192.168.1.1"
def check_that_attribute_has_been_set(self, name, value, attribute):
self.address.endElement(name, value, None)
self.assertEqual(getattr(self.address, attribute), value)
def test_endElement_sets_correct_attributes_with_values(self):
for arguments in [("publicIp", "192.168.1.1", "public_ip"),
("instanceId", 1, "instance_id"),
("domain", "some domain", "domain"),
("allocationId", 1, "allocation_id"),
("associationId", 1, "association_id"),
("somethingRandom", "somethingRandom", "somethingRandom")]:
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
def test_release_calls_connection_release_address_with_correct_args(self):
self.address.release()
self.address.connection.release_address.assert_called_with(
public_ip="192.168.1.1",
dry_run=False
)
def test_associate_calls_connection_associate_address_with_correct_args(self):
self.address.associate(1)
self.address.connection.associate_address.assert_called_with(
instance_id=1,
public_ip="192.168.1.1",
allow_reassociation=False,
network_interface_id=None,
private_ip_address=None,
dry_run=False
)
def test_disassociate_calls_connection_disassociate_address_with_correct_args(self):
self.address.disassociate()
self.address.connection.disassociate_address.assert_called_with(
public_ip="192.168.1.1",
dry_run=False
)
class AddressWithAllocationTest(unittest.TestCase):
def setUp(self):
self.address = Address()
self.address.connection = mock.Mock()
self.address.public_ip = "192.168.1.1"
self.address.allocation_id = "aid1"
def check_that_attribute_has_been_set(self, name, value, attribute):
self.address.endElement(name, value, None)
self.assertEqual(getattr(self.address, attribute), value)
def test_endElement_sets_correct_attributes_with_values(self):
for arguments in [("publicIp", "192.168.1.1", "public_ip"),
("instanceId", 1, "instance_id"),
("domain", "some domain", "domain"),
("allocationId", 1, "allocation_id"),
("associationId", 1, "association_id"),
("somethingRandom", "somethingRandom", "somethingRandom")]:
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
def test_release_calls_connection_release_address_with_correct_args(self):
self.address.release()
self.address.connection.release_address.assert_called_with(
allocation_id="aid1",
dry_run=False
)
def test_associate_calls_connection_associate_address_with_correct_args(self):
self.address.associate(1)
self.address.connection.associate_address.assert_called_with(
instance_id=1,
public_ip="192.168.1.1",
allocation_id="aid1",
network_interface_id=None,
private_ip_address=None,
allow_reassociation=False,
dry_run=False
)
def test_disassociate_calls_connection_disassociate_address_with_correct_args(self):
self.address.disassociate()
self.address.connection.disassociate_address.assert_called_with(
public_ip="192.168.1.1",
dry_run=False
)
class AddressWithNetworkInterfaceTest(unittest.TestCase):
def setUp(self):
self.address = Address()
self.address.connection = mock.Mock()
self.address.public_ip = "192.168.1.1"
self.address.allocation_id = "aid1"
def check_that_attribute_has_been_set(self, name, value, attribute):
self.address.endElement(name, value, None)
self.assertEqual(getattr(self.address, attribute), value)
def test_endElement_sets_correct_attributes_with_values(self):
for arguments in [("publicIp", "192.168.1.1", "public_ip"),
("instanceId", 1, "instance_id"),
("domain", "some domain", "domain"),
("allocationId", 1, "allocation_id"),
("associationId", 1, "association_id"),
("somethingRandom", "somethingRandom", "somethingRandom")]:
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
def test_release_calls_connection_release_address_with_correct_args(self):
self.address.release()
self.address.connection.release_address.assert_called_with(
allocation_id="aid1",
dry_run=False
)
def test_associate_calls_connection_associate_address_with_correct_args(self):
self.address.associate(network_interface_id=1)
self.address.connection.associate_address.assert_called_with(
instance_id=None,
public_ip="192.168.1.1",
network_interface_id=1,
private_ip_address=None,
allocation_id="aid1",
allow_reassociation=False,
dry_run=False
)
def test_disassociate_calls_connection_disassociate_address_with_correct_args(self):
self.address.disassociate()
self.address.connection.disassociate_address.assert_called_with(
public_ip="192.168.1.1",
dry_run=False
)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
YongseopKim/crosswalk-test-suite | tools/apkanalyser/comm.py | 3 | 2140 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: [email protected]
import os, sys
import re, codecs
import shutil, glob
def _find(pathname, matchFunc=os.path.isfile):
for dirname in sys.path:
candidate = os.path.join(dirname, pathname)
if matchFunc(candidate):
return candidate
def mk_dir(path):
if not find_dir(path):
os.mkdir(path)
def find_file(pathname):
return _find(pathname)
def find_dir(path):
return _find(path, matchFunc=os.path.isdir)
def find_glob_path(filepath):
return glob.glob(filepath)
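# Illustrative usage (added comment; the paths are hypothetical):
#
#     find_file('comm.py')        # absolute path if found on sys.path, else None
#     find_dir('tools')           # same lookup, but for directories
#     mk_dir('/tmp/apk-report')   # creates the directory only when missing
#     find_glob_path('*.apk')     # list of paths matching the glob pattern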
| bsd-3-clause |
jasper-meyer/Platformer | platformer.py | 1 | 3751 | """
platformer.py
Author: Jasper Meyer
Credit: You, the internet, Brendan
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
SCREEN_WIDTH = 1080
SCREEN_HEIGHT = 720
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
black = Color(0, 1)
backcol = Color(0xd9ffcc, 1.0)
purp = Color(0x9900cc, 1.0)
blue = Color(0x3399ff,1.0)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, backcol)
bg = Sprite(bg_asset, (0,0))
thinline = LineStyle(1, black)
sq = RectangleAsset (75,75, noline, black)
wub=0
pup=0
mousex=0
mousey=0
mousexround=0
mouseyround=0
play = RectangleAsset (25,50, noline, purp)
spr = RectangleAsset (20,10, noline, blue)
vy=0
player=0
acc = 0
ti = 0
rupx=0
lupx=0
vx=0
up=0
upup=0
stop = 0
shutup=0
spring = 0
sub = 0
springlist = []
def wup(event):
global wub
global mousexround
global mouseyround
wub = 1
if wub == 1:
mousexround=mousex-((mousex)%75)
mouseyround=mousey-((mousey)%75)
block = Sprite (sq, (mousexround, mouseyround))
def mousemo(event):
global mousex
global mousey
mousex=event.x
mousey=event.y
def spri(event):
global spring
global mousex
global mousey
global mouseyround
global sub
global springlist
sub =1
if sub == 1:
mouseyround=mousey-((mousey)%75)+65
springlist.append (Sprite (spr, (mousex, mouseyround)))
def pup(event):
global pub
global mousex
global mouseyround
global player
pub = 1
if pub == 1:
mouseyround=mousey-((mousey)%75)+25
if player == 0:
player = Sprite (play, (mousex, mouseyround))
def rup(event):
global rupx
rupx=1
def lup(event):
global lupx
lupx=1
def uup(event):
global up
up=1
def step():
if player != 0:
global vy
global acc
global ti
global rupx
global vx
global lupx
global up
global upup
global stop
global shutup
global springlist
global player
acc = 0.02
for s in springlist:
if player.collidingWith(s):
vy=-50+vy
vx=-vx
if stop == 0:
ti=ti+.5
if upup==4.5:
vy = (0.2*ti)-upup
else:
vy = (0.2*ti)
player.y=player.y+vy
player.x=player.x+vx
if rupx == 1:
vx=vx+1.5
lupx=0
rupx=0
if lupx == 1:
vx=vx-1.5
rupx=0
lupx=0
if vx > 3:
vx = 3
if vx < -3:
vx =-3
if up == 1:
upup = 4.5
up=0
if up == 0:
upup =4.5
col = player.collidingWithSprites(Sprite)
if len(col) > 1 and col[1].y<player.y+500:
stop=1
player.y=player.y-0.2
else:
stop=0
if stop == 1:
vy=0
ti=0
if len(col) > 1:
if col[1].y<player.y+50:
vx=-0.5*vx
if player.y > 2000:
player = 0
ti=0
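# Event wiring (added comment): 'w' places a block, 'p' spawns the player,
# 's' drops a spring, and the arrow keys steer; step() is the per-frame
# physics callback handed to myapp.run() below.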
myapp.listenKeyEvent('keyup', 's', spri)
myapp.listenKeyEvent('keydown', 'up arrow', uup)
myapp.listenKeyEvent('keydown', 'left arrow', lup)
myapp.listenKeyEvent('keydown', 'right arrow', rup)
myapp.listenKeyEvent('keyup', 'p', pup)
myapp.listenKeyEvent('keyup', 'w', wup)
myapp.listenMouseEvent('mousemove', mousemo)
myapp.run(step) | mit |
xpansa/server-tools | fetchmail_attach_from_folder/match_algorithm/__init__.py | 54 | 1115 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import base
from . import email_exact
from . import email_domain
from . import openerp_standard
| agpl-3.0 |
simplegeo/eventlet | eventlet/hubs/pyevent.py | 13 | 5455 | import sys
import traceback
import event
import types
from eventlet.support import greenlets as greenlet
from eventlet.hubs.hub import BaseHub, FdListener, READ, WRITE
class event_wrapper(object):
def __init__(self, impl=None, seconds=None):
self.impl = impl
self.seconds = seconds
def __repr__(self):
if self.impl is not None:
return repr(self.impl)
else:
return object.__repr__(self)
def __str__(self):
if self.impl is not None:
return str(self.impl)
else:
return object.__str__(self)
def cancel(self):
if self.impl is not None:
self.impl.delete()
self.impl = None
@property
def pending(self):
return bool(self.impl and self.impl.pending())
class Hub(BaseHub):
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
def __init__(self):
super(Hub,self).__init__()
event.init()
self.signal_exc_info = None
self.signal(
2,
lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt))
self.events_to_add = []
def dispatch(self):
loop = event.loop
while True:
for e in self.events_to_add:
if e is not None and e.impl is not None and e.seconds is not None:
e.impl.add(e.seconds)
e.seconds = None
self.events_to_add = []
result = loop()
if getattr(event, '__event_exc', None) is not None:
# only have to do this because of bug in event.loop
t = getattr(event, '__event_exc')
setattr(event, '__event_exc', None)
assert getattr(event, '__event_exc') is None
raise t[0], t[1], t[2]
if result != 0:
return result
def run(self):
while True:
try:
self.dispatch()
except greenlet.GreenletExit:
break
except self.SYSTEM_EXCEPTIONS:
raise
except:
if self.signal_exc_info is not None:
self.schedule_call_global(
0, greenlet.getcurrent().parent.throw, *self.signal_exc_info)
self.signal_exc_info = None
else:
self.squelch_timer_exception(None, sys.exc_info())
def abort(self, wait=True):
self.schedule_call_global(0, self.greenlet.throw, greenlet.GreenletExit)
if wait:
assert self.greenlet is not greenlet.getcurrent(), "Can't abort with wait from inside the hub's greenlet."
self.switch()
def _getrunning(self):
return bool(self.greenlet)
def _setrunning(self, value):
pass # exists for compatibility with BaseHub
running = property(_getrunning, _setrunning)
def add(self, evtype, fileno, real_cb):
# this is stupid: pyevent won't call a callback unless it's a function,
# so we have to force it to be one here
if isinstance(real_cb, types.BuiltinMethodType):
def cb(_d):
real_cb(_d)
else:
cb = real_cb
if evtype is READ:
evt = event.read(fileno, cb, fileno)
elif evtype is WRITE:
evt = event.write(fileno, cb, fileno)
return super(Hub,self).add(evtype, fileno, evt)
def signal(self, signalnum, handler):
def wrapper():
try:
handler(signalnum, None)
except:
self.signal_exc_info = sys.exc_info()
event.abort()
return event_wrapper(event.signal(signalnum, wrapper))
def remove(self, listener):
super(Hub, self).remove(listener)
listener.cb.delete()
def remove_descriptor(self, fileno):
for lcontainer in self.listeners.itervalues():
listener = lcontainer.pop(fileno, None)
if listener:
try:
listener.cb.delete()
except self.SYSTEM_EXCEPTIONS:
raise
except:
traceback.print_exc()
def schedule_call_local(self, seconds, cb, *args, **kwargs):
current = greenlet.getcurrent()
if current is self.greenlet:
return self.schedule_call_global(seconds, cb, *args, **kwargs)
event_impl = event.event(_scheduled_call_local, (cb, args, kwargs, current))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
schedule_call = schedule_call_local
def schedule_call_global(self, seconds, cb, *args, **kwargs):
event_impl = event.event(_scheduled_call, (cb, args, kwargs))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
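    # Note (added comment): both schedule_call variants only queue the
    # wrapper on self.events_to_add; dispatch() arms each pending timer with
    # e.impl.add(e.seconds) just before calling event.loop(), so timers
    # created from any greenlet are registered inside the hub's own loop.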
def _version_info(self):
baseversion = event.__version__
return baseversion
def _scheduled_call(event_impl, handle, evtype, arg):
cb, args, kwargs = arg
try:
cb(*args, **kwargs)
finally:
event_impl.delete()
def _scheduled_call_local(event_impl, handle, evtype, arg):
cb, args, kwargs, caller_greenlet = arg
try:
if not caller_greenlet.dead:
cb(*args, **kwargs)
finally:
event_impl.delete()
| mit |