Dataset columns:
repo_name: string (length 6 to 100)
path: string (length 4 to 294)
copies: string (length 1 to 5)
size: string (length 4 to 6)
content: string (length 606 to 896k)
license: string (one of 15 classes)
repo_name: mozbhearsum/balrog
path: auslib/blobs/systemaddons.py
copies: 1
size: 4440
content:
from auslib.AUS import isForbiddenUrl
from auslib.blobs.base import Blob
from auslib.errors import BadDataError


class SystemAddonsBlob(Blob):
    jsonschema = "systemaddons.yml"

    def __init__(self, **kwargs):
        Blob.__init__(self, **kwargs)
        if "schema_version" not in self:
            self["schema_version"] = 5000

    def getAddonsForPlatform(self, platform):
        for v in self.get("addons", {}):
            platforms = self["addons"].get(v, {}).get("platforms", {})
            if platform in platforms or "default" in platforms:
                yield v

    def getResolvedPlatform(self, addon, platform):
        platforms = self.get("addons", {}).get(addon, {}).get("platforms", {})
        if platform in platforms:
            return self.get("addons", {}).get(addon, {}).get("platforms", {}).get(platform, {}).get("alias", platform)
        if "default" in platforms:
            return "default"
        raise BadDataError("No platform '%s' or default in addon '%s'", platform, addon)

    def getPlatformData(self, addon, platform):
        platform = self.getResolvedPlatform(addon, platform)
        return self.get("addons", {}).get(addon, {}).get("platforms", {}).get(platform)

    def shouldServeUpdate(self, updateQuery):
        # SystemAddon updates should always be returned. It is the responsibility
        # of the client to decide whether or not any action needs to be taken,
        # similar to GMP.
        return True

    # If there are no updates, we have a special response for SystemAddons
    # blobs. We return <updates></updates>, without the addons tags.
    def hasUpdates(self, updateQuery, whitelistedDomains):
        buildTarget = updateQuery["buildTarget"]
        for addon in sorted(self.getAddonsForPlatform(buildTarget)):
            # Check whether the addon update is to be served.
            platformData = self.getPlatformData(addon, buildTarget)
            url = platformData["fileUrl"]
            # There might be no updates even if we have response products if
            # they are not served from whitelisted domains.
            if isForbiddenUrl(url, updateQuery["product"], whitelistedDomains):
                continue
            return True
        return False

    # Because specialForceHosts is only relevant to our own internal servers,
    # and these types of updates are always served externally, we don't process
    # them in SystemAddon blobs, similar to GMP.
    def getInnerXML(self, updateQuery, update_type, whitelistedDomains, specialForceHosts):
        # In case we have an uninstall blob, we won't have the addons section.
        if self.get("addons") is None:
            return []
        buildTarget = updateQuery["buildTarget"]
        addonXML = []
        for addon in sorted(self.getAddonsForPlatform(buildTarget)):
            addonInfo = self["addons"][addon]
            platformData = self.getPlatformData(addon, buildTarget)
            url = platformData["fileUrl"]
            if isForbiddenUrl(url, updateQuery["product"], whitelistedDomains):
                continue
            addonXML.append(
                '        <addon id="%s" URL="%s" hashFunction="%s" hashValue="%s" size="%s" version="%s"/>'
                % (addon, url, self["hashFunction"], platformData["hashValue"],
                   platformData["filesize"], addonInfo["version"])
            )
        return addonXML

    def getInnerHeaderXML(self, updateQuery, update_type, whitelistedDomains, specialForceHosts):
        if self.get("uninstall", False) or self.hasUpdates(updateQuery, whitelistedDomains):
            return "    <addons>"
        else:
            return ""

    def getInnerFooterXML(self, updateQuery, update_type, whitelistedDomains, specialForceHosts):
        if self.get("uninstall", False) or self.hasUpdates(updateQuery, whitelistedDomains):
            return "    </addons>"
        else:
            return ""

    def containsForbiddenDomain(self, product, whitelistedDomains):
        """Returns True if the blob contains any file URLs that contain a
        domain that we're not allowed to serve updates to."""
        for addon in self.get("addons", {}).values():
            for platform in addon.get("platforms", {}).values():
                if "fileUrl" in platform:
                    if isForbiddenUrl(platform["fileUrl"], product, whitelistedDomains):
                        return True
        return False
license: mpl-2.0
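A quick usage sketch for the record above (not from the repo): constructing a SystemAddonsBlob from a dict payload and rendering the inner XML. The payload values, the query, and the whitelist shape are invented for illustration.

# Hypothetical sketch, assuming Blob behaves like a dict initialised from
# keyword arguments and that whitelistedDomains maps domain -> products.
blob = SystemAddonsBlob(
    name='example-blob',
    hashFunction='sha512',
    addons={
        'hotfix@mozilla.org': {
            'version': '1.0',
            'platforms': {
                'default': {'fileUrl': 'https://ftp.mozilla.org/hotfix.xpi',
                            'hashValue': 'abc123', 'filesize': 1024},
            },
        },
    },
)
query = {'buildTarget': 'WINNT_x86-msvc', 'product': 'SystemAddons'}
whitelist = {'ftp.mozilla.org': ('SystemAddons',)}
print(blob.getInnerHeaderXML(query, 'minor', whitelist, ()))       # "    <addons>"
print('\n'.join(blob.getInnerXML(query, 'minor', whitelist, ())))  # one <addon .../> per served addon
print(blob.getInnerFooterXML(query, 'minor', whitelist, ()))       # "    </addons>"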
repo_name: coreycb/horizon
path: openstack_dashboard/dashboards/project/networks/subnets/tables.py
copies: 5
size: 5494
content:
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy

from horizon import exceptions
from horizon import tables
from horizon.utils import memoized

from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas


LOG = logging.getLogger(__name__)


class CheckNetworkEditable(object):
    """Mixin class to determine whether the specified network is editable."""

    def allowed(self, request, datum=None):
        # Only administrators are allowed to create and manage subnets
        # on shared networks.
        network = self.table._get_network()
        if network.shared:
            return False
        return True


class SubnetPolicyTargetMixin(policy.PolicyTargetMixin):

    def get_policy_target(self, request, datum=None):
        policy_target = super(SubnetPolicyTargetMixin, self)\
            .get_policy_target(request, datum)
        network = self.table._get_network()
        # neutron switched policy target values, so we support both
        policy_target["network:tenant_id"] = network.tenant_id
        policy_target["network:project_id"] = network.tenant_id
        return policy_target


class DeleteSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
                   tables.DeleteAction):
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Subnet",
            u"Delete Subnets",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Subnet",
            u"Deleted Subnets",
            count
        )

    policy_rules = (("network", "delete_subnet"),)

    def delete(self, request, obj_id):
        try:
            api.neutron.subnet_delete(request, obj_id)
        except Exception:
            msg = _('Failed to delete subnet %s') % obj_id
            LOG.info(msg)
            network_id = self.table.kwargs['network_id']
            redirect = reverse('horizon:project:networks:detail',
                               args=[network_id])
            exceptions.handle(request, msg, redirect=redirect)


class CreateSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
                   tables.LinkAction):
    name = "create"
    verbose_name = _("Create Subnet")
    url = "horizon:project:networks:addsubnet"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_subnet"),)

    def get_link_url(self, datum=None):
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id,))

    def allowed(self, request, datum=None):
        usages = quotas.tenant_quota_usages(request)
        if usages['subnets']['available'] <= 0:
            if 'disabled' not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
                self.verbose_name = _('Create Subnet (Quota exceeded)')
        else:
            self.verbose_name = _('Create Subnet')
            self.classes = [c for c in self.classes if c != 'disabled']
        return True


class UpdateSubnet(SubnetPolicyTargetMixin, CheckNetworkEditable,
                   tables.LinkAction):
    name = "update"
    verbose_name = _("Edit Subnet")
    url = "horizon:project:networks:editsubnet"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_subnet"),)

    def get_link_url(self, subnet):
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id, subnet.id))


class SubnetsTable(tables.DataTable):
    name = tables.WrappingColumn(
        "name_or_id",
        verbose_name=_("Name"),
        link='horizon:project:networks:subnets:detail')
    cidr = tables.Column("cidr", verbose_name=_("Network Address"))
    ip_version = tables.Column("ipver_str", verbose_name=_("IP Version"))
    gateway_ip = tables.Column("gateway_ip", verbose_name=_("Gateway IP"))
    failure_url = reverse_lazy('horizon:project:networks:index')

    @memoized.memoized_method
    def _get_network(self):
        try:
            network_id = self.kwargs['network_id']
            network = api.neutron.network_get(self.request, network_id)
            network.set_id_as_name_if_empty(length=0)
        except Exception:
            network = None
            msg = _('Unable to retrieve details for network "%s".') \
                % network_id
            exceptions.handle(self.request, msg)
        return network

    class Meta(object):
        name = "subnets"
        verbose_name = _("Subnets")
        table_actions = (CreateSubnet, DeleteSubnet, tables.FilterAction,)
        row_actions = (UpdateSubnet, DeleteSubnet)
        hidden_title = False
license: apache-2.0
repo_name: sanyaade-teachings/gyp
path: test/intermediate_dir/gyptest-intermediate-dir.py
copies: 100
size: 1400
content:
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that targets have independent INTERMEDIATE_DIRs.
"""

import TestGyp

test = TestGyp.TestGyp()

test.run_gyp('test.gyp', chdir='src')
test.build('test.gyp', 'target1', chdir='src')

# Check stuff exists.
intermediate_file1 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')

shared_intermediate_file1 = test.read('src/shared_outfile.txt')
test.must_contain(shared_intermediate_file1, 'shared_target1')

test.run_gyp('test2.gyp', chdir='src')

# Force the shared intermediate to be rebuilt.
test.sleep()
test.touch('src/shared_infile.txt')
test.build('test2.gyp', 'target2', chdir='src')

# Check that the INTERMEDIATE_DIR file didn't get overwritten but the
# SHARED_INTERMEDIATE_DIR file did.
intermediate_file2 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
test.must_contain(intermediate_file2, 'target2')

shared_intermediate_file2 = test.read('src/shared_outfile.txt')
if shared_intermediate_file1 != shared_intermediate_file2:
  test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)

test.must_contain(shared_intermediate_file1, 'shared_target2')
test.must_contain(shared_intermediate_file2, 'shared_target2')

test.pass_test()
license: bsd-3-clause
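The gyp fixtures the test drives (src/test.gyp and src/test2.gyp) are not part of this record. As a rough sketch of the shape such a fixture plausibly takes (target, action, and file names invented), a target whose action writes into its own per-target intermediate directory looks like:

# Hypothetical fixture sketch: <(INTERMEDIATE_DIR) is private to each target,
# while <(SHARED_INTERMEDIATE_DIR) is shared across targets -- exactly the
# distinction the test above verifies. A gyp file is a Python dict literal.
{
  'targets': [
    {
      'target_name': 'target1',
      'type': 'none',
      'actions': [{
        'action_name': 'write_intermediate',
        'inputs': ['infile.txt'],
        'outputs': ['<(INTERMEDIATE_DIR)/out.txt'],
        'action': ['python', 'write.py', '<(INTERMEDIATE_DIR)/out.txt'],
      }],
    },
  ],
}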
repo_name: HaiFangHui/cf_log_parser
path: models.py
copies: 1
size: 2248
content:
import gzip
import re

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import Column, Integer, String, DateTime, Text, func, desc

from config import config

db_spec = config.get('DATABASE', 'DB_SPEC')
engine = create_engine(db_spec)
session = scoped_session(sessionmaker(bind=engine))

Base = declarative_base()


class LogEntry(Base):
    __tablename__ = 'log_entries'

    id = Column(Integer, primary_key=True)
    logtime = Column(String(200))
    edge = Column(String(200))
    bytesent = Column(Integer)
    cip = Column(String(100))
    method = Column(String(20))
    host = Column(String(100))
    uri = Column(String(1024))
    status = Column(String(10))
    creferrer = Column(Text)
    useragent = Column(Text)
    cs_uri_query = Column(Text)
    cookie = Column(Text)
    x_edge_result_type = Column(Text)
    x_edge_request_id = Column(Text)
    x_host_header = Column(String(100))
    protocol = Column(String)
    cs_bytes = Column(Integer)
    time_taken = Column(String)

    def load_from(self, line):
        # CloudFront access logs are tab-separated.
        fields = line.split("\t")
        self.logtime = fields[0] + ' ' + fields[1]
        self.edge = fields[2]
        self.bytesent = fields[3]
        self.cip = fields[4]
        self.method = fields[5]
        self.host = fields[6]
        self.uri = fields[7]
        self.status = fields[8]
        self.creferrer = fields[9]
        self.useragent = fields[10]
        self.cs_uri_query = fields[11]
        self.cookie = fields[12]
        self.x_edge_result_type = fields[13]
        self.x_edge_request_id = fields[14]
        self.x_host_header = fields[15]
        self.protocol = fields[16]
        self.cs_bytes = fields[17]
        self.time_taken = fields[18]
        return self


class LogFile(Base):
    __tablename__ = 'log_files'

    id = Column(Integer, primary_key=True)
    filename = Column(String(100), unique=True)


def parse_log_data(data):
    for line in data.splitlines():
        line = line.strip()
        # Skip comment/header lines.
        if re.search('^#', line):
            continue
        log_entry = LogEntry()
        log_entry.load_from(line)
        session.add(log_entry)
license: mit
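A usage sketch for the record above (not part of the repo): create the schema, feed a gzipped CloudFront access log through parse_log_data, and commit the pending rows. The log file name is invented, and importing the module as models assumes it sits on the import path.

# Hypothetical usage sketch for models.py.
import gzip

from models import Base, engine, session, parse_log_data

Base.metadata.create_all(engine)   # create the log_entries / log_files tables

with gzip.open('E2EXAMPLE.2017-01-01-00.abcd1234.gz', 'rb') as f:
    parse_log_data(f.read().decode('utf-8'))   # '#' header lines are skipped
session.commit()   # parse_log_data only session.add()s; rows stay pending until committed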
repo_name: mindnervestech/mnrp
path: addons/l10n_be/__openerp__.py
copies: 50
size: 3750
content:
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Belgium - Accounting',
    'version': '1.1',
    'category': 'Localization/Account Charts',
    'description': """
This is the base module to manage the accounting chart for Belgium in OpenERP.
==============================================================================

After installing this module, the Configuration wizard for accounting is launched.
    * We have the account templates which can be helpful to generate Charts of Accounts.
    * On that particular wizard, you will be asked to pass the name of the company,
      the chart template to follow, the number of digits to generate, the code for your
      account and bank account, and the currency to create journals.

Thus, the pure copy of the Chart Template is generated.

Wizards provided by this module:
--------------------------------
    * Partner VAT Intra: Enlists the partners with their related VAT and invoiced
      amounts. Prepares an XML file format.

      **Path to access:** Invoicing/Reporting/Legal Reports/Belgium Statements/Partner VAT Intra
    * Periodical VAT Declaration: Prepares an XML file for the VAT declaration of
      the main company of the user currently logged in.

      **Path to access:** Invoicing/Reporting/Legal Reports/Belgium Statements/Periodical VAT Declaration
    * Annual Listing of VAT-Subjected Customers: Prepares an XML file for the VAT
      declaration of the main company of the user currently logged in, based on fiscal year.

      **Path to access:** Invoicing/Reporting/Legal Reports/Belgium Statements/Annual Listing Of VAT-Subjected Customers
""",
    'author': 'Noviat & OpenERP SA',
    'depends': [
        'account',
        'base_vat',
        'base_iban',
        'account_chart',
        'l10n_be_coda',
        'l10n_multilang',
    ],
    'data': [
        'account_financial_report.xml',
        'account_pcmn_belgium.xml',
        'account_tax_code_template.xml',
        'account_chart_template.xml',
        'account_chart_template.yml',
        'account_tax_template.xml',
        'wizard/l10n_be_account_vat_declaration_view.xml',
        'wizard/l10n_be_vat_intra_view.xml',
        'wizard/l10n_be_partner_vat_listing.xml',
        'wizard/account_wizard.xml',
        'l10n_be_sequence.xml',
        'l10n_be_reports.xml',
        'fiscal_templates.xml',
        'account_fiscal_position_tax_template.xml',
        'security/ir.model.access.csv',
        'views/report_vatintraprint.xml',
        'views/report_vatpartnerlisting.xml',
    ],
    'demo': [],
    'installable': True,
    'website': 'https://www.odoo.com/page/accounting',
    'images': ['images/1_config_chart_l10n_be.jpeg', 'images/2_l10n_be_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
license: agpl-3.0
repo_name: SlimRoms/android_external_chromium_org
path: chrome/app/theme/PRESUBMIT.py
copies: 121
size: 1455
content:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Presubmit script for Chromium theme resources.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""


def CheckChangeOnUpload(input_api, output_api):
  return _CommonChecks(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  return _CommonChecks(input_api, output_api)


def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []

  resources = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                     '../../../ui/resources')

  # List of paths with their associated scale factor. This is used to verify
  # that the images modified in one are the correct scale of the other.
  path_scales = [
      [(100, 'default_100_percent/'), (200, 'default_200_percent/')],
  ]

  import sys
  old_path = sys.path
  try:
    sys.path = [resources] + old_path
    from resource_check import resource_scale_factors

    for paths in path_scales:
      results.extend(resource_scale_factors.ResourceScaleFactors(
          input_api, output_api, paths).RunChecks())
  finally:
    sys.path = old_path

  return results
license: bsd-3-clause
repo_name: SlimRoms/android_external_chromium_org
path: chrome/common/extensions/docs/server2/availability_finder_test.py
copies: 7
size: 15139
content:
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys
import unittest

import api_schema_graph
from availability_finder import AvailabilityFinder, AvailabilityInfo
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem
from fake_host_file_system_provider import FakeHostFileSystemProvider
from fake_url_fetcher import FakeUrlFetcher
from host_file_system_iterator import HostFileSystemIterator
from mock_function import MockFunction
from object_store_creator import ObjectStoreCreator
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.object_level_availability.tabs import TABS_SCHEMA_BRANCHES
from test_util import Server2Path

TABS_UNMODIFIED_VERSIONS = (16, 20, 23, 24)


class AvailabilityFinderTest(unittest.TestCase):

  def setUp(self):
    self._branch_utility = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(Server2Path('test_data')),
        ObjectStoreCreator.ForTest())

    api_fs_creator = FakeHostFileSystemProvider(CANNED_API_FILE_SYSTEM_DATA)
    self._node_fs_creator = FakeHostFileSystemProvider(TABS_SCHEMA_BRANCHES)

    def create_availability_finder(host_fs_creator):
      test_object_store = ObjectStoreCreator.ForTest()
      return AvailabilityFinder(
          self._branch_utility,
          CompiledFileSystem.Factory(test_object_store),
          HostFileSystemIterator(host_fs_creator, self._branch_utility),
          host_fs_creator.GetTrunk(),
          test_object_store)

    self._avail_finder = create_availability_finder(api_fs_creator)
    self._node_avail_finder = create_availability_finder(self._node_fs_creator)

    # Imitate the actual SVN file system by incrementing the stats for paths
    # where an API schema has changed.
    last_stat = type('last_stat', (object,), {'val': 0})

    def stat_paths(file_system, channel_info):
      if channel_info.version not in TABS_UNMODIFIED_VERSIONS:
        last_stat.val += 1
      # HACK: |file_system| is a MockFileSystem backed by a TestFileSystem.
      # Increment the TestFileSystem stat count.
      file_system._file_system.IncrementStat(by=last_stat.val)
      # Continue looping. The iterator will stop after 'trunk' automatically.
      return True

    # Use the HostFileSystemIterator created above to change global stat values
    # for the TestFileSystems that it creates.
    self._node_avail_finder._file_system_iterator.Ascending(
        # The earliest version represented with the tabs' test data is 13.
        self._branch_utility.GetStableChannelInfo(13),
        stat_paths)

  def testGraphOptimization(self):
    # Keep track of how many times the APISchemaGraph constructor is called.
    original_constructor = api_schema_graph.APISchemaGraph
    mock_constructor = MockFunction(original_constructor)
    api_schema_graph.APISchemaGraph = mock_constructor

    try:
      # The test data includes an extra branch where the API does not exist.
      num_versions = len(TABS_SCHEMA_BRANCHES) - 1
      # We expect an APISchemaGraph to be created only when an API schema file
      # has different stat data from the previous version's schema file.
      num_graphs_created = num_versions - len(TABS_UNMODIFIED_VERSIONS)

      # Run the logic for object-level availability for an API.
      self._node_avail_finder.GetAPINodeAvailability('tabs')

      self.assertTrue(*api_schema_graph.APISchemaGraph.CheckAndReset(
          num_graphs_created))
    finally:
      # Ensure that the APISchemaGraph constructor is reset to be the original
      # constructor.
      api_schema_graph.APISchemaGraph = original_constructor

  def testGetAPIAvailability(self):
    # Key: Using 'channel' (i.e. 'beta') to represent an availability listing
    # for an API in a _features.json file, and using |channel| (i.e. |dev|) to
    # represent the development channel, or phase of development, where an
    # API's availability is being checked.

    # Testing APIs with predetermined availability.
    self.assertEqual(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('jsonTrunkAPI'))
    self.assertEqual(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('jsonDevAPI'))
    self.assertEqual(
        AvailabilityInfo(ChannelInfo('beta', CANNED_BRANCHES[27], 27)),
        self._avail_finder.GetAPIAvailability('jsonBetaAPI'))
    self.assertEqual(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[20], 20)),
        self._avail_finder.GetAPIAvailability('jsonStableAPI'))

    # Testing a whitelisted API.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('beta', CANNED_BRANCHES[27], 27)),
        self._avail_finder.GetAPIAvailability('declarativeWebRequest'))

    # Testing APIs found only by checking file system existence.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[23], 23)),
        self._avail_finder.GetAPIAvailability('windows'))
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[18], 18)),
        self._avail_finder.GetAPIAvailability('tabs'))
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[18], 18)),
        self._avail_finder.GetAPIAvailability('input.ime'))

    # Testing API channel existence for _api_features.json.
    # Listed as 'dev' on |beta|, 'dev' on |dev|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('systemInfo.stuff'))
    # Listed as 'stable' on |beta|.
    self.assertEquals(
        AvailabilityInfo(
            ChannelInfo('beta', CANNED_BRANCHES[27], 27),
            scheduled=28),
        self._avail_finder.GetAPIAvailability('systemInfo.cpu'))

    # Testing API channel existence for _manifest_features.json.
    # Listed as 'trunk' on all channels.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('sync'))
    # No records of API until |trunk|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('history'))
    # Listed as 'dev' on |dev|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('storage'))
    # Stable in _manifest_features and into pre-18 versions.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[8], 8)),
        self._avail_finder.GetAPIAvailability('pageAction'))

    # Testing API channel existence for _permission_features.json.
    # Listed as 'beta' on |trunk|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('falseBetaAPI'))
    # Listed as 'trunk' on |trunk|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('trunkAPI'))
    # Listed as 'trunk' on all development channels.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('declarativeContent'))
    # Listed as 'dev' on all development channels.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('bluetooth'))
    # Listed as 'dev' on |dev|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('cookies'))
    # Treated as 'stable' APIs.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[24], 24)),
        self._avail_finder.GetAPIAvailability('alarms'))
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[21], 21)),
        self._avail_finder.GetAPIAvailability('bookmarks'))

    # Testing older API existence using extension_api.json.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[6], 6)),
        self._avail_finder.GetAPIAvailability('menus'))
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[5], 5)),
        self._avail_finder.GetAPIAvailability('idle'))

    # Switches between _features.json files across branches.
    # Listed as 'trunk' on all channels, in _api, _permission, or _manifest.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('contextMenus'))
    # Moves between _permission and _manifest as file system is traversed.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[23], 23)),
        self._avail_finder.GetAPIAvailability('systemInfo.display'))
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[17], 17)),
        self._avail_finder.GetAPIAvailability('webRequest'))

    # Mid-upgrade cases:
    # Listed as 'dev' on |beta| and 'beta' on |dev|.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
        self._avail_finder.GetAPIAvailability('notifications'))
    # Listed as 'beta' on |stable|, 'dev' on |beta| ... until |stable| on trunk.
    self.assertEquals(
        AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
        self._avail_finder.GetAPIAvailability('events'))

  def testGetAPINodeAvailability(self):
    # Allow the LookupResult constructions below to take just one line.
    lookup_result = api_schema_graph.LookupResult
    availability_graph = self._node_avail_finder.GetAPINodeAvailability('tabs')

    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('trunk')),
        availability_graph.Lookup('tabs', 'properties', 'fakeTabsProperty3'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('dev')),
        availability_graph.Lookup('tabs', 'events', 'onActivated',
                                  'parameters', 'activeInfo', 'properties',
                                  'windowId'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('dev')),
        availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
                                  'tab'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('beta')),
        availability_graph.Lookup('tabs', 'events', 'onActivated'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('beta')),
        availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
                                  'tabId'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('stable')),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails',
                                  'properties', 'code'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetChannelInfo('stable')),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails',
                                  'properties', 'file'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(25)),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails'))

    # Nothing new in version 24 or 23.

    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(22)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'windowId'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(21)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'selected'))

    # Nothing new in version 20.

    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(19)),
        availability_graph.Lookup('tabs', 'functions', 'getCurrent'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(18)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'index'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(17)),
        availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
                                  'changeInfo'))

    # Nothing new in version 16.

    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(15)),
        availability_graph.Lookup('tabs', 'properties', 'fakeTabsProperty2'))

    # Everything else is available at the API's release, version 14 here.
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'types', 'Tab'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties', 'url'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'properties', 'fakeTabsProperty1'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
                                  'callback'))
    self.assertEquals(
        lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'events', 'onUpdated'))

    # Test things that aren't available.
    self.assertEqual(lookup_result(False, None),
                     availability_graph.Lookup('tabs', 'types', 'UpdateInfo'))
    self.assertEqual(lookup_result(False, None),
                     availability_graph.Lookup('tabs', 'functions', 'get',
                                               'parameters', 'callback',
                                               'parameters', 'tab', 'id'))
    self.assertEqual(lookup_result(False, None),
                     availability_graph.Lookup('functions'))
    self.assertEqual(lookup_result(False, None),
                     availability_graph.Lookup('events', 'onActivated',
                                               'parameters', 'activeInfo',
                                               'tabId'))


if __name__ == '__main__':
  unittest.main()
license: bsd-3-clause
repo_name: jaimahajan1997/sympy
path: sympy/polys/numberfields.py
copies: 37
size: 31789
content:
"""Computational algebraic field theory. """ from __future__ import print_function, division from sympy import ( S, Rational, AlgebraicNumber, Add, Mul, sympify, Dummy, expand_mul, I, pi ) from sympy.functions.elementary.exponential import exp from sympy.functions.elementary.trigonometric import cos, sin from sympy.polys.polytools import ( Poly, PurePoly, sqf_norm, invert, factor_list, groebner, resultant, degree, poly_from_expr, parallel_poly_from_expr, lcm ) from sympy.polys.polyerrors import ( IsomorphismFailed, CoercionFailed, NotAlgebraic, GeneratorsError, ) from sympy.polys.rootoftools import CRootOf from sympy.polys.specialpolys import cyclotomic_poly from sympy.polys.polyutils import dict_from_expr, expr_from_dict from sympy.polys.domains import ZZ, QQ from sympy.polys.orthopolys import dup_chebyshevt from sympy.polys.rings import ring from sympy.polys.ring_series import rs_compose_add from sympy.printing.lambdarepr import LambdaPrinter from sympy.utilities import ( numbered_symbols, variations, lambdify, public, sift ) from sympy.core.exprtools import Factors from sympy.core.function import _mexpand from sympy.simplify.radsimp import _split_gcd from sympy.simplify.simplify import _is_sum_surds from sympy.ntheory import sieve from sympy.ntheory.factor_ import divisors from mpmath import pslq, mp from sympy.core.compatibility import reduce from sympy.core.compatibility import range def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5): """ Return a factor having root ``v`` It is assumed that one of the factors has root ``v``. """ if isinstance(factors[0], tuple): factors = [f[0] for f in factors] if len(factors) == 1: return factors[0] points = {x:v} symbols = dom.symbols if hasattr(dom, 'symbols') else [] t = QQ(1, 10) for n in range(bound**len(symbols)): prec1 = 10 n_temp = n for s in symbols: points[s] = n_temp % bound n_temp = n_temp // bound while True: candidates = [] eps = t**(prec1 // 2) for f in factors: if abs(f.as_expr().evalf(prec1, points)) < eps: candidates.append(f) if candidates: factors = candidates if len(factors) == 1: return factors[0] if prec1 > prec: break prec1 *= 2 raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % v) def _separate_sq(p): """ helper function for ``_minimal_polynomial_sq`` It selects a rational ``g`` such that the polynomial ``p`` consists of a sum of terms whose surds squared have gcd equal to ``g`` and a sum of terms with surds squared prime with ``g``; then it takes the field norm to eliminate ``sqrt(g)`` See simplify.simplify.split_surds and polytools.sqf_norm. Examples ======== >>> from sympy import sqrt >>> from sympy.abc import x >>> from sympy.polys.numberfields import _separate_sq >>> p= -x + sqrt(2) + sqrt(3) + sqrt(7) >>> p = _separate_sq(p); p -x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8 >>> p = _separate_sq(p); p -x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20 >>> p = _separate_sq(p); p -x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400 """ from sympy.utilities.iterables import sift def is_sqrt(expr): return expr.is_Pow and expr.exp is S.Half # p = c1*sqrt(q1) + ... 
+ cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)] a = [] for y in p.args: if not y.is_Mul: if is_sqrt(y): a.append((S.One, y**2)) elif y.is_Atom: a.append((y, S.One)) elif y.is_Pow and y.exp.is_integer: a.append((y, S.One)) else: raise NotImplementedError continue sifted = sift(y.args, is_sqrt) a.append((Mul(*sifted[False]), Mul(*sifted[True])**2)) a.sort(key=lambda z: z[1]) if a[-1][1] is S.One: # there are no surds return p surds = [z for y, z in a] for i in range(len(surds)): if surds[i] != 1: break g, b1, b2 = _split_gcd(*surds[i:]) a1 = [] a2 = [] for y, z in a: if z in b1: a1.append(y*z**S.Half) else: a2.append(y*z**S.Half) p1 = Add(*a1) p2 = Add(*a2) p = _mexpand(p1**2) - _mexpand(p2**2) return p def _minimal_polynomial_sq(p, n, x): """ Returns the minimal polynomial for the ``nth-root`` of a sum of surds or ``None`` if it fails. Parameters ========== p : sum of surds n : positive integer x : variable of the returned polynomial Examples ======== >>> from sympy.polys.numberfields import _minimal_polynomial_sq >>> from sympy import sqrt >>> from sympy.abc import x >>> q = 1 + sqrt(2) + sqrt(3) >>> _minimal_polynomial_sq(q, 3, x) x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8 """ from sympy.simplify.simplify import _is_sum_surds p = sympify(p) n = sympify(n) r = _is_sum_surds(p) if not n.is_Integer or not n > 0 or not _is_sum_surds(p): return None pn = p**Rational(1, n) # eliminate the square roots p -= x while 1: p1 = _separate_sq(p) if p1 is p: p = p1.subs({x:x**n}) break else: p = p1 # _separate_sq eliminates field extensions in a minimal way, so that # if n = 1 then `p = constant*(minimal_polynomial(p))` # if n > 1 it contains the minimal polynomial as a factor. if n == 1: p1 = Poly(p) if p.coeff(x**p1.degree(x)) < 0: p = -p p = p.primitive()[1] return p # by construction `p` has root `pn` # the minimal polynomial is the factor vanishing in x = pn factors = factor_list(p)[1] result = _choose_factor(factors, x, pn) return result def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None): """ return the minimal polynomial for ``op(ex1, ex2)`` Parameters ========== op : operation ``Add`` or ``Mul`` ex1, ex2 : expressions for the algebraic elements x : indeterminate of the polynomials dom: ground domain mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None Examples ======== >>> from sympy import sqrt, Add, Mul, QQ >>> from sympy.polys.numberfields import _minpoly_op_algebraic_element >>> from sympy.abc import x, y >>> p1 = sqrt(sqrt(2) + 1) >>> p2 = sqrt(sqrt(2) - 1) >>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ) x - 1 >>> q1 = sqrt(y) >>> q2 = 1 / y >>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.frac_field(y)) x**2*y**2 - 2*x*y - y**3 + 1 References ========== [1] http://en.wikipedia.org/wiki/Resultant [2] I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638 "Degrees of sums in a separable field extension". 
""" y = Dummy(str(x)) if mp1 is None: mp1 = _minpoly_compose(ex1, x, dom) if mp2 is None: mp2 = _minpoly_compose(ex2, y, dom) else: mp2 = mp2.subs({x: y}) if op is Add: # mp1a = mp1.subs({x: x - y}) if dom == QQ: R, X = ring('X', QQ) p1 = R(dict_from_expr(mp1)[0]) p2 = R(dict_from_expr(mp2)[0]) else: (p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y) r = p1.compose(p2) mp1a = r.as_expr() elif op is Mul: mp1a = _muly(mp1, x, y) else: raise NotImplementedError('option not available') if op is Mul or dom != QQ: r = resultant(mp1a, mp2, gens=[y, x]) else: r = rs_compose_add(p1, p2) r = expr_from_dict(r.as_expr_dict(), x) deg1 = degree(mp1, x) deg2 = degree(mp2, y) if op is Mul and deg1 == 1 or deg2 == 1: # if deg1 = 1, then mp1 = x - a; mp1a = x - y - a; # r = mp2(x - a), so that `r` is irreducible return r r = Poly(r, x, domain=dom) _, factors = r.factor_list() res = _choose_factor(factors, x, op(ex1, ex2), dom) return res.as_expr() def _invertx(p, x): """ Returns ``expand_mul(x**degree(p, x)*p.subs(x, 1/x))`` """ p1 = poly_from_expr(p, x)[0] n = degree(p1) a = [c * x**(n - i) for (i,), c in p1.terms()] return Add(*a) def _muly(p, x, y): """ Returns ``_mexpand(y**deg*p.subs({x:x / y}))`` """ p1 = poly_from_expr(p, x)[0] n = degree(p1) a = [c * x**i * y**(n - i) for (i,), c in p1.terms()] return Add(*a) def _minpoly_pow(ex, pw, x, dom, mp=None): """ Returns ``minpoly(ex**pw, x)`` Parameters ========== ex : algebraic element pw : rational number x : indeterminate of the polynomial dom: ground domain mp : minimal polynomial of ``p`` Examples ======== >>> from sympy import sqrt, QQ, Rational >>> from sympy.polys.numberfields import _minpoly_pow, minpoly >>> from sympy.abc import x, y >>> p = sqrt(1 + sqrt(2)) >>> _minpoly_pow(p, 2, x, QQ) x**2 - 2*x - 1 >>> minpoly(p**2, x) x**2 - 2*x - 1 >>> _minpoly_pow(y, Rational(1, 3), x, QQ.frac_field(y)) x**3 - y >>> minpoly(y**Rational(1, 3), x) x**3 - y """ pw = sympify(pw) if not mp: mp = _minpoly_compose(ex, x, dom) if not pw.is_rational: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) if pw < 0: if mp == x: raise ZeroDivisionError('%s is zero' % ex) mp = _invertx(mp, x) if pw == -1: return mp pw = -pw ex = 1/ex y = Dummy(str(x)) mp = mp.subs({x: y}) n, d = pw.as_numer_denom() res = Poly(resultant(mp, x**d - y**n, gens=[y]), x, domain=dom) _, factors = res.factor_list() res = _choose_factor(factors, x, ex**pw, dom) return res.as_expr() def _minpoly_add(x, dom, *a): """ returns ``minpoly(Add(*a), dom, x)`` """ mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom) p = a[0] + a[1] for px in a[2:]: mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp) p = p + px return mp def _minpoly_mul(x, dom, *a): """ returns ``minpoly(Mul(*a), dom, x)`` """ mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom) p = a[0] * a[1] for px in a[2:]: mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp) p = p * px return mp def _minpoly_sin(ex, x): """ Returns the minimal polynomial of ``sin(ex)`` see http://mathworld.wolfram.com/TrigonometryAngles.html """ c, a = ex.args[0].as_coeff_Mul() if a is pi: if c.is_rational: n = c.q q = sympify(n) if q.is_prime: # for a = pi*p/q with q odd prime, using chebyshevt # write sin(q*a) = mp(sin(a))*sin(a); # the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1 a = dup_chebyshevt(n, ZZ) return Add(*[x**(n - i - 1)*a[i] for i in range(n)]) if c.p == 1: if q == 9: return 64*x**6 - 96*x**4 + 36*x**2 - 3 if n % 2 == 1: # for a = pi*p/q with q odd, use # sin(q*a) = 0 to see that the 
minimal polynomial must be # a factor of dup_chebyshevt(n, ZZ) a = dup_chebyshevt(n, ZZ) a = [x**(n - i)*a[i] for i in range(n + 1)] r = Add(*a) _, factors = factor_list(r) res = _choose_factor(factors, x, ex) return res expr = ((1 - cos(2*c*pi))/2)**S.Half res = _minpoly_compose(expr, x, QQ) return res raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_cos(ex, x): """ Returns the minimal polynomial of ``cos(ex)`` see http://mathworld.wolfram.com/TrigonometryAngles.html """ from sympy import sqrt c, a = ex.args[0].as_coeff_Mul() if a is pi: if c.is_rational: if c.p == 1: if c.q == 7: return 8*x**3 - 4*x**2 - 4*x + 1 if c.q == 9: return 8*x**3 - 6*x + 1 elif c.p == 2: q = sympify(c.q) if q.is_prime: s = _minpoly_sin(ex, x) return _mexpand(s.subs({x:sqrt((1 - x)/2)})) # for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p n = int(c.q) a = dup_chebyshevt(n, ZZ) a = [x**(n - i)*a[i] for i in range(n + 1)] r = Add(*a) - (-1)**c.p _, factors = factor_list(r) res = _choose_factor(factors, x, ex) return res raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_exp(ex, x): """ Returns the minimal polynomial of ``exp(ex)`` """ c, a = ex.args[0].as_coeff_Mul() p = sympify(c.p) q = sympify(c.q) if a == I*pi: if c.is_rational: if c.p == 1 or c.p == -1: if q == 3: return x**2 - x + 1 if q == 4: return x**4 + 1 if q == 6: return x**4 - x**2 + 1 if q == 8: return x**8 + 1 if q == 9: return x**6 - x**3 + 1 if q == 10: return x**8 - x**6 + x**4 - x**2 + 1 if q.is_prime: s = 0 for i in range(q): s += (-x)**i return s # x**(2*q) = product(factors) factors = [cyclotomic_poly(i, x) for i in divisors(2*q)] mp = _choose_factor(factors, x, ex) return mp else: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) def _minpoly_rootof(ex, x): """ Returns the minimal polynomial of a ``CRootOf`` object. 
""" p = ex.expr p = p.subs({ex.poly.gens[0]:x}) _, factors = factor_list(p, x) result = _choose_factor(factors, x, ex) return result def _minpoly_compose(ex, x, dom): """ Computes the minimal polynomial of an algebraic element using operations on minimal polynomials Examples ======== >>> from sympy import minimal_polynomial, sqrt, Rational >>> from sympy.abc import x, y >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True) x**2 - 2*x - 1 >>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True) x**2*y**2 - 2*x*y - y**3 + 1 """ if ex.is_Rational: return ex.q*x - ex.p if ex is I: return x**2 + 1 if hasattr(dom, 'symbols') and ex in dom.symbols: return x - ex if dom.is_QQ and _is_sum_surds(ex): # eliminate the square roots ex -= x while 1: ex1 = _separate_sq(ex) if ex1 is ex: return ex else: ex = ex1 if ex.is_Add: res = _minpoly_add(x, dom, *ex.args) elif ex.is_Mul: f = Factors(ex).factors r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational) if r[True] and dom == QQ: ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]]) r1 = r[True] dens = [y.q for _, y in r1] lcmdens = reduce(lcm, dens, 1) nums = [base**(y.p*lcmdens // y.q) for base, y in r1] ex2 = Mul(*nums) mp1 = minimal_polynomial(ex1, x) # use the fact that in SymPy canonicalization products of integers # raised to rational powers are organized in relatively prime # bases, and that in ``base**(n/d)`` a perfect power is # simplified with the root mp2 = ex2.q*x**lcmdens - ex2.p ex2 = ex2**Rational(1, lcmdens) res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2) else: res = _minpoly_mul(x, dom, *ex.args) elif ex.is_Pow: res = _minpoly_pow(ex.base, ex.exp, x, dom) elif ex.__class__ is sin: res = _minpoly_sin(ex, x) elif ex.__class__ is cos: res = _minpoly_cos(ex, x) elif ex.__class__ is exp: res = _minpoly_exp(ex, x) elif ex.__class__ is CRootOf: res = _minpoly_rootof(ex, x) else: raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex) return res @public def minimal_polynomial(ex, x=None, **args): """ Computes the minimal polynomial of an algebraic element. Parameters ========== ex : algebraic element expression x : independent variable of the minimal polynomial Options ======= compose : if ``True`` ``_minpoly_compose`` is used, if ``False`` the ``groebner`` algorithm polys : if ``True`` returns a ``Poly`` object domain : ground domain Notes ===== By default ``compose=True``, the minimal polynomial of the subexpressions of ``ex`` are computed, then the arithmetic operations on them are performed using the resultant and factorization. If ``compose=False``, a bottom-up algorithm is used with ``groebner``. The default algorithm stalls less frequently. If no ground domain is given, it will be generated automatically from the expression. 
Examples ======== >>> from sympy import minimal_polynomial, sqrt, solve, QQ >>> from sympy.abc import x, y >>> minimal_polynomial(sqrt(2), x) x**2 - 2 >>> minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2))) x - sqrt(2) >>> minimal_polynomial(sqrt(2) + sqrt(3), x) x**4 - 10*x**2 + 1 >>> minimal_polynomial(solve(x**3 + x + 3)[0], x) x**3 + x + 3 >>> minimal_polynomial(sqrt(y), x) x**2 - y """ from sympy.polys.polytools import degree from sympy.polys.domains import FractionField from sympy.core.basic import preorder_traversal compose = args.get('compose', True) polys = args.get('polys', False) dom = args.get('domain', None) ex = sympify(ex) if ex.is_number: # not sure if it's always needed but try it for numbers (issue 8354) ex = _mexpand(ex, recursive=True) for expr in preorder_traversal(ex): if expr.is_AlgebraicNumber: compose = False break if x is not None: x, cls = sympify(x), Poly else: x, cls = Dummy('x'), PurePoly if not dom: dom = FractionField(QQ, list(ex.free_symbols)) if ex.free_symbols else QQ if hasattr(dom, 'symbols') and x in dom.symbols: raise GeneratorsError("the variable %s is an element of the ground domain %s" % (x, dom)) if compose: result = _minpoly_compose(ex, x, dom) result = result.primitive()[1] c = result.coeff(x**degree(result, x)) if c.is_negative: result = expand_mul(-result) return cls(result, x, field=True) if polys else result.collect(x) if not dom.is_QQ: raise NotImplementedError("groebner method only works for QQ") result = _minpoly_groebner(ex, x, cls) return cls(result, x, field=True) if polys else result.collect(x) def _minpoly_groebner(ex, x, cls): """ Computes the minimal polynomial of an algebraic number using Groebner bases Examples ======== >>> from sympy import minimal_polynomial, sqrt, Rational >>> from sympy.abc import x >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=False) x**2 - 2*x - 1 """ from sympy.polys.polytools import degree from sympy.core.function import expand_multinomial generator = numbered_symbols('a', cls=Dummy) mapping, symbols, replace = {}, {}, [] def update_mapping(ex, exp, base=None): a = next(generator) symbols[ex] = a if base is not None: mapping[ex] = a**exp + base else: mapping[ex] = exp.as_expr(a) return a def bottom_up_scan(ex): if ex.is_Atom: if ex is S.ImaginaryUnit: if ex not in mapping: return update_mapping(ex, 2, 1) else: return symbols[ex] elif ex.is_Rational: return ex elif ex.is_Add: return Add(*[ bottom_up_scan(g) for g in ex.args ]) elif ex.is_Mul: return Mul(*[ bottom_up_scan(g) for g in ex.args ]) elif ex.is_Pow: if ex.exp.is_Rational: if ex.exp < 0 and ex.base.is_Add: coeff, terms = ex.base.as_coeff_add() elt, _ = primitive_element(terms, polys=True) alg = ex.base - coeff # XXX: turn this into eval() inverse = invert(elt.gen + coeff, elt).as_expr() base = inverse.subs(elt.gen, alg).expand() if ex.exp == -1: return bottom_up_scan(base) else: ex = base**(-ex.exp) if not ex.exp.is_Integer: base, exp = ( ex.base**ex.exp.p).expand(), Rational(1, ex.exp.q) else: base, exp = ex.base, ex.exp base = bottom_up_scan(base) expr = base**exp if expr not in mapping: return update_mapping(expr, 1/exp, -base) else: return symbols[expr] elif ex.is_AlgebraicNumber: if ex.root not in mapping: return update_mapping(ex.root, ex.minpoly) else: return symbols[ex.root] raise NotAlgebraic("%s doesn't seem to be an algebraic number" % ex) def simpler_inverse(ex): """ Returns True if it is more likely that the minimal polynomial algorithm works better with the inverse """ if ex.is_Pow: if 
(1/ex.exp).is_integer and ex.exp < 0: if ex.base.is_Add: return True if ex.is_Mul: hit = True a = [] for p in ex.args: if p.is_Add: return False if p.is_Pow: if p.base.is_Add and p.exp > 0: return False if hit: return True return False inverted = False ex = expand_multinomial(ex) if ex.is_AlgebraicNumber: return ex.minpoly.as_expr(x) elif ex.is_Rational: result = ex.q*x - ex.p else: inverted = simpler_inverse(ex) if inverted: ex = ex**-1 res = None if ex.is_Pow and (1/ex.exp).is_Integer: n = 1/ex.exp res = _minimal_polynomial_sq(ex.base, n, x) elif _is_sum_surds(ex): res = _minimal_polynomial_sq(ex, S.One, x) if res is not None: result = res if res is None: bus = bottom_up_scan(ex) F = [x - bus] + list(mapping.values()) G = groebner(F, list(symbols.values()) + [x], order='lex') _, factors = factor_list(G[-1]) # by construction G[-1] has root `ex` result = _choose_factor(factors, x, ex) if inverted: result = _invertx(result, x) if result.coeff(x**degree(result, x)) < 0: result = expand_mul(-result) return result minpoly = minimal_polynomial __all__.append('minpoly') def _coeffs_generator(n): """Generate coefficients for `primitive_element()`. """ for coeffs in variations([1, -1], n, repetition=True): yield list(coeffs) @public def primitive_element(extension, x=None, **args): """Construct a common number field for all extensions. """ if not extension: raise ValueError("can't compute primitive element for empty extension") if x is not None: x, cls = sympify(x), Poly else: x, cls = Dummy('x'), PurePoly if not args.get('ex', False): extension = [ AlgebraicNumber(ext, gen=x) for ext in extension ] g, coeffs = extension[0].minpoly.replace(x), [1] for ext in extension[1:]: s, _, g = sqf_norm(g, x, extension=ext) coeffs = [ s*c for c in coeffs ] + [1] if not args.get('polys', False): return g.as_expr(), coeffs else: return cls(g), coeffs generator = numbered_symbols('y', cls=Dummy) F, Y = [], [] for ext in extension: y = next(generator) if ext.is_Poly: if ext.is_univariate: f = ext.as_expr(y) else: raise ValueError("expected minimal polynomial, got %s" % ext) else: f = minpoly(ext, y) F.append(f) Y.append(y) coeffs_generator = args.get('coeffs', _coeffs_generator) for coeffs in coeffs_generator(len(Y)): f = x - sum([ c*y for c, y in zip(coeffs, Y)]) G = groebner(F + [f], Y + [x], order='lex', field=True) H, g = G[:-1], cls(G[-1], x, domain='QQ') for i, (h, y) in enumerate(zip(H, Y)): try: H[i] = Poly(y - h, x, domain='QQ').all_coeffs() # XXX: composite=False except CoercionFailed: # pragma: no cover break # G is not a triangular set else: break else: # pragma: no cover raise RuntimeError("run out of coefficient configurations") _, g = g.clear_denoms() if not args.get('polys', False): return g.as_expr(), coeffs, H else: return g, coeffs, H def is_isomorphism_possible(a, b): """Returns `True` if there is a chance for isomorphism. """ n = a.minpoly.degree() m = b.minpoly.degree() if m % n != 0: return False if n == m: return True da = a.minpoly.discriminant() db = b.minpoly.discriminant() i, k, half = 1, m//n, db//2 while True: p = sieve[i] P = p**k if P > half: break if ((da % p) % 2) and not (db % P): return False i += 1 return True def field_isomorphism_pslq(a, b): """Construct field isomorphism using PSLQ algorithm. 
""" if not a.root.is_real or not b.root.is_real: raise NotImplementedError("PSLQ doesn't support complex coefficients") f = a.minpoly g = b.minpoly.replace(f.gen) n, m, prev = 100, b.minpoly.degree(), None for i in range(1, 5): A = a.root.evalf(n) B = b.root.evalf(n) basis = [1, B] + [ B**i for i in range(2, m) ] + [A] dps, mp.dps = mp.dps, n coeffs = pslq(basis, maxcoeff=int(1e10), maxsteps=1000) mp.dps = dps if coeffs is None: break if coeffs != prev: prev = coeffs else: break coeffs = [S(c)/coeffs[-1] for c in coeffs[:-1]] while not coeffs[-1]: coeffs.pop() coeffs = list(reversed(coeffs)) h = Poly(coeffs, f.gen, domain='QQ') if f.compose(h).rem(g).is_zero: d, approx = len(coeffs) - 1, 0 for i, coeff in enumerate(coeffs): approx += coeff*B**(d - i) if A*approx < 0: return [ -c for c in coeffs ] else: return coeffs elif f.compose(-h).rem(g).is_zero: return [ -c for c in coeffs ] else: n *= 2 return None def field_isomorphism_factor(a, b): """Construct field isomorphism via factorization. """ _, factors = factor_list(a.minpoly, extension=b) for f, _ in factors: if f.degree() == 1: coeffs = f.rep.TC().to_sympy_list() d, terms = len(coeffs) - 1, [] for i, coeff in enumerate(coeffs): terms.append(coeff*b.root**(d - i)) root = Add(*terms) if (a.root - root).evalf(chop=True) == 0: return coeffs if (a.root + root).evalf(chop=True) == 0: return [ -c for c in coeffs ] else: return None @public def field_isomorphism(a, b, **args): """Construct an isomorphism between two number fields. """ a, b = sympify(a), sympify(b) if not a.is_AlgebraicNumber: a = AlgebraicNumber(a) if not b.is_AlgebraicNumber: b = AlgebraicNumber(b) if a == b: return a.coeffs() n = a.minpoly.degree() m = b.minpoly.degree() if n == 1: return [a.root] if m % n != 0: return None if args.get('fast', True): try: result = field_isomorphism_pslq(a, b) if result is not None: return result except NotImplementedError: pass return field_isomorphism_factor(a, b) @public def to_number_field(extension, theta=None, **args): """Express `extension` in the field generated by `theta`. """ gen = args.get('gen') if hasattr(extension, '__iter__'): extension = list(extension) else: extension = [extension] if len(extension) == 1 and type(extension[0]) is tuple: return AlgebraicNumber(extension[0]) minpoly, coeffs = primitive_element(extension, gen, polys=True) root = sum([ coeff*ext for coeff, ext in zip(coeffs, extension) ]) if theta is None: return AlgebraicNumber((minpoly, root)) else: theta = sympify(theta) if not theta.is_AlgebraicNumber: theta = AlgebraicNumber(theta, gen=gen) coeffs = field_isomorphism(root, theta) if coeffs is not None: return AlgebraicNumber(theta, coeffs) else: raise IsomorphismFailed( "%s is not in a subfield of %s" % (root, theta.root)) class IntervalPrinter(LambdaPrinter): """Use ``lambda`` printer but print numbers as ``mpi`` intervals. """ def _print_Integer(self, expr): return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr) def _print_Rational(self, expr): return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr) def _print_Pow(self, expr): return super(IntervalPrinter, self)._print_Pow(expr, rational=True) @public def isolate(alg, eps=None, fast=False): """Give a rational isolating interval for an algebraic number. 
""" alg = sympify(alg) if alg.is_Rational: return (alg, alg) elif not alg.is_real: raise NotImplementedError( "complex algebraic numbers are not supported") func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter()) poly = minpoly(alg, polys=True) intervals = poly.intervals(sqf=True) dps, done = mp.dps, False try: while not done: alg = func() for a, b in intervals: if a <= alg.a and alg.b <= b: done = True break else: mp.dps *= 2 finally: mp.dps = dps if eps is not None: a, b = poly.refine_root(a, b, eps=eps, fast=fast) return (a, b)
license: bsd-3-clause
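The public entry point of the record above is minimal_polynomial; a short illustration, taken directly from the module's own docstrings (compose=False selects the Groebner-based fallback, which agrees with the default resultant-based path):

# Examples drawn from the docstrings in the record above.
from sympy import sqrt, minimal_polynomial
from sympy.abc import x

p = minimal_polynomial(sqrt(2) + sqrt(3), x)                  # x**4 - 10*x**2 + 1
q = minimal_polynomial(sqrt(2) + sqrt(3), x, compose=False)   # same result via groebner
assert p == q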
repo_name: Hybrid-Cloud/conveyor
path: conveyor/conveyorheat/engine/lifecycle_plugin.py
copies: 9
size: 2138
content:
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


class LifecyclePlugin(object):
    """Base class for pre-op and post-op work on a stack.

    Implementations should extend this class and override the methods.
    """

    def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
        """Method to be run by heat before stack operations."""
        pass

    def do_post_op(self, cnxt, stack, current_stack=None, action=None,
                   is_stack_failure=False):
        """Method to be run by heat after stack operations, including failures.

        On failure to execute all the registered pre_ops, this method will be
        called if and only if the corresponding pre_op was successfully called.
        On failures of the actual stack operation, this method will be called
        if all the pre operations were successfully called.
        """
        pass

    def get_ordinal(self):
        """Get the sort order for pre and post operation execution.

        The values returned by get_ordinal are used to create a partial order
        for pre and post operation method invocations. The default ordinal
        value of 100 may be overridden.
        If class1inst.ordinal() < class2inst.ordinal(), then the method on
        class1inst will be executed before the method on class2inst.
        If class1inst.ordinal() > class2inst.ordinal(), then the method on
        class1inst will be executed after the method on class2inst.
        If class1inst.ordinal() == class2inst.ordinal(), then the order of
        method invocation is indeterminate.
        """
        return 100
license: apache-2.0
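A sketch of a concrete plugin built on the base class above (the class name and logging behaviour are invented; only the hook signatures and the ordinal convention come from the record):

# Hypothetical subclass: runs before default-ordinal plugins because 50 < 100.
from conveyor.conveyorheat.engine.lifecycle_plugin import LifecyclePlugin


class AuditLogPlugin(LifecyclePlugin):
    def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
        print('starting %s on stack %s' % (action, stack))

    def do_post_op(self, cnxt, stack, current_stack=None, action=None,
                   is_stack_failure=False):
        print('finished %s on stack %s (failed=%s)'
              % (action, stack, is_stack_failure))

    def get_ordinal(self):
        # Lower than the default 100, so this plugin's hooks sort first.
        return 50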
repo_name: samdoran/ansible
path: lib/ansible/modules/cloud/webfaction/webfaction_domain.py
copies: 63
size: 5507
content:
#!/usr/bin/python # # Create Webfaction domains and subdomains using Ansible and the Webfaction API # # ------------------------------------------ # # (c) Quentin Stafford-Fraser 2015 # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: webfaction_domain short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - > You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays. - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info. options: name: description: - The name of the domain required: true state: description: - Whether the domain should exist required: false choices: ['present', 'absent'] default: "present" subdomains: description: - Any subdomains to create. 
required: false default: null login_name: description: - The webfaction account to use required: true login_password: description: - The webfaction password to use required: true ''' EXAMPLES = ''' - name: Create a test domain webfaction_domain: name: mydomain.com state: present subdomains: - www - blog login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" - name: Delete test domain and any subdomains webfaction_domain: name: mydomain.com state: absent login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" ''' import xmlrpclib webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), state = dict(required=False, choices=['present', 'absent'], default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), login_password = dict(required=True, no_log=True), ), supports_check_mode=True ) domain_name = module.params['name'] domain_state = module.params['state'] domain_subdomains = module.params['subdomains'] session_id, account = webfaction.login( module.params['login_name'], module.params['login_password'] ) domain_list = webfaction.list_domains(session_id) domain_map = dict([(i['domain'], i) for i in domain_list]) existing_domain = domain_map.get(domain_name) result = {} # Here's where the real stuff happens if domain_state == 'present': # Does a domain with this name already exist? if existing_domain: if set(existing_domain['subdomains']) >= set(domain_subdomains): # If it exists with the right subdomains, we don't change anything. module.exit_json( changed = False, ) positional_args = [session_id, domain_name] + domain_subdomains if not module.check_mode: # If this isn't a dry run, create the domain result.update( webfaction.create_domain( *positional_args ) ) elif domain_state == 'absent': # If the domain's already not there, nothing changed. if not existing_domain: module.exit_json( changed = False, ) positional_args = [session_id, domain_name] + domain_subdomains if not module.check_mode: # If this isn't a dry run, delete the domain result.update( webfaction.delete_domain(*positional_args) ) else: module.fail_json(msg="Unknown state specified: {}".format(domain_state)) module.exit_json( changed = True, result = result ) from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
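For reference, a sketch of the raw XML-RPC calls this module wraps, using Python 2's xmlrpclib as the module itself does; the credentials and domain names are placeholders:

import xmlrpclib

api = xmlrpclib.ServerProxy('https://api.webfaction.com/')
session_id, account = api.login('my_user', 'my_password')

# Equivalent of state=present with two subdomains: the module builds
# [session_id, domain] + subdomains and splats it into create_domain.
api.create_domain(session_id, 'mydomain.com', 'www', 'blog')

# Equivalent of state=absent with a subdomain list: only the listed
# subdomains are removed; with no subdomains the whole domain is deleted.
api.delete_domain(session_id, 'mydomain.com', 'blog')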
zhreshold/mxnet
example/gluon/embedding_learning/data.py
30
6391
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import random import numpy as np import mxnet as mx from mxnet import nd def transform(data, target_wd, target_ht, is_train, box): """Crop and normalize an image nd array.""" if box is not None: x, y, w, h = box data = data[y:min(y+h, data.shape[0]), x:min(x+w, data.shape[1])] # Resize to target_wd * target_ht. data = mx.image.imresize(data, target_wd, target_ht) # Normalize in the same way as the pre-trained model. data = data.astype(np.float32) / 255.0 data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225]) if is_train: if random.random() < 0.5: data = nd.flip(data, axis=1) data, _ = mx.image.random_crop(data, (224, 224)) else: data, _ = mx.image.center_crop(data, (224, 224)) # Transpose from (target_wd, target_ht, 3) # to (3, target_wd, target_ht). data = nd.transpose(data, (2, 0, 1)) # If image is greyscale, repeat 3 times to get RGB image. if data.shape[0] == 1: data = nd.tile(data, (3, 1, 1)) return data.reshape((1,) + data.shape) class CUB200Iter(mx.io.DataIter): """Iterator for the CUB200-2011 dataset. Parameters ---------- data_path : str, The path to dataset directory. batch_k : int, Number of images per class in a batch. batch_size : int, Batch size. data_shape : tuple, Data shape. E.g. (3, 224, 224). is_train : bool, Training data or testing data. Training batches are randomly sampled. Testing batches are loaded sequentially until reaching the end. """ def __init__(self, data_path, batch_k, batch_size, data_shape, is_train): super(CUB200Iter, self).__init__(batch_size) self.data_shape = (batch_size,) + data_shape self.batch_size = batch_size self.provide_data = [('data', self.data_shape)] self.batch_k = batch_k self.is_train = is_train self.train_image_files = [[] for _ in range(100)] self.test_image_files = [] self.test_labels = [] self.boxes = {} self.test_count = 0 with open(os.path.join(data_path, 'images.txt'), 'r') as f_img, \ open(os.path.join(data_path, 'image_class_labels.txt'), 'r') as f_label, \ open(os.path.join(data_path, 'bounding_boxes.txt'), 'r') as f_box: for line_img, line_label, line_box in zip(f_img, f_label, f_box): fname = os.path.join(data_path, 'images', line_img.strip().split()[-1]) label = int(line_label.strip().split()[-1]) - 1 box = [int(float(v)) for v in line_box.split()[-4:]] self.boxes[fname] = box # Following "Deep Metric Learning via Lifted Structured Feature Embedding" paper, # we use the first 100 classes for training, and the remaining for testing.
if label < 100: self.train_image_files[label].append(fname) else: self.test_labels.append(label) self.test_image_files.append(fname) self.n_test = len(self.test_image_files) def get_image(self, img, is_train): """Load and transform an image.""" img_arr = mx.image.imread(img) img_arr = transform(img_arr, 256, 256, is_train, self.boxes[img]) return img_arr def sample_train_batch(self): """Sample a training batch (data and label).""" batch = [] labels = [] num_groups = self.batch_size // self.batch_k # For CUB200, we use the first 100 classes for training. sampled_classes = np.random.choice(100, num_groups, replace=False) for i in range(num_groups): img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]], self.batch_k, replace=False) batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames] labels += [sampled_classes[i] for _ in range(self.batch_k)] return nd.concatenate(batch, axis=0), labels def get_test_batch(self): """Sample a testing batch (data and label).""" batch_size = self.batch_size batch = [self.get_image(self.test_image_files[(self.test_count*batch_size + i) % len(self.test_image_files)], is_train=False) for i in range(batch_size)] labels = [self.test_labels[(self.test_count*batch_size + i) % len(self.test_image_files)] for i in range(batch_size)] return nd.concatenate(batch, axis=0), labels def reset(self): """Reset an iterator.""" self.test_count = 0 def next(self): """Return a batch.""" if self.is_train: data, labels = self.sample_train_batch() else: if self.test_count * self.batch_size < len(self.test_image_files): data, labels = self.get_test_batch() self.test_count += 1 else: self.test_count = 0 raise StopIteration return mx.io.DataBatch(data=[data], label=[labels]) def cub200_iterator(data_path, batch_k, batch_size, data_shape): """Return training and testing iterator for the CUB200-2011 dataset.""" return (CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=True), CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=False))
apache-2.0
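A usage sketch for the iterator pair defined above, assuming the CUB200-2011 archive has been extracted to ./CUB_200_2011 (the path is a placeholder):

train_iter, val_iter = cub200_iterator('CUB_200_2011', batch_k=5,
                                       batch_size=70, data_shape=(3, 224, 224))
batch = train_iter.next()
data, labels = batch.data[0], batch.label[0]
# 70 images = 14 sampled classes x 5 images per class (batch_size // batch_k).
print(data.shape, len(labels))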
rogeriofalcone/treeio
core/migrations/0008_auto__add_field_attachment_filename.py
1
21062
# encoding: utf-8 # Copyright 2011 Tree.io Limited # This file is part of Treeio. # License www.tree.io/license # encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Attachment.filename' db.add_column('core_attachment', 'filename', self.gf('django.db.models.fields.CharField')(default='', max_length=64), keep_default=False) def backwards(self, orm): # Deleting field 'Attachment.filename' db.delete_column('core_attachment', 'filename') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'core.accessentity': { 'Meta': {'object_name': 'AccessEntity'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.attachment': { 'Meta': 
{'object_name': 'Attachment'}, 'attached_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'attached_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']", 'null': 'True', 'blank': 'True'}), 'attached_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UpdateRecord']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}) }, 'core.comment': { 'Meta': {'object_name': 'Comment'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}), 'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}) }, 'core.configsetting': { 'Meta': {'object_name': 'ConfigSetting'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'core.group': { 'Meta': {'ordering': "['name']", 'object_name': 'Group', '_ormbases': ['core.AccessEntity']}, 'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}), 'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"}) }, 'core.invitation': { 'Meta': {'object_name': 'Invitation'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}) }, 'core.location': { 'Meta': {'object_name': 'Location', '_ormbases': ['core.Object']}, 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 
'True', 'primary_key': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Location']"}) }, 'core.module': { 'Meta': {'ordering': "['name']", 'object_name': 'Module', '_ormbases': ['core.Object']}, 'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}), 'system': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '512'}) }, 'core.modulesetting': { 'Meta': {'object_name': 'ModuleSetting'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'module': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Module']", 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'perspective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Perspective']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'core.object': { 'Meta': {'object_name': 'Object'}, 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}), 'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'read_access': 
('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}), 'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}), 'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'core.page': { 'Meta': {'ordering': "['name']", 'object_name': 'Page', '_ormbases': ['core.Object']}, 'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.PageFolder']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'core.pagefolder': { 'Meta': {'object_name': 'PageFolder', '_ormbases': ['core.Object']}, 'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}) }, 'core.perspective': { 'Meta': {'object_name': 'Perspective', '_ormbases': ['core.Object']}, 'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'modules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Module']", 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}) }, 'core.revision': { 'Meta': {'object_name': 'Revision'}, 'change_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']"}), 'previous': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'next'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Revision']"}) }, 'core.revisionfield': { 'Meta': {'object_name': 'RevisionField'}, 'field': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'field_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Revision']"}), 'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_key': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisionfield_key'", 'null': 'True', 'to': "orm['core.Object']"}), 'value_key_acc': ('django.db.models.fields.related.ForeignKey', 
[], {'blank': 'True', 'related_name': "'revisionfield_key_acc'", 'null': 'True', 'to': "orm['core.AccessEntity']"}), 'value_m2m': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'revisionfield_m2m'", 'symmetrical': 'False', 'to': "orm['core.Object']"}), 'value_m2m_acc': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'revisionfield_m2m_acc'", 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}) }, 'core.tag': { 'Meta': {'ordering': "['name']", 'object_name': 'Tag'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}) }, 'core.updaterecord': { 'Meta': {'ordering': "['-date_created']", 'object_name': 'UpdateRecord'}, 'about': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Object']"}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.User']"}), 'body': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_on_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'format_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'format_strings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'updates_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}), 'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'received_updates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}), 'record_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_updates'", 'null': 'True', 'to': "orm['core.Object']"}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}) }, 'core.user': { 'Meta': {'ordering': "['name']", 'object_name': 'User', '_ormbases': ['core.AccessEntity']}, 'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}), 'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], 
{'max_length': '256'}), 'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'core.widget': { 'Meta': {'ordering': "['weight']", 'object_name': 'Widget'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'perspective': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Perspective']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}), 'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'widget_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}) } } complete_apps = ['core']
mit
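This South schema migration is applied through Django's management machinery; a sketch, assuming the project's usual management entry point (the migration numbers follow the filename):

from django.core.management import call_command

call_command('migrate', 'core', '0008')  # forwards(): adds core_attachment.filename
call_command('migrate', 'core', '0007')  # backwards(): drops the column again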
vlukes/sfepy
tests/test_normals.py
4
2742
from __future__ import absolute_import import numpy as nm from sfepy.base.testing import TestCommon expected_normals = { # Need to be normalized! '2_3' : nm.array([[ 0, -1], [ 1, 1], [-1, 0]], dtype=nm.float64), '2_4' : nm.array([[ 0, -1], [ 1, 0], [ 0, 1], [-1, 0]], dtype=nm.float64), '3_4' : nm.array([[ 0, 0, -1], [-1, 0, 0], [ 0, -1, 0], [ 1, 1, 1]], dtype=nm.float64), '3_8' : nm.array([[ 0, 0, -1], [-1, 0, 0], [ 0, -1, 0], [ 0, 0, 1], [ 1, 0, 0], [ 0, 1, 0]], dtype=nm.float64), } class Test(TestCommon): @staticmethod def from_conf(conf, options): return Test(conf=conf, options=options) def test_normals(self): """ Check orientations of surface normals on the reference elements. """ import sfepy from sfepy.discrete import Integral, PolySpace from sfepy.discrete.fem import Mesh, FEDomain from sfepy.discrete.fem.mappings import SurfaceMapping from sfepy.linalg import normalize_vectors ok = True for geom in ['2_3', '2_4', '3_4', '3_8']: mesh = Mesh.from_file('meshes/elements/%s_1.mesh' % geom, prefix_dir=sfepy.data_dir) domain = FEDomain('domain', mesh) surface = domain.create_region('Surface', 'vertices of surface', 'facet') domain.create_surface_group(surface) sd = domain.surface_groups[surface.name] coors = domain.get_mesh_coors() gel = domain.geom_els[geom].surface_facet ps = PolySpace.any_from_args('aux', gel, 1) mapping = SurfaceMapping(coors, sd.get_connectivity(), ps) integral = Integral('i', order=1) vals, weights = integral.get_qp(gel.name) # Evaluate just in the first quadrature point... geo = mapping.get_mapping(vals[:1], weights[:1]) expected = expected_normals[geom].copy() normalize_vectors(expected) _ok = nm.allclose(expected, geo.normal[:, 0, :, 0], rtol=0.0, atol=1e-14) self.report('%s: %s' % (geom, _ok)) if not _ok: self.report('expected:') self.report(expected) self.report('actual:') self.report(geo.normal[:, 0, :, 0]) ok = ok and _ok return ok
bsd-3-clause
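The expected_normals rows above are deliberately unnormalized; normalize_vectors scales each row to unit length before the comparison. The same operation with plain numpy, shown for the '2_3' case:

import numpy as nm

normals = nm.array([[0, -1], [1, 1], [-1, 0]], dtype=nm.float64)
normals /= nm.sqrt((normals**2).sum(axis=1))[:, None]  # row-wise unit length
print(normals)  # the [1, 1] row becomes [0.7071..., 0.7071...]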
goliveirab/odoo
addons/mail/mail_followers.py
168
12482
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## import threading from openerp.osv import osv, fields from openerp import tools, SUPERUSER_ID from openerp.tools.translate import _ from openerp.tools.mail import plaintext2html class mail_followers(osv.Model): """ mail_followers holds the data related to the follow mechanism inside OpenERP. Partners can choose to follow documents (records) of any kind that inherits from mail.thread. Following documents allow to receive notifications for new messages. A subscription is characterized by: :param: res_model: model of the followed objects :param: res_id: ID of resource (may be 0 for every objects) """ _name = 'mail.followers' _rec_name = 'partner_id' _log_access = False _description = 'Document Followers' _columns = { 'res_model': fields.char('Related Document Model', required=True, select=1, help='Model of the followed resource'), 'res_id': fields.integer('Related Document ID', select=1, help='Id of the followed resource'), 'partner_id': fields.many2one('res.partner', string='Related Partner', ondelete='cascade', required=True, select=1), 'subtype_ids': fields.many2many('mail.message.subtype', string='Subtype', help="Message subtypes followed, meaning subtypes that will be pushed onto the user's Wall."), } # # Modifying followers change access rights to individual documents. As the # cache may contain accessible/inaccessible data, one has to refresh it. # def create(self, cr, uid, vals, context=None): res = super(mail_followers, self).create(cr, uid, vals, context=context) self.invalidate_cache(cr, uid, context=context) return res def write(self, cr, uid, ids, vals, context=None): res = super(mail_followers, self).write(cr, uid, ids, vals, context=context) self.invalidate_cache(cr, uid, context=context) return res def unlink(self, cr, uid, ids, context=None): res = super(mail_followers, self).unlink(cr, uid, ids, context=context) self.invalidate_cache(cr, uid, context=context) return res _sql_constraints = [('mail_followers_res_partner_res_model_id_uniq','unique(res_model,res_id,partner_id)','Error, a partner cannot follow twice the same object.')] class mail_notification(osv.Model): """ Class holding notifications pushed to partners. Followers and partners added in 'contacts to notify' receive notifications. 
""" _name = 'mail.notification' _rec_name = 'partner_id' _log_access = False _description = 'Notifications' _columns = { 'partner_id': fields.many2one('res.partner', string='Contact', ondelete='cascade', required=True, select=1), 'is_read': fields.boolean('Read', select=1, oldname='read'), 'starred': fields.boolean('Starred', select=1, help='Starred message that goes into the todo mailbox'), 'message_id': fields.many2one('mail.message', string='Message', ondelete='cascade', required=True, select=1), } _defaults = { 'is_read': False, 'starred': False, } def init(self, cr): cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_notification_partner_id_read_starred_message_id',)) if not cr.fetchone(): cr.execute('CREATE INDEX mail_notification_partner_id_read_starred_message_id ON mail_notification (partner_id, is_read, starred, message_id)') def get_partners_to_email(self, cr, uid, ids, message, context=None): """ Return the list of partners to notify, based on their preferences. :param browse_record message: mail.message to notify :param list partners_to_notify: optional list of partner ids restricting the notifications to process """ notify_pids = [] for notification in self.browse(cr, uid, ids, context=context): if notification.is_read: continue partner = notification.partner_id # Do not send to partners without email address defined if not partner.email: continue # Do not send to partners having same email address than the author (can cause loops or bounce effect due to messy database) if message.author_id and message.author_id.email == partner.email: continue # Partner does not want to receive any emails or is opt-out if partner.notify_email == 'none': continue notify_pids.append(partner.id) return notify_pids def get_signature_footer(self, cr, uid, user_id, res_model=None, res_id=None, context=None, user_signature=True): """ Format a standard footer for notification emails (such as pushed messages notification or invite emails). 
Format: <p>--<br /> Administrator </p> <div> <small>Sent from <a ...>Your Company</a> using <a ...>OpenERP</a>.</small> </div> """ footer = "" if not user_id: return footer # add user signature user = self.pool.get("res.users").browse(cr, SUPERUSER_ID, [user_id], context=context)[0] if user_signature: if user.signature: signature = user.signature else: signature = "--<br />%s" % user.name footer = tools.append_content_to_html(footer, signature, plaintext=False) # add company signature if user.company_id.website: website_url = ('http://%s' % user.company_id.website) if not user.company_id.website.lower().startswith(('http:', 'https:')) \ else user.company_id.website company = "<a style='color:inherit' href='%s'>%s</a>" % (website_url, user.company_id.name) else: company = user.company_id.name sent_by = _('Sent by %(company)s using %(odoo)s') signature_company = '<br /><small>%s</small>' % (sent_by % { 'company': company, 'odoo': "<a style='color:inherit' href='https://www.odoo.com/'>Odoo</a>" }) footer = tools.append_content_to_html(footer, signature_company, plaintext=False, container_tag='div') return footer def update_message_notification(self, cr, uid, ids, message_id, partner_ids, context=None): existing_pids = set() new_pids = set() new_notif_ids = [] for notification in self.browse(cr, uid, ids, context=context): existing_pids.add(notification.partner_id.id) # update existing notifications self.write(cr, uid, ids, {'is_read': False}, context=context) # create new notifications new_pids = set(partner_ids) - existing_pids for new_pid in new_pids: new_notif_ids.append(self.create(cr, uid, {'message_id': message_id, 'partner_id': new_pid, 'is_read': False}, context=context)) return new_notif_ids def _notify_email(self, cr, uid, ids, message_id, force_send=False, user_signature=True, context=None): message = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context) # compute partners email_pids = self.get_partners_to_email(cr, uid, ids, message, context=None) if not email_pids: return True # compute email body (signature, company data) body_html = message.body # add user signature except for mail groups, where users are usually adding their own signatures already user_id = message.author_id and message.author_id.user_ids and message.author_id.user_ids[0] and message.author_id.user_ids[0].id or None signature_company = self.get_signature_footer(cr, uid, user_id, res_model=message.model, res_id=message.res_id, context=context, user_signature=(user_signature and message.model != 'mail.group')) if signature_company: body_html = tools.append_content_to_html(body_html, signature_company, plaintext=False, container_tag='div') # compute email references references = message.parent_id.message_id if message.parent_id else False # custom values custom_values = dict() if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'): custom_values = self.pool[message.model].message_get_email_values(cr, uid, message.res_id, message, context=context) # create email values max_recipients = 50 chunks = [email_pids[x:x + max_recipients] for x in xrange(0, len(email_pids), max_recipients)] email_ids = [] for chunk in chunks: mail_values = { 'mail_message_id': message.id, 'auto_delete': (context or {}).get('mail_auto_delete', True), 'mail_server_id': (context or {}).get('mail_server_id', False), 'body_html': body_html, 'recipient_ids': [(4, id) for id in chunk], 'references': references, } 
mail_values.update(custom_values) email_ids.append(self.pool.get('mail.mail').create(cr, uid, mail_values, context=context)) # NOTE: # 1. for more than 50 followers, use the queue system # 2. do not send emails immediately if the registry is not loaded, # to prevent sending email during a simple update of the database # using the command-line. if force_send and len(chunks) < 2 and \ (not self.pool._init or getattr(threading.currentThread(), 'testing', False)): self.pool.get('mail.mail').send(cr, uid, email_ids, context=context) return True def _notify(self, cr, uid, message_id, partners_to_notify=None, context=None, force_send=False, user_signature=True): """ Send by email the notification depending on the user preferences :param list partners_to_notify: optional list of partner ids restricting the notifications to process :param bool force_send: if True, the generated mail.mail is immediately sent after being created, as if the scheduler was executed for this message only. :param bool user_signature: if True, the generated mail.mail body is the body of the related mail.message with the author's signature """ notif_ids = self.search(cr, SUPERUSER_ID, [('message_id', '=', message_id), ('partner_id', 'in', partners_to_notify)], context=context) # update or create notifications new_notif_ids = self.update_message_notification(cr, SUPERUSER_ID, notif_ids, message_id, partners_to_notify, context=context) # mail_notify_noemail (do not send email) or no partner_ids: do not send, return if context and context.get('mail_notify_noemail'): return True # browse as SUPERUSER_ID because of access to res_partner not necessarily allowed self._notify_email(cr, SUPERUSER_ID, new_notif_ids, message_id, force_send, user_signature, context=context)
agpl-3.0
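The 50-recipient batching used by _notify_email above, isolated as a plain-Python sketch (shown with Python 3's range; the original uses xrange):

max_recipients = 50
email_pids = list(range(120))  # stand-in partner ids
chunks = [email_pids[x:x + max_recipients]
          for x in range(0, len(email_pids), max_recipients)]
print([len(c) for c in chunks])  # [50, 50, 20] -> one mail.mail per chunk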
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/datasets/statecrime/data.py
3
2985
#! /usr/bin/env python """Statewide Crime Data""" __docformat__ = 'restructuredtext' COPYRIGHT = """Public domain.""" TITLE = """Statewide Crime Data 2009""" SOURCE = """ All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below. """ DESCRSHORT = """State crime data 2009""" DESCRLONG = DESCRSHORT #suggested notes NOTE = """ Number of observations: 51 Number of variables: 8 Variable name definitions: state All 50 states plus DC. violent Rate of violent crimes / 100,000 population. Includes murder, forcible rape, robbery, and aggravated assault. Numbers for Illinois and Minnesota do not include forcible rapes. Footnote included with the American Statistical Abstract table reads: "The data collection methodology for the offense of forcible rape used by the Illinois and the Minnesota state Uniform Crime Reporting (UCR) Programs (with the exception of Rockford, Illinois, and Minneapolis and St. Paul, Minnesota) does not comply with national UCR guidelines. Consequently, their state figures for forcible rape and violent crime (of which forcible rape is a part) are not published in this table." murder Rate of murders / 100,000 population. hs_grad Percent of population having graduated from high school or higher. poverty % of individuals below the poverty line white Percent of population that is one race - white only. From 2009 American Community Survey single Calculated from 2009 1-year American Community Survey obtained from Census. Variable is Male householder, no wife present, family household combined with Female household, no husband present, family household, divided by the total number of Family households. urban % of population in Urbanized Areas as of 2010 Census. Urbanized Areas are areas of 50,000 or more people.""" import numpy as np from statsmodels.datasets import utils as du from os.path import dirname, abspath def load(): """ Load the statecrime data and return a Dataset class instance. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. """ data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5], dtype=float) def load_pandas(): data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5], dtype=float, index_idx=0) def _get_data(): filepath = dirname(abspath(__file__)) ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv ##### data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'), delimiter=",", names=True, dtype=None) return data
apache-2.0
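Loading the dataset through the functions defined above (statsmodels ships statecrime.csv alongside this module):

from statsmodels.datasets import statecrime

data = statecrime.load_pandas()
print(data.endog.head())  # murder rate, per endog_idx=2
print(data.exog.columns)  # urban, poverty, hs_grad, white, per exog_idx=[7, 4, 3, 5]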
jimsimon/sky_engine
testing/legion/tools/legion.py
16
5772
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A helper module to run Legion multi-machine tests. Example usage with 1 task machine: $ testing/legion/tools/legion.py run \ --controller-isolated out/Release/example_test_controller.isolated \ --dimension os Ubuntu-14.04 \ --task-name test-task-name \ --task task_machine out/Release/example_task_machine.isolated Example usage with 2 task machines with the same isolated file: $ testing/legion/tools/legion.py run \ --controller-isolated out/Release/example_test_controller.isolated \ --dimension os Ubuntu-14.04 \ --task-name test-task-name \ --task task_machine_1 out/Release/example_task_machine.isolated \ --task task_machine_2 out/Release/example_task_machine.isolated Example usage with 2 task machines with different isolated file: $ testing/legion/tools/legion.py run \ --controller-isolated out/Release/example_test_controller.isolated \ --dimension os Ubuntu-14.04 \ --task-name test-task-name \ --task task_machine_1 out/Release/example_task_machine_1.isolated \ --task task_machine_2 out/Release/example_task_machine_2.isolated """ import argparse import logging import os import subprocess import sys THIS_DIR = os.path.split(__file__)[0] SWARMING_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'tools', 'swarming_client') ISOLATE_PY = os.path.join(SWARMING_DIR, 'isolate.py') SWARMING_PY = os.path.join(SWARMING_DIR, 'swarming.py') LOGGING_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR'] class Error(Exception): pass def GetArgs(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('action', choices=['run', 'trigger'], help='The swarming action to perform.') parser.add_argument('-f', '--format-only', action='store_true', help='If true the .isolated files are archived but ' 'swarming is not called, only the command line is built.') parser.add_argument('--controller-isolated', required=True, help='The isolated file for the test controller.') parser.add_argument('--isolate-server', help='Optional. The isolated server ' 'to use.') parser.add_argument('--swarming-server', help='Optional. The swarming server ' 'to use.') parser.add_argument('--task-name', help='Optional. The swarming task name ' 'to use.') parser.add_argument('--dimension', action='append', dest='dimensions', nargs=2, default=[], help='Dimensions to pass to ' 'swarming.py. This is in the form of --dimension key ' 'value. The minimum required is --dimension os <OS>') parser.add_argument('--task', action='append', dest='tasks', nargs=2, default=[], help='List of task names used in ' 'the test controller. This is in the form of --task name ' '.isolated and is passed to the controller as --name ' '<ISOLATED HASH>.') parser.add_argument('--controller-var', action='append', dest='controller_vars', nargs=2, default=[], help='Command line vars to pass to the controller. 
These ' 'are in the form of --controller-var name value and are ' 'passed to the controller as --name value.') parser.add_argument('-v', '--verbosity', default=0, action='count') return parser.parse_args() def RunCommand(cmd, stream_stdout=False): """Runs the command line and streams stdout if requested.""" kwargs = { 'args': cmd, 'stderr': subprocess.PIPE, } if not stream_stdout: kwargs['stdout'] = subprocess.PIPE p = subprocess.Popen(**kwargs) stdout, stderr = p.communicate() if p.returncode: raise Error(stderr) if not stream_stdout: logging.debug(stdout) return stdout def Archive(isolated, isolate_server=None): """Calls isolate.py archive with the given args.""" cmd = [ sys.executable, ISOLATE_PY, 'archive', '--isolated', isolated, ] if isolate_server: cmd.extend(['--isolate-server', isolate_server]) print ' '.join(cmd) return RunCommand(cmd).split()[0] # The isolated hash def GetSwarmingCommandLine(args): """Builds and returns the command line for swarming.py run|trigger.""" cmd = [ sys.executable, SWARMING_PY, args.action, args.controller_isolated, ] if args.isolate_server: cmd.extend(['--isolate-server', args.isolate_server]) if args.swarming_server: cmd.extend(['--swarming', args.swarming_server]) if args.task_name: cmd.extend(['--task-name', args.task_name]) # swarming.py dimensions for name, value in args.dimensions: cmd.extend(['--dimension', name, value]) cmd.append('--') # Specify the output dir cmd.extend(['--output-dir', '${ISOLATED_OUTDIR}']) # Task name/hash values for name, isolated in args.tasks: cmd.extend(['--' + name, Archive(isolated, args.isolate_server)]) # Test controller args for name, value in args.controller_vars: cmd.extend(['--' + name, value]) print ' '.join(cmd) return cmd def main(): args = GetArgs() logging.basicConfig( format='%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s', datefmt='%H:%M:%S', level=LOGGING_LEVELS[len(LOGGING_LEVELS)-args.verbosity-1]) cmd = GetSwarmingCommandLine(args) if not args.format_only: RunCommand(cmd, True) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
DPaaS-Raksha/horizon
openstack_dashboard/dashboards/project/loadbalancers/tabs.py
9
5153
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013, Big Switch Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from django.utils.translation import ugettext as _ from horizon import exceptions from horizon import tabs from horizon import tables from openstack_dashboard import api from .tables import PoolsTable, MembersTable, MonitorsTable class PoolsTab(tabs.TableTab): table_classes = (PoolsTable,) name = _("Pools") slug = "pools" template_name = "horizon/common/_detail_table.html" def get_poolstable_data(self): try: pools = api.lbaas.pools_get(self.tab_group.request) poolsFormatted = [p.readable(self.tab_group.request) for p in pools] except: poolsFormatted = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve pools list.')) return poolsFormatted class MembersTab(tabs.TableTab): table_classes = (MembersTable,) name = _("Members") slug = "members" template_name = "horizon/common/_detail_table.html" def get_memberstable_data(self): try: members = api.lbaas.members_get(self.tab_group.request) membersFormatted = [m.readable(self.tab_group.request) for m in members] except: membersFormatted = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve member list.')) return membersFormatted class MonitorsTab(tabs.TableTab): table_classes = (MonitorsTable,) name = _("Monitors") slug = "monitors" template_name = "horizon/common/_detail_table.html" def get_monitorstable_data(self): try: monitors = api.lbaas.pool_health_monitors_get( self.tab_group.request) except: monitors = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve monitor list.')) return monitors class LoadBalancerTabs(tabs.TabGroup): slug = "lbtabs" tabs = (PoolsTab, MembersTab, MonitorsTab) sticky = True class PoolDetailsTab(tabs.Tab): name = _("Pool Details") slug = "pooldetails" template_name = "project/loadbalancers/_pool_details.html" def get_context_data(self, request): pid = self.tab_group.kwargs['pool_id'] try: pool = api.lbaas.pool_get(request, pid) except: pool = [] exceptions.handle(request, _('Unable to retrieve pool details.')) return {'pool': pool} class VipDetailsTab(tabs.Tab): name = _("Vip Details") slug = "vipdetails" template_name = "project/loadbalancers/_vip_details.html" def get_context_data(self, request): vid = self.tab_group.kwargs['vip_id'] try: vip = api.lbaas.vip_get(request, vid) except: vip = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve vip details.')) return {'vip': vip} class MemberDetailsTab(tabs.Tab): name = _("Member Details") slug = "memberdetails" template_name = "project/loadbalancers/_member_details.html" def get_context_data(self, request): mid = self.tab_group.kwargs['member_id'] try: member = api.lbaas.member_get(request, mid) except: member = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve member details.')) return {'member': member} class MonitorDetailsTab(tabs.Tab): name = _("Monitor Details") slug = "monitordetails" template_name = "project/loadbalancers/_monitor_details.html" def 
get_context_data(self, request): mid = self.tab_group.kwargs['monitor_id'] try: monitor = api.lbaas.pool_health_monitor_get(request, mid) except: monitor = [] exceptions.handle(self.tab_group.request, _('Unable to retrieve monitor details.')) return {'monitor': monitor} class PoolDetailsTabs(tabs.TabGroup): slug = "pooltabs" tabs = (PoolDetailsTab,) class VipDetailsTabs(tabs.TabGroup): slug = "viptabs" tabs = (VipDetailsTab,) class MemberDetailsTabs(tabs.TabGroup): slug = "membertabs" tabs = (MemberDetailsTab,) class MonitorDetailsTabs(tabs.TabGroup): slug = "monitortabs" tabs = (MonitorDetailsTab,)
apache-2.0
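A sketch of how another table tab would be added to LoadBalancerTabs, mirroring PoolsTab above; VipsTable and api.lbaas.vips_get are illustrative assumptions, not names from this codebase:

class VipsTab(tabs.TableTab):
    table_classes = (VipsTable,)  # hypothetical table class from .tables
    name = _("Vips")
    slug = "vips"
    template_name = "horizon/common/_detail_table.html"

    def get_vipstable_data(self):
        try:
            vips = api.lbaas.vips_get(self.tab_group.request)  # hypothetical API
        except Exception:
            vips = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve vip list.'))
        return vips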
edevil/django
django/contrib/auth/middleware.py
172
5116
from django.contrib import auth
from django.contrib.auth import load_backend
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject


def get_user(request):
    if not hasattr(request, '_cached_user'):
        request._cached_user = auth.get_user(request)
    return request._cached_user


class AuthenticationMiddleware(object):
    def process_request(self, request):
        assert hasattr(request, 'session'), (
            "The Django authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'."
        )
        request.user = SimpleLazyObject(lambda: get_user(request))


class SessionAuthenticationMiddleware(object):
    """
    Formerly, a middleware for invalidating a user's sessions that don't
    correspond to the user's current session authentication hash. However, it
    caused the "Vary: Cookie" header on all responses.

    Now a backwards compatibility shim that enables session verification in
    auth.get_user() if this middleware is in MIDDLEWARE_CLASSES.
    """
    def process_request(self, request):
        pass


class RemoteUserMiddleware(object):
    """
    Middleware for utilizing Web-server-provided authentication.

    If request.user is not authenticated, then this middleware attempts to
    authenticate the username passed in the ``REMOTE_USER`` request header.
    If authentication is successful, the user is automatically logged in to
    persist the user in the session.

    The header used is configurable and defaults to ``REMOTE_USER``. Subclass
    this class and change the ``header`` attribute if you need to use a
    different header.
    """

    # Name of request header to grab username from. This will be the key as
    # used in the request.META dictionary, i.e. the normalization of headers to
    # all uppercase and the addition of "HTTP_" prefix apply.
    header = "REMOTE_USER"

    def process_request(self, request):
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        try:
            username = request.META[self.header]
        except KeyError:
            # If specified header doesn't exist then remove any existing
            # authenticated remote-user, or return (leaving request.user set to
            # AnonymousUser by the AuthenticationMiddleware).
            if request.user.is_authenticated():
                self._remove_invalid_user(request)
            return
        # If the user is already authenticated and that user is the user we are
        # getting passed in the headers, then the correct user is already
        # persisted in the session and we don't need to continue.
        if request.user.is_authenticated():
            if request.user.get_username() == self.clean_username(username, request):
                return
            else:
                # An authenticated user is associated with the request, but
                # it does not match the authorized user in the header.
                self._remove_invalid_user(request)

        # We are seeing this user for the first time in this session, attempt
        # to authenticate the user.
        user = auth.authenticate(remote_user=username)
        if user:
            # User is valid.  Set request.user and persist user in the session
            # by logging the user in.
            request.user = user
            auth.login(request, user)

    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        backend_str = request.session[auth.BACKEND_SESSION_KEY]
        backend = auth.load_backend(backend_str)
        try:
            username = backend.clean_username(username)
        except AttributeError:  # Backend has no clean_username method.
            pass
        return username

    def _remove_invalid_user(self, request):
        """
        Removes the current authenticated user in the request which is invalid
        but only if the user is authenticated via the RemoteUserBackend.
        """
        try:
            stored_backend = load_backend(request.session.get(auth.BACKEND_SESSION_KEY, ''))
        except ImportError:
            # backend failed to load
            auth.logout(request)
        else:
            if isinstance(stored_backend, RemoteUserBackend):
                auth.logout(request)
bsd-3-clause
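A minimal sketch of how RemoteUserMiddleware is typically deployed; the subclass name, module path, and custom header are assumptions for illustration, while the settings names match the Django 1.x conventions referenced in the file above.

# Hypothetical deployment sketch for the middleware above; CustomHeaderMiddleware,
# the X-Proxy-User header, and the myapp module path are assumptions.
from django.contrib.auth.middleware import RemoteUserMiddleware

class CustomHeaderMiddleware(RemoteUserMiddleware):
    # request.META normalizes "X-Proxy-User" to "HTTP_X_PROXY_USER".
    header = 'HTTP_X_PROXY_USER'

# settings.py (Django 1.x style, matching the MIDDLEWARE_CLASSES wording above):
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'myapp.middleware.CustomHeaderMiddleware',  # hypothetical module path
)
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.RemoteUserBackend',
)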
m7thon/tom
python/tests/sequence_test.py
1
12007
import unittest import tom class PlainPySequence: """Simple list of symbols: [o_0, ..., o_{N-1}].""" def __init__(self, data): if type(data) is list: self.data = data else: self.data = list(range(data)) def __repr__(self): return str(self.data) def length(self): return len(self.data) def raw(self): return self def equals(self, tom_seq): if tom_seq.rawSize() != len(self.data): return False for idx in range(len(self.data)): if self.data[idx] != tom_seq.rawAt(idx): return False return True def o(self, idx): return self.data[idx] def u(self, idx): return 0 def sub(self, idx, length): return PlainPySequence(self.data[idx:idx + length:1 if length > 0 else -1]) rawSub = sub def slice(self, begin, end, forwards): if begin == tom.NoIndex: begin = None if end == tom.NoIndex: end = None return PlainPySequence(self.data[begin:end:1 if forwards else -1]) rawSlice = slice def reverse(self): return PlainPySequence(list(reversed(self.data))) def io(self, rev): if self.length() == 0: ret = IoPySequence([]) ret.reversed = rev return ret dat = self.data[:] if (not rev and dat[0] > 0) or (rev and dat[0] < 0): dat = [None] + dat if len(dat) % 2 != 0: dat = dat + [None] i = dat[::2] o = dat[1::2] if rev: i, o = o, i ret = IoPySequence(list(zip(i, o))) ret.reversed = rev return ret class IoPySequence: """ A list of io-pairs: [(u_0, o_0), ..., (u_{N-1}, o_{N-1})]. Note that for testing inputs will be negative and outputs positive!""" def __init__(self, data): if type(data) is list: self.data = data else: self.data = list(zip(range(-1, -(data + 1), -1), range(1, data + 1, 1))) self.reversed = False def __repr__(self): return str(self.data) def raw(self): if not self.reversed: return PlainPySequence([i for pair in self.data for i in pair if i is not None]) else: return PlainPySequence([i for pair in self.data for i in reversed(pair) if i is not None]) def length(self): return len(self.data) def equals(self, tom_seq): if not tom_seq.isIO(): return False if tom_seq.length() != len(self.data): return False if len(self.data) > 0 and tom_seq.isReversed() != self.reversed: return False return self.raw().equals(tom_seq) def o(self, idx): return self.data[idx][1] def u(self, idx): return self.data[idx][0] def reverse(self): rev = IoPySequence(list(reversed(self.data))) rev.reversed = not self.reversed return rev def sub(self, idx, length): ret = IoPySequence(self.data[idx:idx + length:1 if length > 0 else -1]) if length < 0: ret.reversed = not self.reversed else: ret.reversed = self.reversed return ret def slice(self, begin, end, forwards): if begin == tom.NoIndex: begin = None if end == tom.NoIndex: end = None ret = IoPySequence(self.data[begin:end:1 if forwards else -1]) if not forwards: ret.reversed = not self.reversed else: ret.reversed = self.reversed return ret def rawSub(self, idx, length): return self.raw().sub(idx, length).io((self.reversed) == (length >= 0)) def rawSlice(self, begin, end, forwards): return self.raw().slice(begin, end, forwards).io(self.reversed == forwards) class TestSequence(unittest.TestCase): def create_subs(self): pass def cases(self): size = 5 l = list(range(1, size + 1)) py_base = PlainPySequence(l) tom_base = tom.Sequence(l, 10, 0) for b in range(size): for e in range(b, size + 1): try: tom_seq = tom_base.rawSlice(b, e, True) py_seq = py_base.slice(b, e, True) except: self.assertTrue(False, "Error creating test cases by rawSlice by " + str(tom_base) + " [%d:%d:%d]" % (b, e, True)) self.assertTrue(py_seq.equals(tom_seq), "Error creating test cases by rawSlice for " + str(tom_seq) + " and " 
+ str(py_seq)) yield (tom_seq, py_seq) py_seq = py_seq.reverse() tom_seq = tom_seq.reverse() self.assertTrue(py_seq.equals(tom_seq), "Error creating test cases by rawSlice for " + str(tom_seq) + " and " + str(py_seq)) yield (tom_seq, py_seq) i = list(range(-1, -size - 1, -1)) o = list(range(1, size + 1, 1)) l = list(zip(i, o)) x = [] for p in l: x.extend(p) py_base = IoPySequence(l) tom_base = tom.Sequence(x, 10, 1) for b in range(2 * size): for e in range(b, 2 * size + 1): tom_seq = tom_base.rawSlice(b, e, True) py_seq = py_base.rawSlice(b, e, True) self.assertTrue(py_seq.equals(tom_seq), "Error creating test cases by rawSlice for " + str(tom_seq) + " and " + str(py_seq)) yield (tom_seq, py_seq) py_seq = py_seq.reverse() tom_seq = tom_seq.reverse() self.assertTrue(py_seq.equals(tom_seq), "Error creating test cases by rawSlice for " + str(tom_seq) + " and " + str(py_seq)) yield (tom_seq, py_seq) def test_json_io(self): for tom_seq, py_seq in self.cases(): seq = tom.Sequence(tom_seq.toJSON()) self.assertTrue(seq == tom_seq, "to and from json gives non-equal sequence for" + str(tom_seq) + " and " + str(seq)) self.assertTrue(seq.nInputSymbols() == tom_seq.nInputSymbols(), "alphabet changed: " + str(tom_seq) + " and " + str(seq)) self.assertTrue(seq.nOutputSymbols() == tom_seq.nOutputSymbols(), "alphabet changed: " + str(tom_seq) + " and " + str(seq)) json = """{"Type":"Sequence","nU":1,"nO":10,"data":[-1,1,-2,2],"size":4}""" tom_seq = tom.Sequence(json) py_seq = IoPySequence([(-1, 1), (-2, 2)]) self.assertTrue(tom_seq.nInputSymbols() == 1 and tom_seq.nOutputSymbols() == 10 and py_seq.equals(tom_seq), "Error reading simple json-string") def test_copy(self): for tom_seq, py_seq in self.cases(): seq = tom_seq.copy() self.assertTrue(seq == tom_seq, ".copy() not equal:" + str(tom_seq) + " and " + str(seq)) self.assertTrue(seq.nInputSymbols() == tom_seq.nInputSymbols(), "alphabet changed: " + str(tom_seq) + " and " + str(seq)) self.assertTrue(seq.nOutputSymbols() == tom_seq.nOutputSymbols(), "alphabet changed: " + str(tom_seq) + " and " + str(seq)) def test_accessors(self): for tom_seq, py_seq in self.cases(): for idx in range(-py_seq.length(), py_seq.length()): try: self.assertTrue(tom_seq.o(idx) == py_seq.o(idx), ".o(%d) not correct: " % idx + str(tom_seq) + " and " + str(py_seq)) except: if py_seq.o(idx) is not None: self.assertTrue(False, ".o(%d) should be %d: " % (idx, py_seq.o(idx)) + str(tom_seq) + " and " + str(py_seq)) try: self.assertTrue(tom_seq.u(idx) == py_seq.u(idx), ".u(%d) not correct: " % idx + str(tom_seq) + " and " + str(py_seq)) except: if py_seq.u(idx) is not None: self.assertTrue(False, ".u(%d) should be %d: " % (idx, py_seq.u(idx)) + str(tom_seq) + " and " + str(py_seq)) for idx in range(-py_seq.raw().length(), py_seq.raw().length()): self.assertTrue(tom_seq.rawAt(idx) == py_seq.raw().o(idx), "Error with rawAt: " + str(tom_seq)) self.assertTrue(tom_seq.rawAt(idx) == tom_seq[idx], "Error with python []: " + str(tom_seq)) self.assertTrue(list(tom_seq) == py_seq.raw().data, "Error with python iterator access: " + str(tom_seq)) def test_rawSub(self): for tom_seq, py_seq in self.cases(): for idx in list(range(py_seq.raw().length())): for l in list(range(py_seq.raw().length()-idx)): self.assertTrue(py_seq.rawSub(idx, l).equals(tom_seq.rawSub(idx, l)), "Sub error: " + str(tom_seq) + " [%d:%d]" % (idx, l)) for l in list(range(-1, -idx-1, -1)): self.assertTrue(py_seq.rawSub(idx, l).equals(tom_seq.rawSub(idx, l)), "Sub error: " + str(tom_seq) + " [%d:%d]" % (idx, l)) def 
test_sub(self): for tom_seq, py_seq in self.cases(): for idx in list(range(py_seq.length())): for l in list(range(py_seq.length()-idx)): self.assertTrue(py_seq.sub(idx, l).equals(tom_seq.sub(idx, l)), "Sub error: " + str(tom_seq) + " [%d:%d]" % (idx, l)) for l in list(range(-1, -idx-1, -1)): self.assertTrue(py_seq.sub(idx, l).equals(tom_seq.sub(idx, l)), "Sub error: " + str(tom_seq) + " [%d:%d]" % (idx, l)) def test_rawSlice(self): for tom_seq, py_seq in self.cases(): for b in [tom.NoIndex] + list(range(py_seq.raw().length())): if b == tom.NoIndex: es = [tom.NoIndex] + list(range(py_seq.raw().length() + 1)) else: es = [tom.NoIndex] + list(range(b, py_seq.raw().length()+1)) for e in es: self.assertTrue(py_seq.rawSlice(b, e, True).equals(tom_seq.rawSlice(b, e, True)), "Slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, True)) self.assertTrue(tom_seq.rawSlice(b, e, True) == tom_seq[None if b == tom.NoIndex else b:None if e == tom.NoIndex else e], "Python-slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, True)) for b in [tom.NoIndex] + list(range(py_seq.raw().length())): if b == tom.NoIndex: es = [tom.NoIndex] + list(range(py_seq.raw().length())) else: es = [tom.NoIndex] + list(range(0, b+1)) for e in es: self.assertTrue(py_seq.rawSlice(b, e, False).equals(tom_seq.rawSlice(b, e, False)), "Slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, False)) self.assertTrue(tom_seq.rawSlice(b, e, False) == tom_seq[None if b == tom.NoIndex else b:None if e == tom.NoIndex else e:-1], "Python-slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, False)) def test_slice(self): for tom_seq, py_seq in self.cases(): for b in [tom.NoIndex] + list(range(py_seq.length())): if b == tom.NoIndex: es = [tom.NoIndex] + list(range(py_seq.length() + 1)) else: es = [tom.NoIndex] + list(range(b, py_seq.length()+1)) for e in es: self.assertTrue(py_seq.slice(b, e, True).equals(tom_seq.slice(b, e, True)), "Slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, True)) for b in [tom.NoIndex] + list(range(py_seq.length())): if b == tom.NoIndex: es = [tom.NoIndex] + list(range(py_seq.length())) else: es = [tom.NoIndex] + list(range(0, b+1)) for e in es: self.assertTrue(py_seq.slice(b, e, False).equals(tom_seq.slice(b, e, False)), "Slicing error: " + str(tom_seq) + " [%d:%d:%d]" % (b, e, False))
mit
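A minimal usage sketch of the tom.Sequence API exercised by the test file above; only calls that appear in the tests are used, and the printed results follow the semantics the tests assert.

# Usage sketch for tom.Sequence (hedged: API inferred from the tests above).
import tom

seq = tom.Sequence([1, 2, 3, 4, 5], 10, 0)   # plain sequence, output alphabet 10
print(seq.rawSize(), seq.rawAt(0))           # -> 5 1
print(list(seq[1:4]))                        # -> [2, 3, 4]; [] slicing maps to rawSlice
restored = tom.Sequence(seq.toJSON())        # JSON round trip preserves equality
assert restored == seq
rev = seq.reverse()                          # reversed view, as used in the tests
print(list(rev))                             # -> [5, 4, 3, 2, 1]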
alabarga/SocialLearning
SocialLearning/apps/learningobjects/management/commands/describe.py
1
6166
#-*- coding: UTF-8 -*-
from learningobjects.utils.search import *
from learningobjects.utils.parsers import *
from django.core.management.base import BaseCommand, CommandError
from learningobjects.models import *
from optparse import make_option
from learningobjects.utils import feedfinder
from learningobjects.management.commands import add
from django.core.management import call_command
import feedparser
from learningobjects.utils.parsers import ReadibilityParser
from learningobjects.utils.alchemyapi import AlchemyAPI
from learningobjects.utils import slideshare
from textblob import TextBlob
# pip install -U textblob
# python -m textblob.download_corpora
import sys, traceback


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('-q', '--query', dest='query', help='Keyword to search for'),
    ) + (
        make_option('-u', '--url', dest='URL', help='URL of the resource'),
    )

    def add_arguments(self, parser):
        # Positional arguments
        parser.add_argument('-u', '--url', dest='URL', help='URL of the resource')
        # parser.add_argument('poll_id', nargs='+', type=int)

        # Named (optional) arguments
        parser.add_argument('--verbose', action='store_true', dest='verbose',
                            default=False, help='Log actions to console')

    def handle(self, *args, **options):
        results = []
        alchemyapi = AlchemyAPI()
        if options['URL'] is None:
            inp = raw_input("This will describe EVERY Resource with 'Added' status on the database. Are you sure (y/n)?:")
            inp = inp.lower()
            if inp == "y" or inp == "yes":
                topics = Topic.objects.all()
                # resources = Resource.objects.filter(status=Resource.ADDED)
                col = Collection.objects.get(pk=23)
                resources = col.resources.all()
                slides = slideshare.SlideshareAPI()
                for res in resources:
                    try:
                        url = res.url
                        res.identifier = hashlib.md5(url).hexdigest()
                        u = URLObject(url)
                        print "%s (%s)" % (u.url, u.content_type)
                        if 'application/pdf' in u.content_type:
                            pd = PDFParser(url).describe()
                            res.fulltext = pd.fulltext
                            res.content_type = 'PDF'
                        # slideshare
                        elif bool(re.match('^(http(s|):\/\/|)(www.|)slideshare.net', u.url)):
                            # was bound to `s`, leaving `slide` undefined below
                            slide = slides.get_slideshow(slideshow_url=u.url)
                            res.title = slide['Slideshow']['Title']
                            res.description = slide['Slideshow']['Description']
                            res.author = slide['Slideshow']['Username']
                            res.fulltext = slide['Slideshow']['Embed']
                            res.interest = int(slide['Slideshow']['NumViews']) + int(slide['Slideshow']['NumFavorites']) + int(slide['Slideshow']['NumDownloads'])
                            rc_url = 'https://www.slideshare.net/' + slide['Slideshow']['Username'] + '/presentations'
                            rc_rss = 'http://es.slideshare.net/rss/user/' + slide['Slideshow']['Username']
                            rc, created = ResourceContainer.objects.get_or_create(url=rc_url, rss=rc_rss,
                                                                                  name=slide['Slideshow']['Username'])
                            rc.resources.add(res)
                        # youtube
                        elif bool(re.match('^(http(s|):\/\/|)(www.|)youtube.com', u.url)):
                            yt_desc = YoutubeParser(url).describe()
                            res.title = yt_desc.title
                            res.description = yt_desc.description
                            res.interest = yt_desc.viewcount
                            res.content_type = 'VIDEO'
                            res.author = yt_desc.username
                            rc_url = 'https://www.youtube.com/user/' + yt_desc.username
                            rc_rss = 'http://gdata.youtube.com/feeds/api/users/' + yt_desc.username + '/uploads'
                            # get_or_create returns an (object, created) tuple
                            rc, created = ResourceContainer.objects.get_or_create(url=rc_url, rss=rc_rss,
                                                                                  name=yt_desc.username)
                            rc.resources.add(res)
                        elif 'text/html' in u.content_type:
                            rp_desc = ReadibilityParser(url).describe()
                            gp_desc = GooseParser(url).describe()
                            sm_desc = SummaryParser(url).describe()
                            res.title = rp_desc.title
                            res.description = sm_desc.summary
                            res.fulltext = gp_desc.text
                            np = TextBlob(gp_desc.text)
                            res.language = np.detect_language()
                            res.author = rp_desc.author
                            res.content_type = 'WEB'
                        else:
                            continue
                        # for t in topics:
                        #     rel, created = Relevance.objects.get_or_create(resource=res, topic=t)
                        #     rel.score = random.random()
                        res.status = Resource.DESCRIBED
                        res.save()
                    except:
                        traceback.print_exc(file=sys.stdout)
        else:
            url = options['URL']
            resource = Resource.objects.filter(url=url, status=Resource.ADDED)
            if len(resource) > 0:
                # instantiate the parser before describing, as elsewhere in this file
                data = ReadibilityParser(url).describe()
                resource.update(status=Resource.DESCRIBED)
            else:
                print "That link is not in the database or is not with 'Added' status. Add it first (python manage.py add -u " + url + ")"
gpl-3.0
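# Example invocations of the management command above (hedged: exact behavior
# depends on the project's Django settings and installed apps):
#   python manage.py describe -u http://example.com/article   # one resource
#   python manage.py describe                                 # every resource in the
#                                                             # hard-coded collection,
#                                                             # after a y/n prompt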
ClearwaterCore/gmock-upstream
gtest/xcode/Scripts/versiongenerate.py
3088
4536
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A script to prepare version information for use in the gtest Info.plist file.

This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.

This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
  1. The AC_INIT macro will be contained within the first 1024 characters
     of configure.ac
  2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
     segment represents the major version, the second represents the minor
     version and the third represents the fix version.
  3. No ")" character exists between the opening "(" and closing ")" of
     AC_INIT, including in comments and character strings.
"""

import sys
import re

# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()

# Extract the version string from the AC_INIT macro
#   The following init_expression means:
#     Extract three integers separated by periods and surrounded by square
#     brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#     (*? is the non-greedy flag) since that would pull in everything between
#     the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

""" % (major_version, minor_version, major_version, minor_version, fix_version)

version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
bsd-3-clause
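A quick demonstration of the AC_INIT extraction performed by the script above; the sample string is an assumption for illustration, not taken from any real configure.ac.

# Demonstration of the version-extraction regex above (sample input assumed).
import re

sample = "AC_INIT([Google C++ Testing Framework], [1.7.0], [bugs@example.com])"
expr = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)", re.DOTALL)
match = expr.search(sample)
print match.group(1), match.group(2), match.group(3)  # -> 1 7 0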
SoteriousIdaofevil/xmlstar
mingw/libxml2-2.9.1/python/tests/xpathleak.py
30
1563
#!/usr/bin/python
import sys, libxml2

libxml2.debugMemory(True)

expect="""--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
--> Invalid expression
--> xmlXPathEval: evaluation failed
"""
err=""

def callback(ctx, str):
    global err
    err = err + "%s %s" % (ctx, str)

libxml2.registerErrorHandler(callback, "-->")

doc = libxml2.parseDoc("<fish/>")
ctxt = doc.xpathNewContext()
ctxt.setContextNode(doc)
badexprs = (":false()", "bad:()", "bad(:)", ":bad(:)", "bad:(:)",
            "bad:bad(:)", "a:/b", "/c:/d", "//e:/f", "g://h")
for expr in badexprs:
    try:
        ctxt.xpathEval(expr)
    except libxml2.xpathError:
        pass
    else:
        print("Unexpectedly legal expression:", expr)
ctxt.xpathFreeContext()
doc.freeDoc()

if err != expect:
    print("error")
    print("received %s" % (err))
    print("expected %s" % (expect))
    sys.exit(1)

libxml2.cleanupParser()
leakedbytes = libxml2.debugMemory(True)
if leakedbytes == 0:
    print("OK")
else:
    print("Memory leak", leakedbytes, "bytes")
    # drop file to .memdump file in cwd, but won't work if not compiled in
    libxml2.dumpMemory()
mit
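The leak-check bracket used by the test above, reduced to a reusable skeleton for other libxml2 binding tests; only calls that appear in the file above are used.

# Skeleton of the libxml2 memory-leak check pattern (sketch).
import libxml2

libxml2.debugMemory(True)           # start tracking allocations
# ... exercise the libxml2 API under test ...
libxml2.cleanupParser()
leaked = libxml2.debugMemory(True)  # bytes still allocated afterwards
print("OK" if leaked == 0 else "Memory leak %d bytes" % leaked)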
ModdedPA/android_external_chromium_org
third_party/closure_linter/closure_linter/error_fixer.py
135
17305
#!/usr/bin/env python # # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main class responsible for automatically fixing simple style violations.""" __author__ = '[email protected] (Robert Walker)' import re import gflags as flags from closure_linter import errors from closure_linter import javascriptstatetracker from closure_linter import javascripttokens from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import errorhandler # Shorthand Token = javascripttokens.JavaScriptToken Type = javascripttokens.JavaScriptTokenType END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$') # Regex to represent common mistake inverting author name and email as # @author User Name (user@company) INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)' '(?P<name>[^(]+)' '(?P<whitespace_after_name>\s+)' '\(' '(?P<email>[^\s]+@[^)\s]+)' '\)' '(?P<trailing_characters>.*)') FLAGS = flags.FLAGS flags.DEFINE_boolean('disable_indentation_fixing', False, 'Whether to disable automatic fixing of indentation.') class ErrorFixer(errorhandler.ErrorHandler): """Object that fixes simple style errors.""" def __init__(self, external_file=None): """Initialize the error fixer. Args: external_file: If included, all output will be directed to this file instead of overwriting the files the errors are found in. """ errorhandler.ErrorHandler.__init__(self) self._file_name = None self._file_token = None self._external_file = external_file def HandleFile(self, filename, first_token): """Notifies this ErrorPrinter that subsequent errors are in filename. Args: filename: The name of the file about to be checked. first_token: The first token in the file. """ self._file_name = filename self._file_token = first_token self._file_fix_count = 0 self._file_changed_lines = set() def _AddFix(self, tokens): """Adds the fix to the internal count. Args: tokens: The token or sequence of tokens changed to fix an error. """ self._file_fix_count += 1 if hasattr(tokens, 'line_number'): self._file_changed_lines.add(tokens.line_number) else: for token in tokens: self._file_changed_lines.add(token.line_number) def HandleError(self, error): """Attempts to fix the error. Args: error: The error object """ code = error.code token = error.token if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL: iterator = token.attached_object.type_start_token if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(): iterator = iterator.next leading_space = len(iterator.string) - len(iterator.string.lstrip()) iterator.string = '%s?%s' % (' ' * leading_space, iterator.string.lstrip()) # Cover the no outer brace case where the end token is part of the type. while iterator and iterator != token.attached_object.type_end_token.next: iterator.string = iterator.string.replace( 'null|', '').replace('|null', '') iterator = iterator.next # Create a new flag object with updated type info. 
token.attached_object = javascriptstatetracker.JsDocFlag(token) self._AddFix(token) elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE: iterator = token.attached_object.type_end_token if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace(): iterator = iterator.previous ending_space = len(iterator.string) - len(iterator.string.rstrip()) iterator.string = '%s=%s' % (iterator.string.rstrip(), ' ' * ending_space) # Create a new flag object with updated type info. token.attached_object = javascriptstatetracker.JsDocFlag(token) self._AddFix(token) elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION, errors.MISSING_SEMICOLON): semicolon_token = Token(';', Type.SEMICOLON, token.line, token.line_number) tokenutil.InsertTokenAfter(semicolon_token, token) token.metadata.is_implied_semicolon = False semicolon_token.metadata.is_implied_semicolon = False self._AddFix(token) elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, errors.REDUNDANT_SEMICOLON, errors.COMMA_AT_END_OF_LITERAL): tokenutil.DeleteToken(token) self._AddFix(token) elif code == errors.INVALID_JSDOC_TAG: if token.string == '@returns': token.string = '@return' self._AddFix(token) elif code == errors.FILE_MISSING_NEWLINE: # This error is fixed implicitly by the way we restore the file self._AddFix(token) elif code == errors.MISSING_SPACE: if error.position: if error.position.IsAtBeginning(): tokenutil.InsertSpaceTokenAfter(token.previous) elif error.position.IsAtEnd(token.string): tokenutil.InsertSpaceTokenAfter(token) else: token.string = error.position.Set(token.string, ' ') self._AddFix(token) elif code == errors.EXTRA_SPACE: if error.position: token.string = error.position.Set(token.string, '') self._AddFix(token) elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER: token.string = error.position.Set(token.string, '.') self._AddFix(token) elif code == errors.MISSING_LINE: if error.position.IsAtBeginning(): tokenutil.InsertBlankLineAfter(token.previous) else: tokenutil.InsertBlankLineAfter(token) self._AddFix(token) elif code == errors.EXTRA_LINE: tokenutil.DeleteToken(token) self._AddFix(token) elif code == errors.WRONG_BLANK_LINE_COUNT: if not token.previous: # TODO(user): Add an insertBefore method to tokenutil. return num_lines = error.fix_data should_delete = False if num_lines < 0: num_lines *= -1 should_delete = True for i in xrange(1, num_lines + 1): if should_delete: # TODO(user): DeleteToken should update line numbers. tokenutil.DeleteToken(token.previous) else: tokenutil.InsertBlankLineAfter(token.previous) self._AddFix(token) elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING: end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END) if end_quote: single_quote_start = Token( "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number) single_quote_end = Token( "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line, token.line_number) tokenutil.InsertTokenAfter(single_quote_start, token) tokenutil.InsertTokenAfter(single_quote_end, end_quote) tokenutil.DeleteToken(token) tokenutil.DeleteToken(end_quote) self._AddFix([token, end_quote]) elif code == errors.MISSING_BRACES_AROUND_TYPE: fixed_tokens = [] start_token = token.attached_object.type_start_token if start_token.type != Type.DOC_START_BRACE: leading_space = ( len(start_token.string) - len(start_token.string.lstrip())) if leading_space: start_token = tokenutil.SplitToken(start_token, leading_space) # Fix case where start and end token were the same. 
if token.attached_object.type_end_token == start_token.previous: token.attached_object.type_end_token = start_token new_token = Token('{', Type.DOC_START_BRACE, start_token.line, start_token.line_number) tokenutil.InsertTokenAfter(new_token, start_token.previous) token.attached_object.type_start_token = new_token fixed_tokens.append(new_token) end_token = token.attached_object.type_end_token if end_token.type != Type.DOC_END_BRACE: # If the start token was a brace, the end token will be a # FLAG_ENDING_TYPE token, if there wasn't a starting brace then # the end token is the last token of the actual type. last_type = end_token if not fixed_tokens: last_type = end_token.previous while last_type.string.isspace(): last_type = last_type.previous # If there was no starting brace then a lone end brace wouldn't have # been type end token. Now that we've added any missing start brace, # see if the last effective type token was an end brace. if last_type.type != Type.DOC_END_BRACE: trailing_space = (len(last_type.string) - len(last_type.string.rstrip())) if trailing_space: tokenutil.SplitToken(last_type, len(last_type.string) - trailing_space) new_token = Token('}', Type.DOC_END_BRACE, last_type.line, last_type.line_number) tokenutil.InsertTokenAfter(new_token, last_type) token.attached_object.type_end_token = new_token fixed_tokens.append(new_token) self._AddFix(fixed_tokens) elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED: require_start_token = error.fix_data sorter = requireprovidesorter.RequireProvideSorter() sorter.FixRequires(require_start_token) self._AddFix(require_start_token) elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED: provide_start_token = error.fix_data sorter = requireprovidesorter.RequireProvideSorter() sorter.FixProvides(provide_start_token) self._AddFix(provide_start_token) elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC: if token.previous.string == '{' and token.next.string == '}': tokenutil.DeleteToken(token.previous) tokenutil.DeleteToken(token.next) self._AddFix([token]) elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION: match = INVERTED_AUTHOR_SPEC.match(token.string) if match: token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'), match.group('email'), match.group('whitespace_after_name'), match.group('name'), match.group('trailing_characters')) self._AddFix(token) elif (code == errors.WRONG_INDENTATION and not FLAGS.disable_indentation_fixing): token = tokenutil.GetFirstTokenInSameLine(token) actual = error.position.start expected = error.position.length if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0: token.string = token.string.lstrip() + (' ' * expected) self._AddFix([token]) else: # We need to add indentation. new_token = Token(' ' * expected, Type.WHITESPACE, token.line, token.line_number) # Note that we'll never need to add indentation at the first line, # since it will always not be indented. Therefore it's safe to assume # token.previous exists. tokenutil.InsertTokenAfter(new_token, token.previous) self._AddFix([token]) elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT, errors.MISSING_END_OF_SCOPE_COMMENT]: # Only fix cases where }); is found with no trailing content on the line # other than a comment. Value of 'token' is set to } for this error. 
if (token.type == Type.END_BLOCK and token.next.type == Type.END_PAREN and token.next.next.type == Type.SEMICOLON): current_token = token.next.next.next removed_tokens = [] while current_token and current_token.line_number == token.line_number: if current_token.IsAnyType(Type.WHITESPACE, Type.START_SINGLE_LINE_COMMENT, Type.COMMENT): removed_tokens.append(current_token) current_token = current_token.next else: return if removed_tokens: tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens)) whitespace_token = Token(' ', Type.WHITESPACE, token.line, token.line_number) start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT, token.line, token.line_number) comment_token = Token(' goog.scope', Type.COMMENT, token.line, token.line_number) insertion_tokens = [whitespace_token, start_comment_token, comment_token] tokenutil.InsertTokensAfter(insertion_tokens, token.next.next) self._AddFix(removed_tokens + insertion_tokens) elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]: tokens_in_line = tokenutil.GetAllTokensInSameLine(token) tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line)) self._AddFix(tokens_in_line) elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]: is_provide = code == errors.MISSING_GOOG_PROVIDE is_require = code == errors.MISSING_GOOG_REQUIRE missing_namespaces = error.fix_data[0] need_blank_line = error.fix_data[1] if need_blank_line is None: # TODO(user): This happens when there are no existing # goog.provide or goog.require statements to position new statements # relative to. Consider handling this case with a heuristic. return insert_location = token.previous # If inserting a missing require with no existing requires, insert a # blank line first. if need_blank_line and is_require: tokenutil.InsertBlankLineAfter(insert_location) insert_location = insert_location.next for missing_namespace in missing_namespaces: new_tokens = self._GetNewRequireOrProvideTokens( is_provide, missing_namespace, insert_location.line_number + 1) tokenutil.InsertLineAfter(insert_location, new_tokens) insert_location = new_tokens[-1] self._AddFix(new_tokens) # If inserting a missing provide with no existing provides, insert a # blank line after. if need_blank_line and is_provide: tokenutil.InsertBlankLineAfter(insert_location) def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number): """Returns a list of tokens to create a goog.require/provide statement. Args: is_provide: True if getting tokens for a provide, False for require. namespace: The required or provided namespaces to get tokens for. line_number: The line number the new require or provide statement will be on. Returns: Tokens to create a new goog.require or goog.provide statement. """ string = 'goog.require' if is_provide: string = 'goog.provide' line_text = string + '(\'' + namespace + '\');\n' return [ Token(string, Type.IDENTIFIER, line_text, line_number), Token('(', Type.START_PAREN, line_text, line_number), Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number), Token(namespace, Type.STRING_TEXT, line_text, line_number), Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number), Token(')', Type.END_PAREN, line_text, line_number), Token(';', Type.SEMICOLON, line_text, line_number) ] def FinishFile(self): """Called when the current file has finished style checking. Used to go back and fix any errors in the file. 
""" if self._file_fix_count: f = self._external_file if not f: print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name) f = open(self._file_name, 'w') token = self._file_token char_count = 0 while token: f.write(token.string) char_count += len(token.string) if token.IsLastInLine(): f.write('\n') if char_count > 80 and token.line_number in self._file_changed_lines: print 'WARNING: Line %d of %s is now longer than 80 characters.' % ( token.line_number, self._file_name) char_count = 0 token = token.next if not self._external_file: # Close the file if we created it f.close()
bsd-3-clause
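# Comment sketch of the fixer lifecycle implied by the class above (the actual
# wiring lives in closure_linter's checker/fixjsstyle entry points):
#   fixer = ErrorFixer()                  # or ErrorFixer(external_file=f)
#   fixer.HandleFile(name, first_token)   # once per checked file
#   fixer.HandleError(error)              # once per reported style error
#   fixer.FinishFile()                    # rewrites the file if fixes were made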
OpenTechFund/WebApp
addressfield/fields.py
1
1809
import json
from os import path

from django import forms
from django.core.exceptions import ValidationError

from .widgets import AddressWidget

basepath = path.dirname(__file__)
filepath = path.abspath(path.join(basepath, "static", "addressfield.min.json"))

with open(filepath, encoding='utf8') as address_data:
    countries = json.load(address_data)['options']

VALIDATION_DATA = {country['iso']: country for country in countries}


def flatten_data(data):
    flattened = dict()
    for d in data:
        for k, v in d.items():
            if isinstance(v, list):
                value = flatten_data(v)
            else:
                value = {k: v}
            flattened.update(value)
    return flattened


class AddressField(forms.CharField):
    """
    The field stores the address in a flattened form,
    so the locality components are on the same level as country or premise
    """
    widget = AddressWidget
    data = VALIDATION_DATA

    def clean(self, value, **kwargs):
        country = value['country']
        try:
            country_data = self.data[country]
        except KeyError:
            raise ValidationError('Invalid country selected')

        fields = flatten_data(country_data['fields'])

        missing_fields = set(country_data['required']) - set(field for field, value in value.items() if value)
        if missing_fields:
            missing_field_name = [fields[field]['label'] for field in missing_fields]
            raise ValidationError('Please provide data for: {}'.format(', '.join(missing_field_name)))

        return super().clean(value, **kwargs)

    def to_python(self, value):
        return json.dumps(value)

    def prepare_value(self, value):
        try:
            return json.loads(value)
        except TypeError:
            return value
gpl-2.0
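A worked example of flatten_data above; the nested input mirrors the shape of the 'fields' entries in addressfield.min.json, but the sample values themselves are assumptions.

# Worked example for flatten_data (sample data assumed).
nested = [
    {'country': {'label': 'Country'}},
    {'locality': [
        {'postalcode': {'label': 'ZIP'}},
        {'city': {'label': 'City'}},
    ]},
]
print(flatten_data(nested))
# -> {'country': {'label': 'Country'}, 'postalcode': {'label': 'ZIP'},
#     'city': {'label': 'City'}}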
40023256/W17test
wsgi.py
1
27073
# coding=utf-8 # 上面的程式內容編碼必須在程式的第一或者第二行才會有作用 ################# (1) 模組導入區 # 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝 import cherrypy # 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝 import os # 導入 random 模組 import random import math from cherrypy.lib.static import serve_file # 導入 gear 模組 #import gear import man import man2 ################# (2) 廣域變數設定區 # 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線 _curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # 設定在雲端與近端的資料儲存目錄 if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示程式在雲端執行 download_root_dir = os.environ['OPENSHIFT_DATA_DIR'] data_dir = os.environ['OPENSHIFT_DATA_DIR'] else: # 表示程式在近端執行 download_root_dir = _curdir + "/local_data/" data_dir = _curdir + "/local_data/" def downloadlist_access_list(files, starti, endi): # different extension files, associated links were provided # popup window to view images, video or STL files, other files can be downloaded directly # files are all the data to list, from starti to endi # add file size outstring = "" for index in range(int(starti)-1, int(endi)): fileName, fileExtension = os.path.splitext(files[index]) fileExtension = fileExtension.lower() fileSize = sizeof_fmt(os.path.getsize(download_root_dir+"downloads/"+files[index])) # images files if fileExtension == ".png" or fileExtension == ".jpg" or fileExtension == ".gif": outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/downloads/'+ \ files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />' # stl files elif fileExtension == ".stl": outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/static/viewstl.html?src=/downloads/'+ \ files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />' # flv files elif fileExtension == ".flv": outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/flvplayer?filepath=/downloads/'+ \ files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />' # direct download files else: outstring += "<input type='checkbox' name='filename' value='"+files[index]+"'><a href='/download/?filepath="+download_root_dir.replace('\\', '/')+ \ "downloads/"+files[index]+"'>"+files[index]+"</a> ("+str(fileSize)+")<br />" return outstring def sizeof_fmt(num): for x in ['bytes','KB','MB','GB']: if num < 1024.0: return "%3.1f%s" % (num, x) num /= 1024.0 return "%3.1f%s" % (num, 'TB') ################# (3) 程式類別定義區 # 以下改用 CherryPy 網際框架程式架構 # 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計 class Midterm(object): # Midterm 類別的啟動設定 _cp_config = { 'tools.encode.encoding': 'utf-8', 'tools.sessions.on' : True, 'tools.sessions.storage_type' : 'file', #'tools.sessions.locking' : 'explicit', # session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄 'tools.sessions.storage_path' : data_dir+'/tmp', # session 有效時間設為 60 分鐘 'tools.sessions.timeout' : 60 } def __init__(self): # hope to create downloads and images directories  if not os.path.isdir(download_root_dir+"downloads"): try: os.makedirs(download_root_dir+"downloads") except: print("mkdir error") if not os.path.isdir(download_root_dir+"images"): try: os.makedirs(download_root_dir+"images") except: print("mkdir error") if not os.path.isdir(download_root_dir+"tmp"): try: os.makedirs(download_root_dir+"tmp") 
except: print("mkdir error") # 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行 @cherrypy.expose # index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法 # 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容 def index(self): outstring = ''' <!DOCTYPE html> <html> <head> </head> <body> <a href="a_40023256">a_40023256</a><br /> <a href="drawspur1">drawspur1</a><br /> <a href="drawspuraction1">drawspuraction1</a><br /> </body> </html> ''' return outstring @cherrypy.expose def a_40023256(self): outstring = ''' <!DOCTYPE html> <html> <head> 40023256 <head> <body> <br /><a href="index">index</a><br /> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def spur1(self, N1=15, N2=24, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <form method=POST action=spuraction1> 齒數1:<input type=text name=N1 value='''+str(N1)+'''><br /> 齒數2:<input type=text name=N2 value='''+str(N2)+'''><br /> 模數:<input type=text name=M value = '''+str(M)+'''><br /> 壓力角:<input type=text name=P value = '''+str(P)+'''><br /> <input type=submit value=send> </form> <br /><a href="index">index</a><br /> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def spuraction1(self, N1=15, N2=24, M=5, P=15): output = ''' <!doctype html><html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <title>2015CD Midterm</title> </head> <body> ''' output += "齒數1為"+str(N1)+"<br />" output += "齒數2為"+str(N2)+"<br />" output += "模數為"+str(M)+"<br />" output += "壓力角為"+str(P)+"<br />" output +='''<br /><a href="/spur1">spur1</a>(按下後再輸入)<br />''' output +='''<br /><a href="index">index</a><br /> </body> </html> ''' return output @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def drawspur1(self, N1=15, N2=24, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> </head> <body> <form method=POST action=drawspuraction1> 齒數1:<input type=text name=N1 value='''+str(N1)+'''><br /> 齒數2:<input type=text name=N2 value='''+str(N2)+'''><br /> <input type=submit value=畫出正齒輪輪廓><br /> (範圍:15~80) </form> <br /><a href="index">index</a><br /> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script> window.onload=function(){ brython(); } </script> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def drawspuraction1(self, N1=15, N2=24, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <a href="index">index</a><br /> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document from math import * # 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案 import spur # 準備在 id="plotarea" 的 canvas 中繪圖 canvas = document["plotarea"] ctx = canvas.getContext("2d") # 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖 # 其中並將工作分配給其他組員建立類似 spur.py 
的相關零件繪圖模組 # midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色 # Gear(midx, midy, rp, n=20, pa=20, color="black"): # 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角 # 壓力角 P 單位為角度 M = '''+str(M)+''' # 壓力角 pa 單位為角度 pa = '''+str(P)+''' # 齒輪齒數 n_g1 = '''+str(N1)+''' n_g2 = '''+str(N2)+''' # 計算兩齒輪的節圓半徑 rp_g1 = M*n_g1/2 rp_g2 = M*n_g2/2 # 繪圖第1齒輪的圓心座標 x_g1 = 400 y_g1 = 400 # 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同 x_g2 = x_g1 y_g2 = y_g1+ rp_g1 + rp_g2 # 將第1齒輪順時鐘轉 90 度 # 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖 ctx.save() # translate to the origin of second gear ctx.translate(x_g1, y_g1) # rotate to engage ctx.rotate(pi) # put it back ctx.translate(-x_g1, -y_g1) spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue") ctx.restore() # 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合 ctx.save() # translate to the origin of second gear ctx.translate(x_g2, y_g2) # rotate to engage ctx.rotate(-pi/n_g2) # put it back ctx.translate(-x_g2, -y_g2) spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "green") ctx.restore() </script> <canvas id="plotarea" width="3000" height="3000"></canvas> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def spur(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.3-20150514-095342/brython.js"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <form method=POST action=spuraction> 齒數:<input type=text name=N value='''+str(N)+'''><br /> 模數:<input type=text name=M value = '''+str(M)+'''><br /> 壓力角:<input type=text name=P value = '''+str(P)+'''><br /> <input type=submit value=send> </form> <br /><a href="index">index</a><br /> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def spuraction(self, N=20, M=5, P=15): output = ''' <!doctype html><html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <title>2015CD Midterm</title> </head> <body> ''' output += "齒數為"+str(N)+"<br />" output += "模數為"+str(M)+"<br />" output += "壓力角為"+str(P)+"<br />" output +='''<br /><a href="/spur">spur</a>(按下後再輸入)<br />''' output +='''<br /><a href="index">index</a><br /> </body> </html> ''' return output @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def drawspur(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> </head> <body> <form method=POST action=drawspuraction> 齒數:<input type=text name=N value='''+str(N)+'''><br /> 模數:<input type=text name=M value = '''+str(M)+'''><br /> 壓力角:<input type=text name=P value = '''+str(P)+'''><br /> <input type=submit value=畫出正齒輪輪廓> </form> <br /><a href="index">index</a><br /> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.3-20150514-095342/brython.js"></script> <script> window.onload=function(){ brython(); } </script> </body> </html> ''' return outstring @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def drawspuraction(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> </head> <body> <a href="index">index</a><br /> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document from math import * # 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案 import spur # 準備在 id="plotarea" 的 canvas 中繪圖 canvas = document["plotarea"] ctx = canvas.getContext("2d") # 以下利用 spur.py 程式進行繪圖 # N 為齒數 N = '''+str(N)+''' # M 為模數 M = 
'''+str(M)+''' # 壓力角 P 單位為角度 P = '''+str(P)+''' # 計算兩齒輪的節圓半徑 rp = N*M/2 spur.Spur(ctx).Gear(600, 600, rp, N, P, "blue") </script> <canvas id="plotarea" width="1200" height="1200"></canvas> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.3-20150514-095342/brython.js"></script> <script> window.onload=function(){ brython(); } </script> </body> </html> ''' return outstring @cherrypy.expose # W 為正方體的邊長 def cube(self, W=10): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> </head> <body> <!-- 使用者輸入表單的參數交由 cubeaction 方法處理 --> <form method=POST action=cubeaction> 正方體邊長:<input type=text name=W value='''+str(W)+'''><br /> <input type=submit value=送出> </form> <br /><a href="index">index</a><br /> </body> </html> ''' return outstring @cherrypy.expose # W 為正方體邊長, 內定值為 10 def cubeaction(self, W=10): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 先載入 pfcUtils.js 與 wl_header.js --> <script type="text/javascript" src="/static/weblink/pfcUtils.js"></script> <script type="text/javascript" src="/static/weblink/wl_header.js"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.3-20150514-095342/brython.js"></script> document.writeln ("Error loading Pro/Web.Link header!"); </script> <script> window.onload=function(){ brython(); } </script> </head> <!-- 不要使用 body 啟動 brython() 改為 window level 啟動 --> <body onload=""> <h1>Creo 參數化零件</h1> <a href="index">index</a><br /> <!-- 以下為 Creo Pro/Web.Link 程式, 將 JavaScrip 改為 Brython 程式 --> <script type="text/python"> from browser import document, window from math import * # 這個區域為 Brython 程式範圍, 註解必須採用 Python 格式 # 因為 pfcIsWindows() 為原生的 JavaScript 函式, 在 Brython 中引用必須透過 window 物件 if (!window.pfcIsWindows()) window.netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); # 若第三輸入為 false, 表示僅載入 session, 但是不顯示 # ret 為 model open return ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false) if (!ret.Status): window.alert("pwlMdlOpen failed (" + ret.ErrorCode + ")") # 將 ProE 執行階段設為變數 session session = window.pfcGetProESession() # 在視窗中打開零件檔案, 並且顯示出來 pro_window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt")) solid = session.GetModel("cube.prt", window.pfcCreate("pfcModelType").MDL_PART) # 在 Brython 中與 Python 語法相同, 只有初值設定問題, 無需宣告變數 # length, width, myf, myn, i, j, volume, count, d1Value, d2Value # 將模型檔中的 length 變數設為 javascript 中的 length 變數 length = solid.GetParam("a1") # 將模型檔中的 width 變數設為 javascript 中的 width 變數 width = solid.GetParam("a2") # 改變零件尺寸 # myf=20 # myn=20 volume = 0 count = 0 try: # 以下採用 URL 輸入對應變數 # createParametersFromArguments (); # 以下則直接利用 javascript 程式改變零件參數 for i in range(5): myf ='''+str(W)+''' myn ='''+str(W)+''' + i*2.0 # 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值 d1Value = window.pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf) d2Value = window.pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn) # 將處理好的變數值, 指定給對應的零件變數 length.Value = d1Value width.Value = d2Value # 零件尺寸重新設定後, 呼叫 Regenerate 更新模型 # 在 JavaScript 為 null 在 Brython 為 None solid.Regenerate(None) # 利用 GetMassProperty 取得模型的質量相關物件 properties = solid.GetMassProperty(None) # volume = volume + properties.Volume volume = properties.Volume count = count + 1 window.alert("執行第"+count+"次,零件總體積:"+volume) # 將零件存為新檔案 newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt") if (!newfile.Status): 
window.alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")") # window.alert("共執行:"+count+"次,零件總體積:"+volume) # window.alert("零件體積:"+properties.Volume) # window.alert("零件體積取整數:"+Math.round(properties.Volume)); except: window.alert ("Exception occurred: "+window.pfcGetExceptionType (err)) </script> ''' return outstring @cherrypy.expose def fileuploadform(self): return '''<h1>file upload</h1> <script src="/static/jquery.js" type="text/javascript"></script> <script src="/static/axuploader.js" type="text/javascript"></script> <script> $(document).ready(function(){ $('.prova').axuploader({url:'fileaxupload', allowExt:['jpg','png','gif','7z','pdf','zip','flv','stl','swf'], finish:function(x,files) { alert('All files have been uploaded: '+files); }, enable:true, remotePath:function(){ return 'downloads/'; } }); }); </script> <div class="prova"></div> <input type="button" onclick="$('.prova').axuploader('disable')" value="asd" /> <input type="button" onclick="$('.prova').axuploader('enable')" value="ok" /> </section></body></html> ''' @cherrypy.expose def fileaxupload(self, *args, **kwargs): filename = kwargs["ax-file-name"] flag = kwargs["start"] if flag == "0": file = open(download_root_dir+"downloads/"+filename, "wb") else: file = open(download_root_dir+"downloads/"+filename, "ab") file.write(cherrypy.request.body.read()) header= cherrypy.request.body.read(80) file.close() return "files uploaded!"+header.decode("UTF-8") @cherrypy.expose def download_list(self, item_per_page=5, page=1, keyword=None, *args, **kwargs): files = os.listdir(download_root_dir+"downloads/") total_rows = len(files) totalpage = math.ceil(total_rows/int(item_per_page)) starti = int(item_per_page) * (int(page) - 1) + 1 endi = starti + int(item_per_page) - 1 outstring = "<form method='post' action='delete_file'>" notlast = False if total_rows > 0: outstring += "<br />" if (int(page) * int(item_per_page)) < total_rows: notlast = True if int(page) > 1: outstring += "<a href='" outstring += "download_list?&amp;page=1&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'><<</a> " page_num = int(page) - 1 outstring += "<a href='" outstring += "download_list?&amp;page="+str(page_num)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>Previous</a> " span = 10 for index in range(int(page)-span, int(page)+span): if index>= 0 and index< totalpage: page_now = index + 1 if page_now == int(page): outstring += "<font size='+1' color='red'>"+str(page)+" </font>" else: outstring += "<a href='" outstring += "download_list?&amp;page="+str(page_now)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>"+str(page_now)+"</a> " if notlast == True: nextpage = int(page) + 1 outstring += " <a href='" outstring += "download_list?&amp;page="+str(nextpage)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>Next</a>" outstring += " <a href='" outstring += "download_list?&amp;page="+str(totalpage)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>>></a><br /><br />" if (int(page) * int(item_per_page)) < total_rows: notlast = True outstring += downloadlist_access_list(files, starti, endi)+"<br />" else: outstring += "<br /><br />" outstring += downloadlist_access_list(files, starti, total_rows)+"<br />" if int(page) > 1: 
outstring += "<a href='" outstring += "download_list?&amp;page=1&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'><<</a> " page_num = int(page) - 1 outstring += "<a href='" outstring += "download_list?&amp;page="+str(page_num)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>Previous</a> " span = 10 for index in range(int(page)-span, int(page)+span): #for ($j=$page-$range;$j<$page+$range;$j++) if index >=0 and index < totalpage: page_now = index + 1 if page_now == int(page): outstring += "<font size='+1' color='red'>"+str(page)+" </font>" else: outstring += "<a href='" outstring += "download_list?&amp;page="+str(page_now)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>"+str(page_now)+"</a> " if notlast == True: nextpage = int(page) + 1 outstring += " <a href='" outstring += "download_list?&amp;page="+str(nextpage)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>Next</a>" outstring += " <a href='" outstring += "download_list?&amp;page="+str(totalpage)+"&amp;item_per_page="+str(item_per_page)+"&amp;keyword="+str(cherrypy.session.get('download_keyword')) outstring += "'>>></a>" else: outstring += "no data!" outstring += "<br /><br /><input type='submit' value='delete'><input type='reset' value='reset'></form>" return "<div class='container'><nav>"+ \ "</nav><section><h1>Download List</h1>"+outstring+"<br/><br /></body></html>" class Download: @cherrypy.expose def index(self, filepath): return serve_file(filepath, "application/x-download", "attachment") ################# (4) 程式啟動區 # 配合程式檔案所在目錄設定靜態目錄或靜態檔案 application_conf = {'/static':{ 'tools.staticdir.on': True, # 程式執行目錄下, 必須自行建立 static 目錄 'tools.staticdir.dir': _curdir+"/static"}, '/downloads':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/downloads"}, '/images':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/images"} } root = Midterm() root.download = Download() root.man = man.MAN() root.man2 = man2.MAN() #root.gear = gear.Gear() if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示在 OpenSfhit 執行 application = cherrypy.Application(root, config=application_conf) else: # 表示在近端執行 cherrypy.quickstart(root, config=application_conf)
agpl-3.0
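# Once running locally (cherrypy.quickstart defaults to port 8080), the methods
# above are reachable as URLs; a few examples matching the handlers defined in
# this file (localhost and the port are assumptions of a local run):
#   http://localhost:8080/spur?N=20&M=5&P=15
#   http://localhost:8080/drawspur1?N1=15&N2=24
#   http://localhost:8080/download_list?item_per_page=5&page=1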
RoboAvatar65/ChessRobot
algorithm.py
1
5095
############################################################ ###algorithm.py Chess Algorithm Engine ### ###Written by Nicholas Maselli ### ### ### ###Purpose: The Algorithm class creates a minimax tree ### ###that utilizies alpha beta pruning and iterative ### ###deepening to search through the tree quickly. Piece ### ###square tables are used to obtain good value functions ### ###for chess board evaluation. ### ### ### ###Version: 1.0 ### ###Date: 6-30-17 ### ############################################################ from chess import Chess import random import collections import time ############################### #####MinimaxGameTree Class##### ############################### class MinimaxGameTree(): def __init__(self, chess, color, depth): self.chess = chess self.player = color self.depth = depth #Time self.fulltime = 0 #Continuously Iterate search depth to obtain better move ordering def iterativeDeepening(self): alpha = -40000 beta = 40000 pv = [] for depth in range(1, self.depth+1): data = self.dfsMax(alpha, beta, depth, pv) pv = data[1] best_value = data[0] move_list = data[1] best_move = move_list[self.depth-1] return(best_move) #Minimax algorithm with alpha-beta pruning, max function def dfsMax(self, alpha, beta, depth, pv): if (depth == 0): value = self.evaluate_board(self.player) return((value, [])) #Start with the principal value move move_list = [] best_move = None if (pv != []): move = pv.pop() self.next_position(move) data = self.dfsMin(alpha, beta, depth-1, pv) self.previous_position() value = data[0] if (value >= beta): move_list = data[1] move_list.append(best_move) return((beta, move_list)) if (value > alpha): alpha = value best_move = move move_list = data[1] for move in self.chess.legal_moves(): self.next_position(move) data = self.dfsMin(alpha, beta, depth-1, pv) self.previous_position() value = data[0] if (value >= beta): move_list = data[1] move_list.append(best_move) return((beta, move_list)) if (value > alpha): alpha = value best_move = move move_list = data[1] #If you are in checkmate if (best_move == None): alpha = -20000 move_list.append(best_move) return((alpha, move_list)) #Minimax algorithm with alpha-beta pruning, min function def dfsMin(self, alpha, beta, depth, pv): if (depth == 0): value = self.evaluate_board(self.player) return((value, [])) #Start with the principal value move move_list = [] best_move = None if (pv != []): move = pv.pop() self.next_position(move) data = self.dfsMax(alpha, beta, depth-1, pv) self.previous_position() value = data[0] if (value <= alpha): move_list = data[1] move_list.append(best_move) return((alpha, move_list)) if (value < beta): beta = value best_move = move move_list = data[1] for move in self.chess.legal_moves(): self.next_position(move) data = self.dfsMax(alpha, beta, depth-1, pv) self.previous_position() value = data[0] if (value <= alpha): move_list = data[1] move_list.append(best_move) return((alpha, move_list)) if (value < beta): beta = value best_move = move move_list = data[1] #If opponent is in checkmate if (best_move == None): beta = 20000 move_list.append(best_move) return((beta, move_list)) #Evaluate the current board and state from color's perspective def evaluate_board(self, color): if (color == 'white'): value = self.chess.state.value if (color == 'black'): value = -self.chess.state.value return(value) #Move to the next position in the chess board def next_position(self, move): self.chess.move_piece(move) #Move to previous position in the chessboard def previous_position(self): self.chess.undo_move() 
######################### #####Algorithm Class##### ######################### class Algorithm(): #Initialize values def __init__(self, chess, player, depth): self.chess = chess self.player = player self.depth = depth self.fulltime = 0 #Choose next move using algorithm def best_move(self): self.tree = MinimaxGameTree(self.chess, self.player, self.depth) #Timing code is commented out; uncomment to benchmark the search #start_time = time.time() move = self.tree.iterativeDeepening() #end_time = time.time() #print("Searching the tree: {}".format(end_time - start_time)) notation = self.chess.coordinate_to_notation(move) return(notation)
mit
tpazderka/pysaml2
src/saml2/server.py
1
27849
#!/usr/bin/env python # -*- coding: utf-8 -*- # """Contains classes and functions that a SAML2.0 Identity provider (IdP) or attribute authority (AA) may use to conclude its tasks. """ import logging import os import importlib import shelve import threading from saml2.eptid import EptidShelve, Eptid from saml2.saml import EncryptedAssertion from saml2.sdb import SessionStorage from saml2.schema import soapenv from saml2.samlp import NameIDMappingResponse from saml2.entity import Entity from saml2 import saml, element_to_extension_element from saml2 import class_name from saml2 import BINDING_HTTP_REDIRECT from saml2.request import AuthnRequest from saml2.request import AssertionIDRequest from saml2.request import AttributeQuery from saml2.request import NameIDMappingRequest from saml2.request import AuthzDecisionQuery from saml2.request import AuthnQuery from saml2.s_utils import MissingValue, Unknown, rndstr from saml2.sigver import pre_signature_part, signed_instance_factory, CertificateError, CryptoBackendXmlSec1 from saml2.assertion import Assertion from saml2.assertion import Policy from saml2.assertion import restriction_from_attribute_spec from saml2.assertion import filter_attribute_value_assertions from saml2.ident import IdentDB from saml2.profile import ecp logger = logging.getLogger(__name__) AUTHN_DICT_MAP = { "decl": "authn_decl", "authn_auth": "authn_auth", "class_ref": "authn_class", "authn_instant": "authn_instant", "subject_locality": "subject_locality" } class Server(Entity): """ A class that does things that IdPs or AAs do """ def __init__(self, config_file="", config=None, cache=None, stype="idp", symkey=""): Entity.__init__(self, stype, config, config_file) self.eptid = None self.init_config(stype) self.cache = cache self.ticket = {} # self.session_db = self.choose_session_storage() # Needed for self.symkey = symkey self.seed = rndstr() self.iv = os.urandom(16) self.lock = threading.Lock() def getvalid_certificate_str(self): if self.sec.cert_handler is not None: return self.sec.cert_handler._last_validated_cert return None def support_AssertionIDRequest(self): return True def support_AuthnQuery(self): return True def choose_session_storage(self): _spec = self.config.getattr("session_storage", "idp") if not _spec: return SessionStorage() elif isinstance(_spec, basestring): if _spec.lower() == "memory": return SessionStorage() else: # Should be tuple typ, data = _spec if typ.lower() == "mongodb": from saml2.mongo_store import SessionStorageMDB return SessionStorageMDB(database=data, collection="session") raise NotImplementedError("No such storage type implemented") def init_config(self, stype="idp"): """ Remaining init of the server configuration :param stype: The type of Server ("idp"/"aa") """ if stype == "aa": return # subject information is stored in a database # default database is in memory which is OK in some setups dbspec = self.config.getattr("subject_data", "idp") idb = None typ = "" if not dbspec: idb = {} elif isinstance(dbspec, basestring): idb = shelve.open(dbspec, writeback=True) else: # database spec is a 2-tuple (type, address) #print >> sys.stderr, "DBSPEC: %s" % (dbspec,) (typ, addr) = dbspec if typ == "shelve": idb = shelve.open(addr, writeback=True) elif typ == "memcached": import memcache idb = memcache.Client(addr) elif typ == "dict": # in-memory dictionary idb = {} elif typ == "mongodb": from saml2.mongo_store import IdentMDB self.ident = IdentMDB(database=addr, collection="ident") elif typ == "identdb": mod, clas = addr.rsplit('.', 1) mod = 
importlib.import_module(mod) self.ident = getattr(mod, clas)() if typ == "mongodb" or typ == "identdb": pass elif idb is not None: self.ident = IdentDB(idb) elif dbspec: raise Exception("Couldn't open identity database: %s" % (dbspec,)) _domain = self.config.getattr("domain", "idp") if _domain: self.ident.domain = _domain self.ident.name_qualifier = self.config.entityid dbspec = self.config.getattr("edu_person_targeted_id", "idp") if not dbspec: pass else: typ = dbspec[0] addr = dbspec[1] secret = dbspec[2] if typ == "shelve": self.eptid = EptidShelve(secret, addr) elif typ == "mongodb": from saml2.mongo_store import EptidMDB self.eptid = EptidMDB(secret, database=addr, collection="eptid") else: self.eptid = Eptid(secret) def wants(self, sp_entity_id, index=None): """ Returns what attributes the SP requires and which are optional if any such demands are registered in the Metadata. :param sp_entity_id: The entity id of the SP :param index: which of the attribute consumer services it's all about if index == None then all attribute consumer services are clumped together. :return: 2-tuple, list of required and list of optional attributes """ return self.metadata.attribute_requirement(sp_entity_id, index) def verify_assertion_consumer_service(self, request): _acs = request.assertion_consumer_service_url _aci = request.assertion_consumer_service_index _binding = request.protocol_binding _eid = request.issuer.text if _acs: # look up the acs for that binding in the metadata given the issuer # Assuming the format is entity for acs in self.metadata.assertion_consumer_service(_eid, _binding): if _acs == acs.text: return True elif _aci: for acs in self.metadata.assertion_consumer_service(_eid, _binding): if _aci == acs.index: return True return False # ------------------------------------------------------------------------- def parse_authn_request(self, enc_request, binding=BINDING_HTTP_REDIRECT): """Parse an Authentication Request :param enc_request: The request in its transport format :param binding: Which binding that was used to transport the message to this entity. 
:return: A dictionary with keys: consumer_url - as taken from the SP's entity_id and the metadata id - the id of the request sp_entity_id - the entity id of the SP request - The verified request """ return self._parse_request(enc_request, AuthnRequest, "single_sign_on_service", binding) def parse_attribute_query(self, xml_string, binding): """ Parse an attribute query :param xml_string: The Attribute Query as an XML string :param binding: Which binding that was used for the request :return: A query instance """ return self._parse_request(xml_string, AttributeQuery, "attribute_service", binding) def parse_authz_decision_query(self, xml_string, binding): """ Parse an authorization decision query :param xml_string: The Authz decision Query as an XML string :param binding: Which binding that was used when receiving this query :return: Query instance """ return self._parse_request(xml_string, AuthzDecisionQuery, "authz_service", binding) def parse_assertion_id_request(self, xml_string, binding): """ Parse an assertion id query :param xml_string: The AssertionIDRequest as an XML string :param binding: Which binding that was used when receiving this request :return: Query instance """ return self._parse_request(xml_string, AssertionIDRequest, "assertion_id_request_service", binding) def parse_authn_query(self, xml_string, binding): """ Parse an authn query :param xml_string: The AuthnQuery as an XML string :param binding: Which binding that was used when receiving this query :return: Query instance """ return self._parse_request(xml_string, AuthnQuery, "authn_query_service", binding) def parse_name_id_mapping_request(self, xml_string, binding): """ Parse a nameid mapping request :param xml_string: The NameIDMappingRequest as an XML string :param binding: Which binding that was used when receiving this request :return: Query instance """ return self._parse_request(xml_string, NameIDMappingRequest, "name_id_mapping_service", binding) # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ def _authn_response(self, in_response_to, consumer_url, sp_entity_id, identity=None, name_id=None, status=None, authn=None, issuer=None, policy=None, sign_assertion=False, sign_response=False, best_effort=False, encrypt_assertion=False, encrypt_cert=None): """ Create a response. A layer of indirection. :param in_response_to: The session identifier of the request :param consumer_url: The URL which should receive the response :param sp_entity_id: The entity identifier of the SP :param identity: A dictionary with attributes and values that are expected to be the bases for the assertion in the response. :param name_id: The identifier of the subject :param status: The status of the response :param authn: A dictionary containing information about the authn context. :param issuer: The issuer of the response :param sign_assertion: Whether the assertion should be signed or not :param sign_response: Whether the response should be signed or not :param best_effort: Send a response even if not all of the SP's demands can be met. 
:return: A response instance """ to_sign = [] args = {} #if identity: _issuer = self._issuer(issuer) ast = Assertion(identity) ast.acs = self.config.getattr("attribute_converters", "idp") if policy is None: policy = Policy() try: ast.apply_policy(sp_entity_id, policy, self.metadata) except MissingValue, exc: if not best_effort: return self.create_error_response(in_response_to, consumer_url, exc, sign_response) if authn: # expected to be a dictionary # Would like to use dict comprehension but ... authn_args = dict([ (AUTHN_DICT_MAP[k], v) for k, v in authn.items() if k in AUTHN_DICT_MAP]) assertion = ast.construct(sp_entity_id, in_response_to, consumer_url, name_id, self.config.attribute_converters, policy, issuer=_issuer, **authn_args) else: assertion = ast.construct(sp_entity_id, in_response_to, consumer_url, name_id, self.config.attribute_converters, policy, issuer=_issuer) if sign_assertion is not None and sign_assertion: assertion.signature = pre_signature_part(assertion.id, self.sec.my_cert, 1) # Just the assertion or the response and the assertion ? to_sign = [(class_name(assertion), assertion.id)] # Store which assertion has been sent to which SP about which # subject. # self.cache.set(assertion.subject.name_id.text, # sp_entity_id, {"ava": identity, "authn": authn}, # assertion.conditions.not_on_or_after) args["assertion"] = assertion if self.support_AssertionIDRequest() or self.support_AuthnQuery(): self.session_db.store_assertion(assertion, to_sign) return self._response(in_response_to, consumer_url, status, issuer, sign_response, to_sign, encrypt_assertion=encrypt_assertion, encrypt_cert=encrypt_cert, **args) # ------------------------------------------------------------------------ #noinspection PyUnusedLocal def create_attribute_response(self, identity, in_response_to, destination, sp_entity_id, userid="", name_id=None, status=None, issuer=None, sign_assertion=False, sign_response=False, attributes=None, **kwargs): """ Create an attribute assertion response. :param identity: A dictionary with attributes and values that are expected to be the bases for the assertion in the response. :param in_response_to: The session identifier of the request :param destination: The URL which should receive the response :param sp_entity_id: The entity identifier of the SP :param userid: An identifier of the user :param name_id: The identifier of the subject :param status: The status of the response :param issuer: The issuer of the response :param sign_assertion: Whether the assertion should be signed or not :param sign_response: Whether the whole response should be signed :param attributes: :param kwargs: To catch extra keyword arguments :return: A response instance """ policy = self.config.getattr("policy", "aa") if not name_id and userid: try: name_id = self.ident.construct_nameid(userid, policy, sp_entity_id) logger.warning("Unspecified NameID format") except Exception: pass to_sign = [] args = {} if identity: _issuer = self._issuer(issuer) ast = Assertion(identity) if policy: ast.apply_policy(sp_entity_id, policy, self.metadata) else: policy = Policy() if attributes: restr = restriction_from_attribute_spec(attributes) ast = filter_attribute_value_assertions(ast) assertion = ast.construct(sp_entity_id, in_response_to, destination, name_id, self.config.attribute_converters, policy, issuer=_issuer) if sign_assertion: assertion.signature = pre_signature_part(assertion.id, self.sec.my_cert, 1) # Just the assertion or the response and the assertion ? 
to_sign = [(class_name(assertion), assertion.id)] args["assertion"] = assertion return self._response(in_response_to, destination, status, issuer, sign_response, to_sign, **args) # ------------------------------------------------------------------------ def create_authn_response(self, identity, in_response_to, destination, sp_entity_id, name_id_policy=None, userid=None, name_id=None, authn=None, issuer=None, sign_response=None, sign_assertion=None, encrypt_cert=None, encrypt_assertion=None, **kwargs): """ Constructs an AuthenticationResponse :param identity: Information about a user :param in_response_to: The identifier of the authentication request this response is an answer to. :param destination: Where the response should be sent :param sp_entity_id: The entity identifier of the Service Provider :param name_id_policy: How the NameID should be constructed :param userid: The subject identifier :param authn: Dictionary with information about the authentication context :param issuer: Issuer of the response :param sign_assertion: Whether the assertion should be signed or not. :param sign_response: Whether the response should be signed or not. :return: A response instance """ try: policy = kwargs["release_policy"] except KeyError: policy = self.config.getattr("policy", "idp") try: best_effort = kwargs["best_effort"] except KeyError: best_effort = False if sign_assertion is None: sign_assertion = self.config.getattr("sign_assertion", "idp") if sign_assertion is None: sign_assertion = False if sign_response is None: sign_response = self.config.getattr("sign_response", "idp") if sign_response is None: sign_response = False if encrypt_assertion is None: encrypt_assertion = self.config.getattr("encrypt_assertion", "idp") if encrypt_assertion is None: encrypt_assertion = False if encrypt_assertion: if encrypt_cert is not None: verify_encrypt_cert = self.config.getattr("verify_encrypt_cert", "idp") if verify_encrypt_cert is not None: if not verify_encrypt_cert(encrypt_cert): raise CertificateError("Invalid certificate for encryption!") else: raise CertificateError("No SPCertEncType certificate for encryption contained in authentication " "request.") else: encrypt_assertion = False if not name_id: try: nid_formats = [] for _sp in self.metadata[sp_entity_id]["spsso_descriptor"]: if "name_id_format" in _sp: nid_formats.extend([n["text"] for n in _sp["name_id_format"]]) try: snq = name_id_policy.sp_name_qualifier except AttributeError: snq = sp_entity_id if not snq: snq = sp_entity_id kwa = {"sp_name_qualifier": snq} try: kwa["format"] = name_id_policy.format except AttributeError: pass _nids = self.ident.find_nameid(userid, **kwa) # either none or one if _nids: name_id = _nids[0] else: name_id = self.ident.construct_nameid(userid, policy, sp_entity_id, name_id_policy) except IOError, exc: response = self.create_error_response(in_response_to, destination, sp_entity_id, exc, name_id) return ("%s" % response).split("\n") try: _authn = authn if (sign_assertion or sign_response) and self.sec.cert_handler.generate_cert(): with self.lock: self.sec.cert_handler.update_cert(True) return self._authn_response(in_response_to, # in_response_to destination, # consumer_url sp_entity_id, # sp_entity_id identity, # identity as dictionary name_id, authn=_authn, issuer=issuer, policy=policy, sign_assertion=sign_assertion, sign_response=sign_response, best_effort=best_effort, encrypt_assertion=encrypt_assertion, encrypt_cert=encrypt_cert) return self._authn_response(in_response_to, # in_response_to destination, # 
consumer_url sp_entity_id, # sp_entity_id identity, # identity as dictionary name_id, authn=_authn, issuer=issuer, policy=policy, sign_assertion=sign_assertion, sign_response=sign_response, best_effort=best_effort, encrypt_assertion=encrypt_assertion, encrypt_cert=encrypt_cert) except MissingValue, exc: return self.create_error_response(in_response_to, destination, sp_entity_id, exc, name_id) def create_authn_request_response(self, identity, in_response_to, destination, sp_entity_id, name_id_policy=None, userid=None, name_id=None, authn=None, authn_decl=None, issuer=None, sign_response=False, sign_assertion=False, **kwargs): return self.create_authn_response(identity, in_response_to, destination, sp_entity_id, name_id_policy, userid, name_id, authn, issuer, sign_response, sign_assertion, authn_decl=authn_decl) #noinspection PyUnusedLocal def create_assertion_id_request_response(self, assertion_id, sign=False, **kwargs): """ :param assertion_id: :param sign: :return: """ try: (assertion, to_sign) = self.session_db.get_assertion(assertion_id) except KeyError: raise Unknown if to_sign: if assertion.signature is None: assertion.signature = pre_signature_part(assertion.id, self.sec.my_cert, 1) return signed_instance_factory(assertion, self.sec, to_sign) else: return assertion #noinspection PyUnusedLocal def create_name_id_mapping_response(self, name_id=None, encrypted_id=None, in_response_to=None, issuer=None, sign_response=False, status=None, **kwargs): """ protocol for mapping a principal's name identifier into a different name identifier for the same principal. Done over soap. :param name_id: :param encrypted_id: :param in_response_to: :param issuer: :param sign_response: :param status: :return: """ # Done over SOAP ms_args = self.message_args() _resp = NameIDMappingResponse(name_id, encrypted_id, in_response_to=in_response_to, **ms_args) if sign_response: return self.sign(_resp) else: logger.info("Message: %s" % _resp) return _resp def create_authn_query_response(self, subject, session_index=None, requested_context=None, in_response_to=None, issuer=None, sign_response=False, status=None, **kwargs): """ A successful <Response> will contain one or more assertions containing authentication statements. 
:return: """ margs = self.message_args() asserts = [] for statement in self.session_db.get_authn_statements( subject.name_id, session_index, requested_context): asserts.append(saml.Assertion(authn_statement=statement, subject=subject, **margs)) if asserts: args = {"assertion": asserts} else: args = {} return self._response(in_response_to, "", status, issuer, sign_response, to_sign=[], **args) # --------- def parse_ecp_authn_request(self): pass def create_ecp_authn_request_response(self, acs_url, identity, in_response_to, destination, sp_entity_id, name_id_policy=None, userid=None, name_id=None, authn=None, issuer=None, sign_response=False, sign_assertion=False, **kwargs): # ---------------------------------------- # <ecp:Response # ---------------------------------------- ecp_response = ecp.Response(assertion_consumer_service_url=acs_url) header = soapenv.Header() header.extension_elements = [element_to_extension_element(ecp_response)] # ---------------------------------------- # <samlp:Response # ---------------------------------------- response = self.create_authn_response(identity, in_response_to, destination, sp_entity_id, name_id_policy, userid, name_id, authn, issuer, sign_response, sign_assertion) body = soapenv.Body() body.extension_elements = [element_to_extension_element(response)] soap_envelope = soapenv.Envelope(header=header, body=body) return "%s" % soap_envelope
bsd-2-clause
okwasi/googlemock
scripts/generator/cpp/gmock_class.py
82
7454
#!/usr/bin/env python # # Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generate Google Mock classes from base classes. This program will read in a C++ source file and output the Google Mock classes for the specified classes. If no class is specified, all classes in the source file are emitted. Usage: gmock_class.py header-file.h [ClassName]... Output is sent to stdout. """ __author__ = '[email protected] (Neal Norwitz)' import os import re import sys from cpp import ast from cpp import utils # Preserve compatibility with Python 2.3. try: _dummy = set except NameError: import sets set = sets.Set _VERSION = (1, 0, 1) # The version of this script. # How many spaces to indent. Can set me with the INDENT environment variable. _INDENT = 2 def _GenerateMethods(output_lines, source, class_node): function_type = ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR indent = ' ' * _INDENT for node in class_node.body: # We only care about virtual functions. if (isinstance(node, ast.Function) and node.modifiers & function_type and not node.modifiers & ctor_or_dtor): # Pick out all the elements we need from the original function. const = '' if node.modifiers & ast.FUNCTION_CONST: const = 'CONST_' return_type = 'void' if node.return_type: # Add modifiers like 'const'. modifiers = '' if node.return_type.modifiers: modifiers = ' '.join(node.return_type.modifiers) + ' ' return_type = modifiers + node.return_type.name template_args = [arg.name for arg in node.return_type.templated_types] if template_args: return_type += '<' + ', '.join(template_args) + '>' if len(template_args) > 1: for line in [ '// The following line won\'t really compile, as the return', '// type has multiple template arguments. To fix it, use a', '// typedef for the return type.']: output_lines.append(indent + line) if node.return_type.pointer: return_type += '*' if node.return_type.reference: return_type += '&' num_parameters = len(node.parameters) if len(node.parameters) == 1: first_param = node.parameters[0] if source[first_param.start:first_param.end].strip() == 'void': # We must treat T(void) as a function with no parameters. num_parameters = 0 mock_method_macro = 'MOCK_%sMETHOD%d' % (const, num_parameters) args = '' if node.parameters: # Due to the parser limitations, it is impossible to keep comments # while stripping the default parameters. When defaults are # present, we choose to strip them and comments (and produce # compilable code). # TODO([email protected]): Investigate whether it is possible to # preserve parameter name when reconstructing parameter text from # the AST. if len([param for param in node.parameters if param.default]) > 0: args = ', '.join(param.type.name for param in node.parameters) else: # Get the full text of the parameters from the start # of the first parameter to the end of the last parameter. start = node.parameters[0].start end = node.parameters[-1].end # Remove // comments. 
args_strings = re.sub(r'//.*', '', source[start:end]) # Condense multiple spaces and eliminate newlines putting the # parameters together on a single line. Ensure there is a # space in an argument which is split by a newline without # intervening whitespace, e.g.: int\nBar args = re.sub(' +', ' ', args_strings.replace('\n', ' ')) # Create the mock method definition. output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name), '%s%s(%s));' % (indent*3, return_type, args)]) def _GenerateMocks(filename, source, ast_list, desired_class_names): processed_class_names = set() lines = [] for node in ast_list: if (isinstance(node, ast.Class) and node.body and # desired_class_names being None means that all classes are selected. (not desired_class_names or node.name in desired_class_names)): class_name = node.name processed_class_names.add(class_name) class_node = node # Add namespace before the class. if class_node.namespace: lines.extend(['namespace %s {' % n for n in class_node.namespace]) # } lines.append('') # Add the class prolog. lines.append('class Mock%s : public %s {' % (class_name, class_name)) # } lines.append('%spublic:' % (' ' * (_INDENT // 2))) # Add all the methods. _GenerateMethods(lines, source, class_node) # Close the class. if lines: # If there are no virtual methods, no need for a public label. if len(lines) == 2: del lines[-1] # Only close the class if there really is a class. lines.append('};') lines.append('') # Add an extra newline. # Close the namespace. if class_node.namespace: for i in range(len(class_node.namespace)-1, -1, -1): lines.append('} // namespace %s' % class_node.namespace[i]) lines.append('') # Add an extra newline. if desired_class_names: missing_class_name_list = list(desired_class_names - processed_class_names) if missing_class_name_list: missing_class_name_list.sort() sys.stderr.write('Class(es) not found in %s: %s\n' % (filename, ', '.join(missing_class_name_list))) elif not processed_class_names: sys.stderr.write('No class found in %s\n' % filename) return lines def main(argv=sys.argv): if len(argv) < 2: sys.stderr.write('Google Mock Class Generator v%s\n\n' % '.'.join(map(str, _VERSION))) sys.stderr.write(__doc__) return 1 global _INDENT try: _INDENT = int(os.environ['INDENT']) except KeyError: pass except: sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT')) filename = argv[1] desired_class_names = None # None means all classes in the source file. if len(argv) >= 3: desired_class_names = set(argv[2:]) source = utils.ReadFile(filename) if source is None: return 1 builder = ast.BuilderFromSource(source, filename) try: entire_ast = filter(None, builder.Generate()) except KeyboardInterrupt: return except: # An error message was already printed since we couldn't parse. pass else: lines = _GenerateMocks(filename, source, entire_ast, desired_class_names) sys.stdout.write('\n'.join(lines)) if __name__ == '__main__': main(sys.argv)
bsd-3-clause
zeha/multiapt
extlib/paramiko-1.7.3/paramiko/common.py
1
4059
# Copyright (C) 2003-2007 Robey Pointer <[email protected]> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Common constants and global variables. """ MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, MSG_SERVICE_REQUEST, \ MSG_SERVICE_ACCEPT = range(1, 7) MSG_KEXINIT, MSG_NEWKEYS = range(20, 22) MSG_USERAUTH_REQUEST, MSG_USERAUTH_FAILURE, MSG_USERAUTH_SUCCESS, \ MSG_USERAUTH_BANNER = range(50, 54) MSG_USERAUTH_PK_OK = 60 MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE = range(60, 62) MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE = range(80, 83) MSG_CHANNEL_OPEN, MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, \ MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, \ MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MSG_CHANNEL_REQUEST, \ MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE = range(90, 101) # for debugging: MSG_NAMES = { MSG_DISCONNECT: 'disconnect', MSG_IGNORE: 'ignore', MSG_UNIMPLEMENTED: 'unimplemented', MSG_DEBUG: 'debug', MSG_SERVICE_REQUEST: 'service-request', MSG_SERVICE_ACCEPT: 'service-accept', MSG_KEXINIT: 'kexinit', MSG_NEWKEYS: 'newkeys', 30: 'kex30', 31: 'kex31', 32: 'kex32', 33: 'kex33', 34: 'kex34', MSG_USERAUTH_REQUEST: 'userauth-request', MSG_USERAUTH_FAILURE: 'userauth-failure', MSG_USERAUTH_SUCCESS: 'userauth-success', MSG_USERAUTH_BANNER: 'userauth-banner', MSG_USERAUTH_PK_OK: 'userauth-60(pk-ok/info-request)', MSG_USERAUTH_INFO_RESPONSE: 'userauth-info-response', MSG_GLOBAL_REQUEST: 'global-request', MSG_REQUEST_SUCCESS: 'request-success', MSG_REQUEST_FAILURE: 'request-failure', MSG_CHANNEL_OPEN: 'channel-open', MSG_CHANNEL_OPEN_SUCCESS: 'channel-open-success', MSG_CHANNEL_OPEN_FAILURE: 'channel-open-failure', MSG_CHANNEL_WINDOW_ADJUST: 'channel-window-adjust', MSG_CHANNEL_DATA: 'channel-data', MSG_CHANNEL_EXTENDED_DATA: 'channel-extended-data', MSG_CHANNEL_EOF: 'channel-eof', MSG_CHANNEL_CLOSE: 'channel-close', MSG_CHANNEL_REQUEST: 'channel-request', MSG_CHANNEL_SUCCESS: 'channel-success', MSG_CHANNEL_FAILURE: 'channel-failure' } # authentication request return codes: AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3) # channel request failed reasons: (OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE) = range(0, 5) CONNECTION_FAILED_CODE = { 1: 'Administratively prohibited', 2: 'Connect failed', 3: 'Unknown channel type', 4: 'Resource shortage' } DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_AUTH_CANCELLED_BY_USER, \ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 7, 13, 14 from osrandom import OSRandomPool # keep a crypto-strong PRNG nearby randpool = OSRandomPool() import sys if sys.version_info < (2, 3): try: import logging except: import logging22 as logging import select PY22 = True import socket if not hasattr(socket, 
'timeout'): class timeout(socket.error): pass socket.timeout = timeout del timeout else: import logging PY22 = False DEBUG = logging.DEBUG INFO = logging.INFO WARNING = logging.WARNING ERROR = logging.ERROR CRITICAL = logging.CRITICAL
mit
netzkolchose/django-cms
cms/migrations/0002_auto_20140816_1918.py
45
8472
# -*- coding: utf-8 -*- from __future__ import unicode_literals import cms.models.static_placeholder import cms.models.fields from django.conf import settings from django.contrib.auth import get_user_model from django.db import models, migrations import django.utils.timezone User = get_user_model() user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name) user_ptr_name = '%s_ptr' % User._meta.object_name.lower() class Migration(migrations.Migration): dependencies = [ ('cms', '0001_initial'), ] operations = [ migrations.CreateModel( name='PageUser', fields=[ (user_ptr_name, models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, auto_created=True, parent_link=True, serialize=False)), ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='created_users')), ], options={ 'verbose_name': 'User (page)', 'verbose_name_plural': 'Users (page)', }, bases=(user_model_label,), ), migrations.CreateModel( name='PageUserGroup', fields=[ ('group_ptr', models.OneToOneField(primary_key=True, to='auth.Group', auto_created=True, parent_link=True, serialize=False)), ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='created_usergroups')), ], options={ 'verbose_name': 'User group (page)', 'verbose_name_plural': 'User groups (page)', }, bases=('auth.group',), ), migrations.CreateModel( name='Placeholder', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('slot', models.CharField(db_index=True, max_length=50, verbose_name='slot', editable=False)), ('default_width', models.PositiveSmallIntegerField(null=True, verbose_name='width', editable=False)), ], options={ }, bases=(models.Model,), ), migrations.AddField( model_name='page', name='placeholders', field=models.ManyToManyField(to='cms.Placeholder', editable=False), preserve_default=True, ), migrations.AlterUniqueTogether( name='page', unique_together=set([('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft')]), ), migrations.AddField( model_name='cmsplugin', name='placeholder', field=models.ForeignKey(null=True, to='cms.Placeholder', editable=False), preserve_default=True, ), migrations.AddField( model_name='aliaspluginmodel', name='alias_placeholder', field=models.ForeignKey(null=True, to='cms.Placeholder', related_name='alias_placeholder', editable=False), preserve_default=True, ), migrations.CreateModel( name='PlaceholderReference', fields=[ ('cmsplugin_ptr', models.OneToOneField(primary_key=True, to='cms.CMSPlugin', auto_created=True, parent_link=True, serialize=False)), ('name', models.CharField(max_length=255)), ('placeholder_ref', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='clipboard', editable=False)), ], options={ }, bases=('cms.cmsplugin',), ), migrations.CreateModel( name='StaticPlaceholder', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('name', models.CharField(max_length=255, default='', help_text='Descriptive name to identify this static placeholder. 
Not displayed to users.', blank=True, verbose_name='static placeholder name')), ('code', models.CharField(max_length=255, verbose_name='placeholder code', help_text='To render the static placeholder in templates.', blank=True)), ('dirty', models.BooleanField(default=False, editable=False)), ('creation_method', models.CharField(max_length=20, default='code', blank=True, verbose_name='creation_method', choices=cms.models.static_placeholder.StaticPlaceholder.CREATION_METHODS)), ('draft', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', verbose_name='placeholder content', related_name='static_draft', slotname=cms.models.static_placeholder.static_slotname, editable=False)), ('public', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.models.static_placeholder.static_slotname, related_name='static_public', editable=False)), ('site', models.ForeignKey(null=True, to='sites.Site', blank=True)), ], options={ 'verbose_name': 'static placeholder', 'verbose_name_plural': 'static placeholders', }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='staticplaceholder', unique_together=set([('code', 'site')]), ), migrations.CreateModel( name='Title', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('language', models.CharField(db_index=True, max_length=15, verbose_name='language')), ('title', models.CharField(max_length=255, verbose_name='title')), ('page_title', models.CharField(max_length=255, null=True, help_text='overwrite the title (html title tag)', blank=True, verbose_name='title')), ('menu_title', models.CharField(max_length=255, null=True, help_text='overwrite the title in the menu', blank=True, verbose_name='title')), ('meta_description', models.TextField(max_length=155, null=True, help_text='The text displayed in search engines.', blank=True, verbose_name='description')), ('slug', models.SlugField(max_length=255, verbose_name='slug')), ('path', models.CharField(db_index=True, max_length=255, verbose_name='Path')), ('has_url_overwrite', models.BooleanField(db_index=True, default=False, editable=False, verbose_name='has url overwrite')), ('redirect', models.CharField(max_length=255, null=True, blank=True, verbose_name='redirect')), ('creation_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='creation date', editable=False)), ('published', models.BooleanField(default=False, verbose_name='is published')), ('publisher_is_draft', models.BooleanField(db_index=True, default=True, editable=False)), ('publisher_state', models.SmallIntegerField(db_index=True, default=0, editable=False)), ('page', models.ForeignKey(to='cms.Page', verbose_name='page', related_name='title_set')), ('publisher_public', models.OneToOneField(null=True, to='cms.Title', related_name='publisher_draft', editable=False)), ], options={ }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='title', unique_together=set([('language', 'page')]), ), migrations.CreateModel( name='UserSettings', fields=[ ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)), ('language', models.CharField(max_length=10, choices=settings.LANGUAGES, help_text='The language for the admin interface and toolbar', verbose_name='Language')), ('clipboard', models.ForeignKey(null=True, to='cms.Placeholder', blank=True, editable=False)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True, related_name='djangocms_usersettings', editable=False)), ], 
options={ 'verbose_name': 'user setting', 'verbose_name_plural': 'user settings', }, bases=(models.Model,), ), ]
bsd-3-clause
sankhesh/VTK
Interaction/Widgets/Testing/Python/TestBoxWidget.py
26
3843
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' ========================================================================= Program: Visualization Toolkit Module: TestNamedColorsIntegration.py Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. ========================================================================= ''' import vtk import vtk.test.Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() class TestBoxWidget(vtk.test.Testing.vtkTest): def testBoxWidget(self): # Demonstrate how to use the vtkBoxWidget. # This script uses a 3D box widget to define a "clipping box" to clip some # simple geometry (a mace). Make sure that you hit the "W" key to activate the widget. # create a sphere source # sphere = vtk.vtkSphereSource() cone = vtk.vtkConeSource() glyph = vtk.vtkGlyph3D() glyph.SetInputConnection(sphere.GetOutputPort()) glyph.SetSourceConnection(cone.GetOutputPort()) glyph.SetVectorModeToUseNormal() glyph.SetScaleModeToScaleByVector() glyph.SetScaleFactor(0.25) apd = vtk.vtkAppendPolyData() apd.AddInputConnection(glyph.GetOutputPort()) apd.AddInputConnection(sphere.GetOutputPort()) maceMapper = vtk.vtkPolyDataMapper() maceMapper.SetInputConnection(apd.GetOutputPort()) maceActor = vtk.vtkLODActor() maceActor.SetMapper(maceMapper) maceActor.VisibilityOn() planes = vtk.vtkPlanes() clipper = vtk.vtkClipPolyData() clipper.SetInputConnection(apd.GetOutputPort()) clipper.SetClipFunction(planes) clipper.InsideOutOn() selectMapper = vtk.vtkPolyDataMapper() selectMapper.SetInputConnection(clipper.GetOutputPort()) selectActor = vtk.vtkLODActor() selectActor.SetMapper(selectMapper) selectActor.GetProperty().SetColor(0, 1, 0) selectActor.VisibilityOff() selectActor.SetScale(1.01, 1.01, 1.01) # Create the RenderWindow, Renderer and both Actors # ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) iRen = vtk.vtkRenderWindowInteractor() iRen.SetRenderWindow(renWin) boxWidget = vtk.vtkBoxWidget() boxWidget.SetInteractor(iRen) ren.AddActor(maceActor) ren.AddActor(selectActor) # Add the actors to the renderer, set the background and size # ren.SetBackground(0.1, 0.2, 0.4) renWin.SetSize(300, 300) def SelectPolygons(widget, event_string): ''' The callback takes two parameters. Parameters: widget - the object that generates the event. event_string - the event name (which is a string). ''' # boxWidget and selectActor are taken from the enclosing scope boxWidget.GetPlanes(planes) selectActor.VisibilityOn() # place the interactor initially boxWidget.SetInputConnection(glyph.GetOutputPort()) boxWidget.PlaceWidget() boxWidget.AddObserver("EndInteractionEvent", SelectPolygons) # render and interact with data renWin.Render() img_file = "TestBoxWidget.png" vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25) vtk.test.Testing.interact() if __name__ == "__main__": vtk.test.Testing.main([(TestBoxWidget, 'test')])
bsd-3-clause
Elettronik/SickRage
lib/requests/packages/urllib3/poolmanager.py
137
16345
from __future__ import absolute_import import collections import functools import logging from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.url import parse_url from .util.retry import Retry __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', 'ssl_version', 'ca_cert_dir', 'ssl_context') # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( 'key_scheme', # str 'key_host', # str 'key_port', # int 'key_timeout', # int or float or Timeout 'key_retries', # int or Retry 'key_strict', # bool 'key_block', # bool 'key_source_address', # str 'key_key_file', # str 'key_cert_file', # str 'key_cert_reqs', # str 'key_ca_certs', # str 'key_ssl_version', # str 'key_ca_cert_dir', # str 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext 'key_maxsize', # int 'key_headers', # dict 'key__proxy', # parsed proxy url 'key__proxy_headers', # dict 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples 'key__socks_options', # dict 'key_assert_hostname', # bool or string 'key_assert_fingerprint', # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple('PoolKey', _key_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contains the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context['scheme'] = context['scheme'].lower() context['host'] = context['host'].lower() # These are both dictionaries and need to be transformed into frozensets for key in ('headers', '_proxy_headers', '_socks_options'): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get('socket_options') if socket_opts is not None: context['socket_options'] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. 
for key in list(context.keys()): context['key_' + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { 'http': functools.partial(_default_key_normalizer, PoolKey), 'https': functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = { 'http': HTTPConnectionPool, 'https': HTTPSConnectionPool, } class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ('scheme', 'host', 'port'): request_context.pop(key, None) if scheme == 'http': for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. 
If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context['scheme'] = scheme or 'http' if not port: port = port_by_scheme.get(request_context['scheme'].lower(), 80) request_context['port'] = port request_context['host'] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context['scheme'].lower() pool_key_constructor = self.key_fn_by_scheme[scheme] pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context['scheme'] host = request_context['host'] port = request_context['port'] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. 
""" u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw['assert_same_host'] = False kw['redirect'] = False if 'headers' not in kw: kw['headers'] = self.headers if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) # RFC 7231, Section 6.4.4 if response.status == 303: method = 'GET' retries = kw.get('retries') if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) try: retries = retries.increment(method, url, response=response, _pool=conn) except MaxRetryError: if retries.raise_on_redirect: raise return response kw['retries'] = retries kw['redirect'] = redirect log.info("Redirecting %s -> %s", url, redirect_location) return self.urlopen(method, redirect_location, **kw) class ProxyManager(PoolManager): """ Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. :param proxy_url: The URL of the proxy to be used. :param proxy_headers: A dictionary contaning headers that will be sent to the proxy. In case of HTTP they are being sent with each request, while in the HTTPS/CONNECT case they are sent only once. Could be used for proxy authentication. Example: >>> proxy = urllib3.ProxyManager('http://localhost:3128/') >>> r1 = proxy.request('GET', 'http://google.com/') >>> r2 = proxy.request('GET', 'http://httpbin.org/') >>> len(proxy.pools) 1 >>> r3 = proxy.request('GET', 'https://httpbin.org/') >>> r4 = proxy.request('GET', 'https://twitter.com/') >>> len(proxy.pools) 3 """ def __init__(self, proxy_url, num_pools=10, headers=None, proxy_headers=None, **connection_pool_kw): if isinstance(proxy_url, HTTPConnectionPool): proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, proxy_url.port) proxy = parse_url(proxy_url) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) proxy = proxy._replace(port=port) if proxy.scheme not in ("http", "https"): raise ProxySchemeUnknown(proxy.scheme) self.proxy = proxy self.proxy_headers = proxy_headers or {} connection_pool_kw['_proxy'] = self.proxy connection_pool_kw['_proxy_headers'] = self.proxy_headers super(ProxyManager, self).__init__( num_pools, headers, **connection_pool_kw) def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): if scheme == "https": return super(ProxyManager, self).connection_from_host( host, port, scheme, pool_kwargs=pool_kwargs) return super(ProxyManager, self).connection_from_host( self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ headers_ = {'Accept': '*/*'} netloc = parse_url(url).netloc if netloc: headers_['Host'] = netloc if headers: headers_.update(headers) return headers_ def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if u.scheme == "http": # For proxied HTTPS requests, httplib sets the necessary headers # on the CONNECT to the proxy. For HTTP, we'll definitely # need to set 'Host' at the very least. 
headers = kw.get('headers', self.headers) kw['headers'] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) def proxy_from_url(url, **kw): return ProxyManager(proxy_url=url, **kw)
gpl-3.0
lewismc/nutchpy
setup.py
8
4958
#!/usr/bin/env python

from __future__ import absolute_import, division, print_function

import os
import sys
from fnmatch import fnmatchcase
from distutils.core import Command, setup
from distutils.util import convert_path
import glob
import subprocess
import shutil

#------------------------------------------------------------------------
# Top Level Packages
#------------------------------------------------------------------------

def find_packages(where='.', exclude=()):
    out = []
    stack = [(convert_path(where), '')]
    while stack:
        where, prefix = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if ('.' not in name and os.path.isdir(fn) and
                    os.path.isfile(os.path.join(fn, '__init__.py'))):
                out.append(prefix + name)
                stack.append((fn, prefix + name + '.'))

    if sys.version_info[0] == 3:
        exclude = exclude + ('*py2only*', )

    for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
        out = [item for item in out if not fnmatchcase(item, pat)]

    return out

packages = find_packages()

if sys.platform == 'win32':
    dir_sep = '\\'
else:
    dir_sep = '/'

def get_data_files():
    data_files = []
    root = os.path.join("nutchpy", "ex_data")
    ## scan catalog for files with the above extensions and add to pkg_data_dirs
    for path, dirs, files in os.walk(root):
        for fs in files:
            # remove nutchpy from path name
            install_path = dir_sep.join(path.split(dir_sep)[1:])
            data_files.append(os.path.join(install_path, fs))
    return data_files

package_data = dict(nutchpy=get_data_files())

#------------------------------------------------------------------------
# Commands
#------------------------------------------------------------------------

class CleanCommand(Command):
    """Custom distutils command to clean the .so and .pyc files."""

    user_options = []

    def initialize_options(self):
        self._clean_me = []
        self._clean_trees = []

        for toplevel in packages:
            for root, dirs, files in list(os.walk(toplevel)):
                for f in files:
                    if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o', '.pyd', '.jar'):
                        self._clean_me.append(os.path.join(root, f))

        for d in ('build',):
            if os.path.exists(d):
                self._clean_trees.append(d)

    def finalize_options(self):
        pass

    def run(self):
        for clean_me in self._clean_me:
            try:
                print('flushing', clean_me)
                os.unlink(clean_me)
            except Exception:
                pass
        for clean_tree in self._clean_trees:
            try:
                print('flushing', clean_tree)
                shutil.rmtree(clean_tree)
            except Exception:
                pass

#------------------------------------------------------------------------
# Setup
#------------------------------------------------------------------------

longdesc = open('README.md').read()

#------------------------------------------------------------------------
# Optional building with MAVEN
#------------------------------------------------------------------------

if 'nojava' not in sys.argv:
    JAVA_SRC = "seqreader-app"
    os.chdir(JAVA_SRC)
    build_cmd = "mvn package"
    os.system(build_cmd)
    # subprocess.check_call(build_cmd, shell=os.name != 'nt',
    #                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    os.chdir("..")
    jar_file = os.path.join(JAVA_SRC, "target",
                            "seqreader-app-1.0-SNAPSHOT-jar-with-dependencies.jar")
    java_lib_dir = os.path.join("nutchpy", "java_libs")
    if not os.path.exists(java_lib_dir):
        os.mkdir(java_lib_dir)
    shutil.copy(jar_file, java_lib_dir)
else:
    # Drop the flag wherever it appears; the previous sys.argv.pop(2)
    # assumed a fixed argument position and broke otherwise.
    sys.argv.remove('nojava')

jar_file_list = glob.glob("nutchpy/java_libs/*")
jar_file_list = [os.path.relpath(path, start='nutchpy') for path in jar_file_list]
package_data['nutchpy'] = package_data['nutchpy'] + jar_file_list

setup(
    name='nutchpy',
    version='0.1',
    author='Continuum Analytics',
    author_email='[email protected]',
    description='nutchpy',
    long_description=longdesc,
    license='BSD',
    platforms=['any'],
    install_requires=['py4j>=0.8.2.1'],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Big Data',
        'Topic :: Java',
    ],
    packages=packages,
    package_data=package_data,
    cmdclass={
        'clean': CleanCommand,
    },
)
bsd-2-clause
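The find_packages() above is a breadth-first scan for directories carrying an __init__.py. A minimal sketch of exercising it against a scratch tree (the layout is invented for illustration and assumes the definition above is in scope):

import os
import tempfile

# build a scratch tree: pkg/ and pkg/sub/, each with an __init__.py
root = tempfile.mkdtemp()
for d in ('pkg', os.path.join('pkg', 'sub')):
    os.makedirs(os.path.join(root, d))
    open(os.path.join(root, d, '__init__.py'), 'w').close()

print(find_packages(where=root))                        # ['pkg', 'pkg.sub']
print(find_packages(where=root, exclude=('pkg.sub',)))  # ['pkg']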
ohmini/thaifoodapi
lib/django/contrib/sessions/models.py
347
1298
from __future__ import unicode_literals

from django.contrib.sessions.base_session import (
    AbstractBaseSession, BaseSessionManager,
)


class SessionManager(BaseSessionManager):
    use_in_migrations = True


class Session(AbstractBaseSession):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django Web site).
    """
    objects = SessionManager()

    @classmethod
    def get_session_store_class(cls):
        from django.contrib.sessions.backends.db import SessionStore
        return SessionStore

    class Meta(AbstractBaseSession.Meta):
        db_table = 'django_session'
bsd-3-clause
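A quick sketch of how the model and its store cooperate, assuming a configured Django project (SECRET_KEY, a database, and django.contrib.sessions in INSTALLED_APPS with its table migrated):

from django.contrib.sessions.models import Session

SessionStore = Session.get_session_store_class()   # the db-backed store
store = SessionStore()
store['cart'] = [42]
store.save()                                       # writes a django_session row

row = Session.objects.get(pk=store.session_key)
print(row.get_decoded())                           # {'cart': [42]}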
ehirt/odoo
addons/fetchmail/fetchmail.py
6
15874
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import poplib import time from imaplib import IMAP4 from imaplib import IMAP4_SSL from poplib import POP3 from poplib import POP3_SSL try: import cStringIO as StringIO except ImportError: import StringIO import zipfile import base64 from openerp import addons from openerp.osv import fields, osv from openerp import tools, api from openerp.tools.translate import _ _logger = logging.getLogger(__name__) MAX_POP_MESSAGES = 50 MAIL_TIMEOUT = 60 # Workaround for Python 2.7.8 bug https://bugs.python.org/issue23906 poplib._MAXLINE = 65536 class fetchmail_server(osv.osv): """Incoming POP/IMAP mail server account""" _name = 'fetchmail.server' _description = "POP/IMAP Server" _order = 'priority' _columns = { 'name':fields.char('Name', required=True, readonly=False), 'active':fields.boolean('Active', required=False), 'state':fields.selection([ ('draft', 'Not Confirmed'), ('done', 'Confirmed'), ], 'Status', select=True, readonly=True, copy=False), 'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}), 'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}), 'type':fields.selection([ ('pop', 'POP Server'), ('imap', 'IMAP Server'), ('local', 'Local Server'), ], 'Server Type', select=True, required=True, readonly=False), 'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"), 'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. " "If not enabled, incoming emails will be stripped of any attachments before being processed"), 'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference" "and attached to each processed message. This will usually double the size of your message database."), 'date': fields.datetime('Last Fetch Date', readonly=True), 'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}), 'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}), 'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, " "on the record that was created or updated by this mail"), 'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation " "corresponding to this document type. 
This will create " "new documents for new conversations, or attach follow-up " "emails to the existing conversations (documents)."), 'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, " "lower values mean higher priority"), 'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True), 'configuration' : fields.text('Configuration', readonly=True), 'script' : fields.char('Script', readonly=True), } _defaults = { 'state': "draft", 'type': "pop", 'active': True, 'priority': 5, 'attach': True, 'script': '/mail/static/scripts/openerp_mailgate.py', } def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False): port = 0 values = {} if server_type == 'pop': port = ssl and 995 or 110 elif server_type == 'imap': port = ssl and 993 or 143 else: values['server'] = '' values['port'] = port conf = { 'dbname' : cr.dbname, 'uid' : uid, 'model' : 'MODELNAME', } if object_id: m = self.pool.get('ir.model') r = m.read(cr,uid,[object_id],['model']) conf['model']=r[0]['model'] values['configuration'] = """Use the below script with the following command line options with your Mail Transport Agent (MTA) openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s Example configuration for the postfix mta running locally: /etc/postfix/virtual_aliases: @youdomain openerp_mailgate@localhost /etc/aliases: openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s" """ % conf return {'value':values} def set_draft(self, cr, uid, ids, context=None): self.write(cr, uid, ids , {'state':'draft'}) return True @api.cr_uid_ids_context def connect(self, cr, uid, server_id, context=None): if isinstance(server_id, (list,tuple)): server_id = server_id[0] server = self.browse(cr, uid, server_id, context) if server.type == 'imap': if server.is_ssl: connection = IMAP4_SSL(server.server, int(server.port)) else: connection = IMAP4(server.server, int(server.port)) connection.login(server.user, server.password) elif server.type == 'pop': if server.is_ssl: connection = POP3_SSL(server.server, int(server.port)) else: connection = POP3(server.server, int(server.port)) #TODO: use this to remove only unread messages #connection.user("recent:"+server.user) connection.user(server.user) connection.pass_(server.password) # Add timeout on socket connection.sock.settimeout(MAIL_TIMEOUT) return connection def button_confirm_login(self, cr, uid, ids, context=None): if context is None: context = {} for server in self.browse(cr, uid, ids, context=context): try: connection = server.connect() server.write({'state':'done'}) except Exception, e: _logger.exception("Failed to connect to %s server %s.", server.type, server.name) raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e)) finally: try: if connection: if server.type == 'imap': connection.close() elif server.type == 'pop': connection.quit() except Exception: # ignored, just a consequence of the previous exception pass return True def _fetch_mails(self, cr, uid, ids=False, context=None): if not ids: ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])]) return self.fetch_mail(cr, uid, ids, context=context) def fetch_mail(self, cr, uid, ids, context=None): """WARNING: meant for cron usage only - will commit() after each email!""" context = dict(context or {}) context['fetchmail_cron_running'] = True 
mail_thread = self.pool.get('mail.thread') action_pool = self.pool.get('ir.actions.server') for server in self.browse(cr, uid, ids, context=context): _logger.info('start checking for new emails on %s server %s', server.type, server.name) context.update({'fetchmail_server_id': server.id, 'server_type': server.type}) count, failed = 0, 0 imap_server = False pop_server = False if server.type == 'imap': try: imap_server = server.connect() imap_server.select() result, data = imap_server.search(None, '(UNSEEN)') for num in data[0].split(): res_id = None result, data = imap_server.fetch(num, '(RFC822)') imap_server.store(num, '-FLAGS', '\\Seen') try: res_id = mail_thread.message_process(cr, uid, server.object_id.model, data[0][1], save_original=server.original, strip_attachments=(not server.attach), context=context) imap_server.store(num, '+FLAGS', '\\Seen') except Exception: _logger.exception('Failed to process mail from %s server %s.', server.type, server.name) failed += 1 if res_id and server.action_id: action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)}) cr.commit() count += 1 _logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed) except Exception: _logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name) finally: if imap_server: imap_server.close() imap_server.logout() elif server.type == 'pop': try: while True: pop_server = server.connect() (numMsgs, totalSize) = pop_server.stat() pop_server.list() for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1): (header, msges, octets) = pop_server.retr(num) msg = '\n'.join(msges) res_id = None try: res_id = mail_thread.message_process(cr, uid, server.object_id.model, msg, save_original=server.original, strip_attachments=(not server.attach), context=context) pop_server.dele(num) except Exception: _logger.exception('Failed to process mail from %s server %s.', server.type, server.name) failed += 1 if res_id and server.action_id: action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)}) cr.commit() if numMsgs < MAX_POP_MESSAGES: break pop_server.quit() _logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed) except Exception: _logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name) finally: if pop_server: pop_server.quit() server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)}) return True def _update_cron(self, cr, uid, context=None): if context and context.get('fetchmail_cron_running'): return try: cron = self.pool['ir.model.data'].get_object( cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context) except ValueError: # Nevermind if default cron cannot be found return # Enabled/Disable cron based on the number of 'done' server of type pop or imap cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])]) def create(self, cr, uid, values, context=None): res = super(fetchmail_server, self).create(cr, uid, values, context=context) self._update_cron(cr, uid, context=context) return res def write(self, cr, uid, ids, values, context=None): res = super(fetchmail_server, self).write(cr, uid, ids, values, 
context=context) self._update_cron(cr, uid, context=context) return res def unlink(self, cr, uid, ids, context=None): res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context) self._update_cron(cr, uid, context=context) return res class mail_mail(osv.osv): _inherit = "mail.mail" _columns = { 'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server", readonly=True, select=True, oldname='server_id'), } def create(self, cr, uid, values, context=None): if context is None: context = {} fetchmail_server_id = context.get('fetchmail_server_id') if fetchmail_server_id: values['fetchmail_server_id'] = fetchmail_server_id res = super(mail_mail, self).create(cr, uid, values, context=context) return res def write(self, cr, uid, ids, values, context=None): if context is None: context = {} fetchmail_server_id = context.get('fetchmail_server_id') if fetchmail_server_id: values['fetchmail_server_id'] = fetchmail_server_id res = super(mail_mail, self).write(cr, uid, ids, values, context=context) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
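The connect() method above branches on server type and the SSL flag, then authenticates. A standalone sketch of that branching using the standard-library clients (host, port and credentials are placeholders, not values taken from the module):

from imaplib import IMAP4, IMAP4_SSL
from poplib import POP3, POP3_SSL

MAIL_TIMEOUT = 60

def connect(server_type, host, port, user, password, use_ssl=False):
    if server_type == 'imap':
        conn = IMAP4_SSL(host, port) if use_ssl else IMAP4(host, port)
        conn.login(user, password)
    else:  # 'pop'
        conn = POP3_SSL(host, port) if use_ssl else POP3(host, port)
        conn.user(user)
        conn.pass_(password)
        conn.sock.settimeout(MAIL_TIMEOUT)  # guard against hung POP sessions
    return conn

# conn = connect('imap', 'mail.example.com', 993, 'user', 'secret', use_ssl=True)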
fafaman/django
django/contrib/gis/db/backends/base/models.py
434
7111
import re

from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class SpatialRefSysMixin(object):
    """
    The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
    """
    # For pulling out the spheroid from the spatial reference string. This
    # regular expression is used only if the user does not have GDAL installed.
    # TODO: Flattening not used in all ellipsoids, could also be a minor axis,
    # or 'b' parameter.
    spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')

    # For pulling out the units on platforms w/o GDAL installed.
    # TODO: Figure out how to pull out angular units of projected coordinate system and
    # fix for LOCAL_CS types. GDAL should be highly recommended for performing
    # distance queries.
    units_regex = re.compile(
        r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)'
        r'(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)",'
        r'"(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,'
        r'AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$'
    )

    @property
    def srs(self):
        """
        Returns a GDAL SpatialReference object, if GDAL is installed.
        """
        if gdal.HAS_GDAL:
            # TODO: Is caching really necessary here? Is complexity worth it?
            if hasattr(self, '_srs'):
                # Returning a clone of the cached SpatialReference object.
                return self._srs.clone()
            else:
                # Attempting to cache a SpatialReference object.
                # On Python 3 the `as` target of an except clause is unbound
                # once the block exits, so keep a reference for the error
                # report below.
                last_error = None

                # Trying to get from WKT first.
                try:
                    self._srs = gdal.SpatialReference(self.wkt)
                    return self.srs
                except Exception as e:
                    last_error = e

                try:
                    self._srs = gdal.SpatialReference(self.proj4text)
                    return self.srs
                except Exception as e:
                    last_error = e

                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, last_error))
        else:
            raise Exception('GDAL is not installed.')

    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening).
        """
        if gdal.HAS_GDAL:
            return self.srs.ellipsoid
        else:
            m = self.spheroid_regex.match(self.wkt)
            if m:
                # The regex fallback can only recover the semimajor axis and
                # the inverse flattening; the semiminor axis is unavailable.
                return (float(m.group('major')), float(m.group('flattening')))
            else:
                return None

    @property
    def name(self):
        "Returns the projection name."
        return self.srs.name

    @property
    def spheroid(self):
        "Returns the spheroid name for this spatial reference."
        return self.srs['spheroid']

    @property
    def datum(self):
        "Returns the datum for this spatial reference."
        return self.srs['datum']

    @property
    def projected(self):
        "Is this Spatial Reference projected?"
        if gdal.HAS_GDAL:
            return self.srs.projected
        else:
            return self.wkt.startswith('PROJCS')

    @property
    def local(self):
        "Is this Spatial Reference local?"
        if gdal.HAS_GDAL:
            return self.srs.local
        else:
            return self.wkt.startswith('LOCAL_CS')

    @property
    def geographic(self):
        "Is this Spatial Reference geographic?"
        if gdal.HAS_GDAL:
            return self.srs.geographic
        else:
            return self.wkt.startswith('GEOGCS')

    @property
    def linear_name(self):
        "Returns the linear units name."
        if gdal.HAS_GDAL:
            return self.srs.linear_name
        elif self.geographic:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')

    @property
    def linear_units(self):
        "Returns the linear units."
        if gdal.HAS_GDAL:
            return self.srs.linear_units
        elif self.geographic:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')

    @property
    def angular_name(self):
        "Returns the name of the angular units."
        if gdal.HAS_GDAL:
            return self.srs.angular_name
        elif self.projected:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')

    @property
    def angular_units(self):
        "Returns the angular units."
        if gdal.HAS_GDAL:
            return self.srs.angular_units
        elif self.projected:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')

    @property
    def units(self):
        "Returns a tuple of the units and the name."
        if self.projected or self.local:
            return (self.linear_units, self.linear_name)
        elif self.geographic:
            return (self.angular_units, self.angular_name)
        else:
            return (None, None)

    @classmethod
    def get_units(cls, wkt):
        """
        Class method used by GeometryField on initialization to retrieve the
        units on the given WKT, without having to use any of the database
        fields.
        """
        if gdal.HAS_GDAL:
            return gdal.SpatialReference(wkt).units
        else:
            m = cls.units_regex.match(wkt)
            return m.group('unit'), m.group('unit_name')

    @classmethod
    def get_spheroid(cls, wkt, string=True):
        """
        Class method used by GeometryField on initialization to retrieve the
        `SPHEROID[..]` parameters from the given WKT.
        """
        if gdal.HAS_GDAL:
            srs = gdal.SpatialReference(wkt)
            sphere_params = srs.ellipsoid
            sphere_name = srs['spheroid']
        else:
            m = cls.spheroid_regex.match(wkt)
            if m:
                sphere_params = (float(m.group('major')), float(m.group('flattening')))
                sphere_name = m.group('name')
            else:
                return None

        if not string:
            return sphere_name, sphere_params
        else:
            # `string` parameter used to place in format acceptable by PostGIS
            if len(sphere_params) == 3:
                radius, flattening = sphere_params[0], sphere_params[2]
            else:
                radius, flattening = sphere_params
            return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)

    def __str__(self):
        """
        Returns the string representation. If GDAL is installed,
        it will be 'pretty' OGC WKT.
        """
        try:
            return six.text_type(self.srs)
        except Exception:
            return six.text_type(self.wkt)
bsd-3-clause
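When GDAL is unavailable, get_units() and get_spheroid() fall back to the regexes above. A sketch against a typical WGS 84 GEOGCS string (the WKT literal is illustrative, not taken from the module; with GDAL installed these classmethods go through gdal.SpatialReference instead):

wgs84_wkt = (
    'GEOGCS["WGS 84",DATUM["WGS_1984",'
    'SPHEROID["WGS 84",6378137,298.257223563,'
    'AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],'
    'PRIMEM["Greenwich",0],'
    'UNIT["degree",0.0174532925199433],'
    'AUTHORITY["EPSG","4326"]]'
)

print(SpatialRefSysMixin.get_units(wgs84_wkt))
# regex path: ('0.0174532925199433', 'degree')
print(SpatialRefSysMixin.get_spheroid(wgs84_wkt))
# regex path: 'SPHEROID["WGS 84",6378137.0,298.257223563]'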
blrm/openshift-tools
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
44
1480
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Custom filters for use in openshift_hosted
'''


class FilterModule(object):
    ''' Custom ansible filters for use by openshift_hosted role'''

    @staticmethod
    def get_router_replicas(replicas=None, router_nodes=None):
        ''' This function will return the number of replicas
            based on the results from the defined
            openshift_hosted_router_replicas OR
            the query from oc_obj on openshift nodes with a selector
            OR default to 1

        '''
        # We always use what they've specified if they've specified a value
        if replicas is not None:
            return replicas

        replicas = 1

        # Ignore boolean expression limit of 5.
        # pylint: disable=too-many-boolean-expressions
        if (isinstance(router_nodes, dict) and
                'results' in router_nodes and
                'results' in router_nodes['results'] and
                isinstance(router_nodes['results']['results'], list) and
                len(router_nodes['results']['results']) > 0 and
                'items' in router_nodes['results']['results'][0]):

            if len(router_nodes['results']['results'][0]['items']) > 0:
                replicas = len(router_nodes['results']['results'][0]['items'])

        return replicas

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {'get_router_replicas': self.get_router_replicas}
apache-2.0
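A quick check of the three branches of get_router_replicas; the router_nodes dict below mimics the shape of an oc_obj query result:

fm = FilterModule()
get_replicas = fm.filters()['get_router_replicas']

# an explicit replica count always wins
print(get_replicas(replicas=3))                    # 3

# otherwise the count comes from the matched node items
router_nodes = {'results': {'results': [{'items': [{}, {}]}]}}
print(get_replicas(router_nodes=router_nodes))     # 2

# and falls back to 1 when the result shape does not match
print(get_replicas(router_nodes={}))               # 1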
LearnEra/LearnEraPlaftform
common/lib/chem/chem/miller.py
46
9239
""" Calculation of Miller indices """ import numpy as np import math import fractions as fr import decimal import json def lcm(a, b): """ Returns least common multiple of a, b Args: a, b: floats Returns: float """ return a * b / fr.gcd(a, b) def segment_to_fraction(distance): """ Converts lengths of which the plane cuts the axes to fraction. Tries convert distance to closest nicest fraction with denominator less or equal than 10. It is purely for simplicity and clearance of learning purposes. Jenny: 'In typical courses students usually do not encounter indices any higher than 6'. If distance is not a number (numpy nan), it means that plane is parallel to axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is returned Generally (special cases): a) if distance is smaller than some constant, i.g. 0.01011, than fraction's denominator usually much greater than 10. b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane, But if he will slightly move the mouse and click on 0.65 -> it will be (16,15,16) plane. That's why we are doing adjustments for points coordinates, to the closest tick, tick + tick / 2 value. And now UI sends to server only values multiple to 0.05 (half of tick). Same rounding is implemented for unittests. But if one will want to calculate miller indices with exact coordinates and with nice fractions (which produce small Miller indices), he may want shift to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin. In this way he can receive nice small fractions. Also there is can be degenerated case when S = (0.015, 0.012, >0.05) - if update S to (0, 0, >0.05) - it is a line. This case should be considered separately. Small nice Miller numbers and possibility to create very small segments can not be implemented at same time). Args: distance: float distance that plane cuts on axis, it must not be 0. Distance is multiple of 0.05. Returns: Inverted fraction. 0 / 1 if distance is nan """ if np.isnan(distance): return fr.Fraction(0, 1) else: fract = fr.Fraction(distance).limit_denominator(10) return fr.Fraction(fract.denominator, fract.numerator) def sub_miller(segments): ''' Calculates Miller indices from segments. Algorithm: 1. Obtain inverted fraction from segments 2. Find common denominator of inverted fractions 3. Lead fractions to common denominator and throws denominator away. 4. Return obtained values. Args: List of 3 floats, meaning distances that plane cuts on x, y, z axes. Any float not equals zero, it means that plane does not intersect origin, i. e. shift of origin has already been done. Returns: String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2) ''' fracts = [segment_to_fraction(segment) for segment in segments] common_denominator = reduce(lcm, [fract.denominator for fract in fracts]) miller = ([fract.numerator * math.fabs(common_denominator) / fract.denominator for fract in fracts]) return'(' + ','.join(map(str, map(decimal.Decimal, miller))) + ')' def miller(points): """ Calculates Miller indices from points. Algorithm: 1. Calculate normal vector to a plane that goes trough all points. 2. Set origin. 3. Create Cartesian coordinate system (Ccs). 4. Find the lengths of segments of which the plane cuts the axes. Equation of a line for axes: Origin + (Coordinate_vector - Origin) * parameter. 5. If plane goes trough Origin: a) Find new random origin: find unit cube vertex, not crossed by a plane. b) Repeat 2-4. 
c) Fix signs of segments after Origin shift. This means to consider original directions of axes. I.g.: Origin was 0,0,0 and became new_origin. If new_origin has same Y coordinate as Origin, then segment does not change its sign. But if new_origin has another Y coordinate than origin (was 0, became 1), than segment has to change its sign (it now lies on negative side of Y axis). New Origin 0 value of X or Y or Z coordinate means that segment does not change sign, 1 value -> does change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1 6. Run function that calculates miller indices from segments. Args: List of points. Each point is list of float coordinates. Order of coordinates in point's list: x, y, z. Points are different! Returns: String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2) """ N = np.cross(points[1] - points[0], points[2] - points[0]) O = np.array([0, 0, 0]) P = points[0] # point of plane Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]]) segments = ([np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else np.nan for ort in Ccs]) if any(x == 0 for x in segments): # Plane goes through origin. vertices = [ # top: np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 1.0]), np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0, 1.0]), # bottom, except 0,0,0: np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([1.0, 1.0, 1.0]), ] for vertex in vertices: if np.dot(vertex - O, N) != 0: # vertex not in plane new_origin = vertex break # obtain new axes with center in new origin X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]]) Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]]) Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]]) new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin] segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else np.nan for ort in new_Ccs]) # fix signs of indices: 0 -> 1, 1 -> -1 ( segments = (1 - 2 * new_origin) * segments return sub_miller(segments) def grade(user_input, correct_answer): ''' Grade crystallography problem. Returns true if lattices are the same and Miller indices are same or minus same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only on student's selection of origin. Args: user_input, correct_answer: json. Format: user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"], ["0.78","1.00","0.00"],["0.00","1.00","0.72"]]} correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'} "lattice" is one of: "", "sc", "bcc", "fcc" Returns: True or false. ''' def negative(m): """ Change sign of Miller indices. Args: m: string with meaning of Miller indices. E.g.: (-6,3,-6) -> (6, -3, 6) Returns: String with changed signs. """ output = '' i = 1 while i in range(1, len(m) - 1): if m[i] in (',', ' '): output += m[i] elif m[i] not in ('-', '0'): output += '-' + m[i] elif m[i] == '0': output += m[i] else: i += 1 output += m[i] i += 1 return '(' + output + ')' def round0_25(point): """ Rounds point coordinates to closest 0.5 value. Args: point: list of float coordinates. Order of coordinates: x, y, z. 
Returns: list of coordinates rounded to closes 0.5 value """ rounded_points = [] for coord in point: base = math.floor(coord * 10) fractional_part = (coord * 10 - base) aliquot0_25 = math.floor(fractional_part / 0.25) if aliquot0_25 == 0.0: rounded_points.append(base / 10) if aliquot0_25 in (1.0, 2.0): rounded_points.append(base / 10 + 0.05) if aliquot0_25 == 3.0: rounded_points.append(base / 10 + 0.1) return rounded_points user_answer = json.loads(user_input) if user_answer['lattice'] != correct_answer['lattice']: return False points = [map(float, p) for p in user_answer['points']] if len(points) < 3: return False # round point to closes 0.05 value points = [round0_25(point) for point in points] points = [np.array(point) for point in points] # print miller(points), (correct_answer['miller'].replace(' ', ''), # negative(correct_answer['miller']).replace(' ', '')) if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')): return True return False
agpl-3.0
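miller() turns three plane points into axis segments and hands them to sub_miller(). A worked check with the functions above in scope (the module targets Python 2, where the bare reduce() in sub_miller is a builtin); the plane below intercepts x, y, z at 1/2, 1/3 and 1, so the inverted fractions are 2, 3 and 1:

import numpy as np

points = [np.array(p) for p in ([0.5, 0.0, 0.0],
                                [0.0, 1.0 / 3.0, 0.0],
                                [0.0, 0.0, 1.0])]
print(miller(points))   # expected: '(2,3,1)'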
aparo/django-nonrel
django/contrib/localflavor/at/forms.py
71
2271
""" AT-specific Form helpers """ import re from django.utils.translation import ugettext_lazy as _ from django.forms.fields import Field, RegexField, Select from django.forms import ValidationError re_ssn = re.compile(r'^\d{4} \d{6}') class ATZipCodeField(RegexField): """ A form field that validates its input is an Austrian postcode. Accepts 4 digits. """ default_error_messages = { 'invalid': _('Enter a zip code in the format XXXX.'), } def __init__(self, *args, **kwargs): super(ATZipCodeField, self).__init__(r'^\d{4}$', max_length=None, min_length=None, *args, **kwargs) class ATStateSelect(Select): """ A Select widget that uses a list of AT states as its choices. """ def __init__(self, attrs=None): from django.contrib.localflavor.at.at_states import STATE_CHOICES super(ATStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class ATSocialSecurityNumberField(Field): """ Austrian Social Security numbers are composed of a 4 digits and 6 digits field. The latter represents in most cases the person's birthdate while the first 4 digits represent a 3-digits counter and a one-digit checksum. The 6-digits field can also differ from the person's birthdate if the 3-digits counter suffered an overflow. This code is based on information available on http://de.wikipedia.org/wiki/Sozialversicherungsnummer#.C3.96sterreich """ default_error_messages = { 'invalid': _(u'Enter a valid Austrian Social Security Number in XXXX XXXXXX format.'), } def clean(self, value): if not re_ssn.search(value): raise ValidationError(self.error_messages['invalid']) sqnr, date = value.split(" ") sqnr, check = (sqnr[:3], (sqnr[3])) if int(sqnr) < 100: raise ValidationError(self.error_messages['invalid']) res = int(sqnr[0])*3 + int(sqnr[1])*7 + int(sqnr[2])*9 \ + int(date[0])*5 + int(date[1])*8 + int(date[2])*4 \ + int(date[3])*2 + int(date[4])*1 + int(date[5])*6 res = res % 11 if res != int(check): raise ValidationError(self.error_messages['invalid']) return u'%s%s %s'%(sqnr, check, date,)
bsd-3-clause
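The checksum in clean() weights the serial digits by (3, 7, 9) and the date digits by (5, 8, 4, 2, 1, 6), modulo 11. Working that arithmetic through for a made-up number (serial 123, date field 010180):

sqnr, date = '123', '010180'
res = sum(int(d) * w for d, w in zip(sqnr, (3, 7, 9))) \
    + sum(int(d) * w for d, w in zip(date, (5, 8, 4, 2, 1, 6)))
print(res % 11)   # 44 + 18 = 62, and 62 % 11 == 7
# so '1237 010180' (check digit 7) passes ATSocialSecurityNumberField().clean()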
40223236/w16b_test
static/Brython3.1.1-20150328-091302/Lib/_struct.py
726
13787
# # This module is a pure Python version of pypy.module.struct. # It is only imported if the vastly faster pypy.module.struct is not # compiled in. For now we keep this version for reference and # because pypy.module.struct is not ootype-backend-friendly yet. # # this module 'borrowed' from # https://bitbucket.org/pypy/pypy/src/18626459a9b2/lib_pypy/_struct.py?at=py3k-listview_str """Functions to convert between Python values and C structs. Python strings are used to hold the data representing the C struct and also as format strings to describe the layout of data in the C struct. The optional first format char indicates byte order, size and alignment: @: native order, size & alignment (default) =: native order, std. size & alignment <: little-endian, std. size & alignment >: big-endian, std. size & alignment !: same as > The remaining chars indicate types of args and must match exactly; these can be preceded by a decimal repeat count: x: pad byte (no data); c:char; b:signed byte; B:unsigned byte; h:short; H:unsigned short; i:int; I:unsigned int; l:long; L:unsigned long; f:float; d:double. Special cases (preceding decimal count indicates length): s:string (array of char); p: pascal string (with count byte). Special case (only available in native format): P:an integer type that is wide enough to hold a pointer. Special case (not in native mode unless 'long long' in platform C): q:long long; Q:unsigned long long Whitespace between formats is ignored. The variable struct.error is an exception raised on errors.""" import math, sys # TODO: XXX Find a way to get information on native sizes and alignments class StructError(Exception): pass error = StructError def unpack_int(data,index,size,le): bytes = [b for b in data[index:index+size]] if le == 'little': bytes.reverse() number = 0 for b in bytes: number = number << 8 | b return int(number) def unpack_signed_int(data,index,size,le): number = unpack_int(data,index,size,le) max = 2**(size*8) if number > 2**(size*8 - 1) - 1: number = int(-1*(max - number)) return number INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY def unpack_char(data,index,size,le): return data[index:index+size] def pack_int(number,size,le): x=number res=[] for i in range(size): res.append(x&0xff) x >>= 8 if le == 'big': res.reverse() return bytes(res) def pack_signed_int(number,size,le): if not isinstance(number, int): raise StructError("argument for i,I,l,L,q,Q,h,H must be integer") if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): raise OverflowError("Number:%i too large to convert" % number) return pack_int(number,size,le) def pack_unsigned_int(number,size,le): if not isinstance(number, int): raise StructError("argument for i,I,l,L,q,Q,h,H must be integer") if number < 0: raise TypeError("can't convert negative long to unsigned") if number > 2**(8*size)-1: raise OverflowError("Number:%i too large to convert" % number) return pack_int(number,size,le) def pack_char(char,size,le): return bytes(char) def isinf(x): return x != 0.0 and x / 2 == x def isnan(v): return v != v*1.0 or (v == 1.0 and v == 2.0) def pack_float(x, size, le): unsigned = float_pack(x, size) result = [] for i in range(8): result.append((unsigned >> (i * 8)) & 0xFF) if le == "big": result.reverse() return bytes(result) def unpack_float(data, index, size, le): binary = [data[i] for i in range(index, index + 8)] if le == "big": binary.reverse() unsigned = 0 for i in range(8): unsigned |= binary[i] << (i * 8) return float_unpack(unsigned, size, le) def round_to_nearest(x): """Python 3 style round: 
round a float x to the nearest int, but unlike the builtin Python 2.x round function: - return an int, not a float - do round-half-to-even, not round-half-away-from-zero. We assume that x is finite and nonnegative; except wrong results if you use this for negative x. """ int_part = int(x) frac_part = x - int_part if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: int_part += 1 return int_part def float_unpack(Q, size, le): """Convert a 32-bit or 64-bit integer created by float_pack into a Python float.""" if size == 8: MIN_EXP = -1021 # = sys.float_info.min_exp MAX_EXP = 1024 # = sys.float_info.max_exp MANT_DIG = 53 # = sys.float_info.mant_dig BITS = 64 elif size == 4: MIN_EXP = -125 # C's FLT_MIN_EXP MAX_EXP = 128 # FLT_MAX_EXP MANT_DIG = 24 # FLT_MANT_DIG BITS = 32 else: raise ValueError("invalid size value") if Q >> BITS: raise ValueError("input out of range") # extract pieces sign = Q >> BITS - 1 exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 mant = Q & ((1 << MANT_DIG - 1) - 1) if exp == MAX_EXP - MIN_EXP + 2: # nan or infinity result = float('nan') if mant else float('inf') elif exp == 0: # subnormal or zero result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) else: # normal mant += 1 << MANT_DIG - 1 result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) return -result if sign else result def float_pack(x, size): """Convert a Python float x into a 64-bit unsigned integer with the same byte representation.""" if size == 8: MIN_EXP = -1021 # = sys.float_info.min_exp MAX_EXP = 1024 # = sys.float_info.max_exp MANT_DIG = 53 # = sys.float_info.mant_dig BITS = 64 elif size == 4: MIN_EXP = -125 # C's FLT_MIN_EXP MAX_EXP = 128 # FLT_MAX_EXP MANT_DIG = 24 # FLT_MANT_DIG BITS = 32 else: raise ValueError("invalid size value") sign = math.copysign(1.0, x) < 0.0 if math.isinf(x): mant = 0 exp = MAX_EXP - MIN_EXP + 2 elif math.isnan(x): mant = 1 << (MANT_DIG-2) # other values possible exp = MAX_EXP - MIN_EXP + 2 elif x == 0.0: mant = 0 exp = 0 else: m, e = math.frexp(abs(x)) # abs(x) == m * 2**e exp = e - (MIN_EXP - 1) if exp > 0: # Normal case. mant = round_to_nearest(m * (1 << MANT_DIG)) mant -= 1 << MANT_DIG - 1 else: # Subnormal case. if exp + MANT_DIG - 1 >= 0: mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) else: mant = 0 exp = 0 # Special case: rounding produced a MANT_DIG-bit mantissa. assert 0 <= mant <= 1 << MANT_DIG - 1 if mant == 1 << MANT_DIG - 1: mant = 0 exp += 1 # Raise on overflow (in some circumstances, may want to return # infinity instead). 
if exp >= MAX_EXP - MIN_EXP + 2: raise OverflowError("float too large to pack in this format") # check constraints assert 0 <= mant < 1 << MANT_DIG - 1 assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 assert 0 <= sign <= 1 return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant big_endian_format = { 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, 'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, } default = big_endian_format formatmode={ '<' : (default, 'little'), '>' : (default, 'big'), '!' : (default, 'big'), '=' : (default, sys.byteorder), '@' : (default, sys.byteorder) } def getmode(fmt): try: formatdef,endianness = formatmode[fmt[0]] index = 1 except (IndexError, KeyError): formatdef,endianness = formatmode['@'] index = 0 return formatdef,endianness,index def getNum(fmt,i): num=None cur = fmt[i] while ('0'<= cur ) and ( cur <= '9'): if num == None: num = int(cur) else: num = 10*num + int(cur) i += 1 cur = fmt[i] return num,i def calcsize(fmt): """calcsize(fmt) -> int Return size of C struct described by format string fmt. See struct.__doc__ for more on format strings.""" formatdef,endianness,i = getmode(fmt) num = 0 result = 0 while i<len(fmt): num,i = getNum(fmt,i) cur = fmt[i] try: format = formatdef[cur] except KeyError: raise StructError("%s is not a valid format" % cur) if num != None : result += num*format['size'] else: result += format['size'] num = 0 i += 1 return result def pack(fmt,*args): """pack(fmt, v1, v2, ...) -> string Return string containing values v1, v2, ... packed according to fmt. 
See struct.__doc__ for more on format strings.""" formatdef,endianness,i = getmode(fmt) args = list(args) n_args = len(args) result = [] while i<len(fmt): num,i = getNum(fmt,i) cur = fmt[i] try: format = formatdef[cur] except KeyError: raise StructError("%s is not a valid format" % cur) if num == None : num_s = 0 num = 1 else: num_s = num if cur == 'x': result += [b'\0'*num] elif cur == 's': if isinstance(args[0], bytes): padding = num - len(args[0]) result += [args[0][:num] + b'\0'*padding] args.pop(0) else: raise StructError("arg for string format not a string") elif cur == 'p': if isinstance(args[0], bytes): padding = num - len(args[0]) - 1 if padding > 0: result += [bytes([len(args[0])]) + args[0][:num-1] + b'\0'*padding] else: if num<255: result += [bytes([num-1]) + args[0][:num-1]] else: result += [bytes([255]) + args[0][:num-1]] args.pop(0) else: raise StructError("arg for string format not a string") else: if len(args) < num: raise StructError("insufficient arguments to pack") for var in args[:num]: result += [format['pack'](var,format['size'],endianness)] args=args[num:] num = None i += 1 if len(args) != 0: raise StructError("too many arguments for pack format") return b''.join(result) def unpack(fmt,data): """unpack(fmt, string) -> (v1, v2, ...) Unpack the string, containing packed C structure data, according to fmt. Requires len(string)==calcsize(fmt). See struct.__doc__ for more on format strings.""" formatdef,endianness,i = getmode(fmt) j = 0 num = 0 result = [] length= calcsize(fmt) if length != len (data): raise StructError("unpack str size does not match format") while i<len(fmt): num,i=getNum(fmt,i) cur = fmt[i] i += 1 try: format = formatdef[cur] except KeyError: raise StructError("%s is not a valid format" % cur) if not num : num = 1 if cur == 'x': j += num elif cur == 's': result.append(data[j:j+num]) j += num elif cur == 'p': n=data[j] if n >= num: n = num-1 result.append(data[j+1:j+n+1]) j += num else: for n in range(num): result += [format['unpack'](data,j,format['size'],endianness)] j += format['size'] return tuple(result) def pack_into(fmt, buf, offset, *args): data = pack(fmt, *args) buffer(buf)[offset:offset+len(data)] = data def unpack_from(fmt, buf, offset=0): size = calcsize(fmt) data = buffer(buf)[offset:offset+size] if len(data) != size: raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return unpack(fmt, data) def _clearcache(): "Clear the internal cache." # No cache in this implementation
agpl-3.0
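A round trip through this pure-Python implementation (pack, calcsize and unpack as defined above, not the C-accelerated stdlib struct); '>hH10s' is a big-endian signed short, unsigned short, and 10-byte string:

raw = pack('>hH10s', -5, 65535, b'brython')
print(calcsize('>hH10s'))      # 14
print(raw)                     # b'\xff\xfb\xff\xffbrython\x00\x00\x00'
print(unpack('>hH10s', raw))   # (-5, 65535, b'brython\x00\x00\x00')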
laperry1/android_external_chromium_org
third_party/tlslite/scripts/tls.py
109
10946
#!/usr/bin/env python # Authors: # Trevor Perrin # Marcelo Fernandez - bugfix and NPN support # Martin von Loewis - python 3 port # # See the LICENSE file for legal information regarding use of this file. from __future__ import print_function import sys import os import os.path import socket import time import getopt try: import httplib from SocketServer import * from BaseHTTPServer import * from SimpleHTTPServer import * except ImportError: # Python 3.x from http import client as httplib from socketserver import * from http.server import * if __name__ != "__main__": raise "This must be run as a command, not used as a module!" from tlslite.api import * from tlslite import __version__ try: from tack.structures.Tack import Tack except ImportError: pass def printUsage(s=None): if s: print("ERROR: %s" % s) print("") print("Version: %s" % __version__) print("") print("RNG: %s" % prngName) print("") print("Modules:") if tackpyLoaded: print(" tackpy : Loaded") else: print(" tackpy : Not Loaded") if m2cryptoLoaded: print(" M2Crypto : Loaded") else: print(" M2Crypto : Not Loaded") if pycryptoLoaded: print(" pycrypto : Loaded") else: print(" pycrypto : Not Loaded") if gmpyLoaded: print(" GMPY : Loaded") else: print(" GMPY : Not Loaded") print("") print("""Commands: server [-k KEY] [-c CERT] [-t TACK] [-v VERIFIERDB] [-d DIR] [--reqcert] HOST:PORT client [-k KEY] [-c CERT] [-u USER] [-p PASS] HOST:PORT """) sys.exit(-1) def printError(s): """Print error message and exit""" sys.stderr.write("ERROR: %s\n" % s) sys.exit(-1) def handleArgs(argv, argString, flagsList=[]): # Convert to getopt argstring format: # Add ":" after each arg, ie "abc" -> "a:b:c:" getOptArgString = ":".join(argString) + ":" try: opts, argv = getopt.getopt(argv, getOptArgString, flagsList) except getopt.GetoptError as e: printError(e) # Default values if arg not present privateKey = None certChain = None username = None password = None tacks = None verifierDB = None reqCert = False directory = None for opt, arg in opts: if opt == "-k": s = open(arg, "rb").read() privateKey = parsePEMKey(s, private=True) elif opt == "-c": s = open(arg, "rb").read() x509 = X509() x509.parse(s) certChain = X509CertChain([x509]) elif opt == "-u": username = arg elif opt == "-p": password = arg elif opt == "-t": if tackpyLoaded: s = open(arg, "rU").read() tacks = Tack.createFromPemList(s) elif opt == "-v": verifierDB = VerifierDB(arg) verifierDB.open() elif opt == "-d": directory = arg elif opt == "--reqcert": reqCert = True else: assert(False) if not argv: printError("Missing address") if len(argv)>1: printError("Too many arguments") #Split address into hostname/port tuple address = argv[0] address = address.split(":") if len(address) != 2: raise SyntaxError("Must specify <host>:<port>") address = ( address[0], int(address[1]) ) # Populate the return list retList = [address] if "k" in argString: retList.append(privateKey) if "c" in argString: retList.append(certChain) if "u" in argString: retList.append(username) if "p" in argString: retList.append(password) if "t" in argString: retList.append(tacks) if "v" in argString: retList.append(verifierDB) if "d" in argString: retList.append(directory) if "reqcert" in flagsList: retList.append(reqCert) return retList def printGoodConnection(connection, seconds): print(" Handshake time: %.3f seconds" % seconds) print(" Version: %s" % connection.getVersionName()) print(" Cipher: %s %s" % (connection.getCipherName(), connection.getCipherImplementation())) if connection.session.srpUsername: print(" Client SRP 
username: %s" % connection.session.srpUsername) if connection.session.clientCertChain: print(" Client X.509 SHA1 fingerprint: %s" % connection.session.clientCertChain.getFingerprint()) if connection.session.serverCertChain: print(" Server X.509 SHA1 fingerprint: %s" % connection.session.serverCertChain.getFingerprint()) if connection.session.serverName: print(" SNI: %s" % connection.session.serverName) if connection.session.tackExt: if connection.session.tackInHelloExt: emptyStr = "\n (via TLS Extension)" else: emptyStr = "\n (via TACK Certificate)" print(" TACK: %s" % emptyStr) print(str(connection.session.tackExt)) print(" Next-Protocol Negotiated: %s" % connection.next_proto) def clientCmd(argv): (address, privateKey, certChain, username, password) = \ handleArgs(argv, "kcup") if (certChain and not privateKey) or (not certChain and privateKey): raise SyntaxError("Must specify CERT and KEY together") if (username and not password) or (not username and password): raise SyntaxError("Must specify USER with PASS") if certChain and username: raise SyntaxError("Can use SRP or client cert for auth, not both") #Connect to server sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(5) sock.connect(address) connection = TLSConnection(sock) settings = HandshakeSettings() settings.useExperimentalTackExtension = True try: start = time.clock() if username and password: connection.handshakeClientSRP(username, password, settings=settings, serverName=address[0]) else: connection.handshakeClientCert(certChain, privateKey, settings=settings, serverName=address[0]) stop = time.clock() print("Handshake success") except TLSLocalAlert as a: if a.description == AlertDescription.user_canceled: print(str(a)) else: raise sys.exit(-1) except TLSRemoteAlert as a: if a.description == AlertDescription.unknown_psk_identity: if username: print("Unknown username") else: raise elif a.description == AlertDescription.bad_record_mac: if username: print("Bad username or password") else: raise elif a.description == AlertDescription.handshake_failure: print("Unable to negotiate mutually acceptable parameters") else: raise sys.exit(-1) printGoodConnection(connection, stop-start) connection.close() def serverCmd(argv): (address, privateKey, certChain, tacks, verifierDB, directory, reqCert) = handleArgs(argv, "kctbvd", ["reqcert"]) if (certChain and not privateKey) or (not certChain and privateKey): raise SyntaxError("Must specify CERT and KEY together") if tacks and not certChain: raise SyntaxError("Must specify CERT with Tacks") print("I am an HTTPS test server, I will listen on %s:%d" % (address[0], address[1])) if directory: os.chdir(directory) print("Serving files from %s" % os.getcwd()) if certChain and privateKey: print("Using certificate and private key...") if verifierDB: print("Using verifier DB...") if tacks: print("Using Tacks...") ############# sessionCache = SessionCache() class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, HTTPServer): def handshake(self, connection): print("About to handshake...") activationFlags = 0 if tacks: if len(tacks) == 1: activationFlags = 1 elif len(tacks) == 2: activationFlags = 3 try: start = time.clock() settings = HandshakeSettings() settings.useExperimentalTackExtension=True connection.handshakeServer(certChain=certChain, privateKey=privateKey, verifierDB=verifierDB, tacks=tacks, activationFlags=activationFlags, sessionCache=sessionCache, settings=settings, nextProtos=[b"http/1.1"]) # As an example (does not work here): #nextProtos=[b"spdy/3", b"spdy/2", 
b"http/1.1"]) stop = time.clock() except TLSRemoteAlert as a: if a.description == AlertDescription.user_canceled: print(str(a)) return False else: raise except TLSLocalAlert as a: if a.description == AlertDescription.unknown_psk_identity: if username: print("Unknown username") return False else: raise elif a.description == AlertDescription.bad_record_mac: if username: print("Bad username or password") return False else: raise elif a.description == AlertDescription.handshake_failure: print("Unable to negotiate mutually acceptable parameters") return False else: raise connection.ignoreAbruptClose = True printGoodConnection(connection, stop-start) return True httpd = MyHTTPServer(address, SimpleHTTPRequestHandler) httpd.serve_forever() if __name__ == '__main__': if len(sys.argv) < 2: printUsage("Missing command") elif sys.argv[1] == "client"[:len(sys.argv[1])]: clientCmd(sys.argv[2:]) elif sys.argv[1] == "server"[:len(sys.argv[1])]: serverCmd(sys.argv[2:]) else: printUsage("Unknown command: %s" % sys.argv[1])
bsd-3-clause
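clientCmd above boils down to a plain tlslite handshake over a connected socket. A condensed sketch of the same flow (host and port are placeholders; assumes a TLS server is listening, e.g. one started with "tls.py server -k KEY -c CERT localhost:4443"):

import socket
from tlslite.api import TLSConnection, HandshakeSettings

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect(('localhost', 4443))

connection = TLSConnection(sock)
connection.handshakeClientCert(settings=HandshakeSettings(),
                               serverName='localhost')
print(connection.getCipherName(), connection.getVersionName())
connection.close()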
kustodian/ansible
lib/ansible/modules/network/f5/bigip_firewall_schedule.py
24
21412
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2019, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_firewall_schedule short_description: Manage BIG-IP AFM schedule configurations description: - Manage BIG-IP AFM schedule configurations. version_added: 2.9 options: name: description: - Specifies the name of the AFM schedule configuration. type: str required: True description: description: - Specifies the user defined description text. type: str daily_hour_end: description: - Specifies the time of day the rule will stop being used. - When not defined, the default of C(24:00) is used when creating a new schedule. - The time zone is always assumed to be UTC and values must be provided as C(HH:MM) using 24hour clock format. type: str daily_hour_start: description: - Specifies the time of day the rule will start to be in use. - The value must be a time before C(daily_hour_end). - When not defined, the default of C(0:00) is used when creating a new schedule. - When the value is set to C(all-day) both C(daily_hour_end) and C(daily_hour_start) are reset to their respective defaults. - The time zone is always assumed to be UTC and values must be provided as C(HH:MM) using 24hour clock format. type: str date_valid_end: description: - Specifies the end date/time this schedule will apply to the rule. - The date must be after C(date_valid_start) - When not defined the default of C(indefinite) is used when creating a new schedule. - The time zone is always assumed to be UTC. - The datetime format should always be the following C(YYYY-MM-DD:HH:MM:SS) format. type: str date_valid_start: description: - Specifies the start date/time this schedule will apply to the rule. - When not defined the default of C(epoch) is used when creating a new schedule. - The time zone is always assumed to be UTC. - The datetime format should always be the following C(YYYY-MM-DD:HH:MM:SS) format. type: str days_of_week: description: - Specifies which days of the week the rule will be applied. - When not defined the default value of C(all) is used when creating a new schedule. - The C(all) value is mutually exclusive with other choices. type: list choices: - sunday - monday - tuesday - wednesday - thursday - friday - saturday - all state: description: - When C(present), ensures that the resource exists. - When C(absent), ensures the resource is removed. type: str choices: - present - absent default: present partition: description: - Device partition to manage resources on. 
type: str default: Common extends_documentation_fragment: f5 author: - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Create a 6 hour two day schedule, no start/end date bigip_firewall_schedule: name: barfoo daily_hour_start: 13:00 daily_hour_end: 19:00 days_of_week: - monday - tuesday provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Create a seven day schedule with start/end date bigip_firewall_schedule: name: foobar date_valid_start: "{{ lookup('pipe','date +%Y-%m-%d:%H:%M:%S') }}" date_valid_end: "{{ lookup('pipe','date -d \"now + 7 days\" +%Y-%m-%d:%H:%M:%S') }}" provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Modify created schedule to all-day bigip_firewall_schedule: name: barfoo daily_hour_start: all-day days_of_week: - monday - tuesday provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Modify a schedule to have no end date bigip_firewall_schedule: name: foobar date_valid_start: "{{ lookup('pipe','date +%Y-%m-%d:%H:%M:%S') }}" date_valid_end: "indefinite" provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Remove created schedule bigip_firewall_schedule: name: foobar state: absent provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' daily_hour_start: description: The time of day the rule will start to be in use. returned: changed type: str sample: '13:00' daily_hour_end: description: The time of day the rule will stop being used. returned: changed type: str sample: '18:00' date_valid_start: description: The start date/time schedule will apply to the rule. returned: changed type: str sample: 2019-03-01:15:30:00 date_valid_end: description: The end date/time schedule will apply to the rule. returned: changed type: str sample: 2019-03-11:15:30:00 days_of_week: description: The days of the week the rule will be applied. returned: changed type: list sample: ["monday","tuesday"] description: description: The user defined description text. 
returned: changed type: str sample: Foo is bar ''' import re import datetime from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.compare import cmp_str_with_none from library.module_utils.network.f5.compare import cmp_simple_list except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.compare import cmp_str_with_none from ansible.module_utils.network.f5.compare import cmp_simple_list class Parameters(AnsibleF5Parameters): api_map = { 'dailyHourEnd': 'daily_hour_end', 'dailyHourStart': 'daily_hour_start', 'dateValidEnd': 'date_valid_end', 'dateValidStart': 'date_valid_start', 'daysOfWeek': 'days_of_week', } api_attributes = [ 'dailyHourEnd', 'dailyHourStart', 'dateValidEnd', 'dateValidStart', 'daysOfWeek', 'description', ] returnables = [ 'daily_hour_end', 'daily_hour_start', 'date_valid_end', 'date_valid_start', 'days_of_week', 'description' ] updatables = [ 'daily_hour_end', 'daily_hour_start', 'date_valid_end', 'date_valid_start', 'days_of_week', 'description' ] class ApiParameters(Parameters): pass class ModuleParameters(Parameters): def _convert_datetime(self, value): p = r'(\d{4})-(\d{1,2})-(\d{1,2})[:, T](\d{2}):(\d{2}):(\d{2})' match = re.match(p, value) if match: date = '{0}-{1}-{2}T{3}:{4}:{5}Z'.format(*match.group(1, 2, 3, 4, 5, 6)) return date raise F5ModuleError( 'Invalid datetime provided.' ) def _validate_time(self, value): p = r'(\d{2}):(\d{2})' match = re.match(p, value) if match: time = int(match.group(1)), int(match.group(2)) try: datetime.time(*time) except ValueError as ex: raise F5ModuleError(str(ex)) def _compare_date_time(self, value1, value2, time=False): if time: p1 = r'(\d{2}):(\d{2})' m1 = re.match(p1, value1) m2 = re.match(p1, value2) if m1 and m2: start = tuple(int(i) for i in m1.group(1, 2)) end = tuple(int(i) for i in m2.group(1, 2)) if datetime.time(*start) > datetime.time(*end): raise F5ModuleError( 'End time must be later than start time.' ) else: p1 = r'(\d{4})-(\d{1,2})-(\d{1,2})[:, T](\d{2}):(\d{2}):(\d{2})' m1 = re.match(p1, value1) m2 = re.match(p1, value2) if m1 and m2: start = tuple(int(i) for i in m1.group(1, 2, 3, 4, 5, 6)) end = tuple(int(i) for i in m2.group(1, 2, 3, 4, 5, 6)) if datetime.datetime(*start) > datetime.datetime(*end): raise F5ModuleError( 'End date must be later than start date.' 
) @property def daily_hour_start(self): if self._values['daily_hour_start'] is None: return None if self._values['daily_hour_start'] == 'all-day': return '0:00' self._validate_time(self._values['daily_hour_start']) if self._values['daily_hour_end'] is not None and self.daily_hour_end != '24:00': self._compare_date_time(self._values['daily_hour_start'], self.daily_hour_end, time=True) return self._values['daily_hour_start'] @property def daily_hour_end(self): if self._values['daily_hour_end'] is None: return None if self._values['daily_hour_start'] == 'all-day': return '24:00' if not self._values['daily_hour_end'] == '24:00': self._validate_time(self._values['daily_hour_end']) return self._values['daily_hour_end'] @property def date_valid_end(self): if self._values['date_valid_end'] is None: return None if self._values['date_valid_end'] in ['2038-1-18:19:14:07', 'indefinite']: return 'indefinite' result = self._convert_datetime(self._values['date_valid_end']) return result @property def date_valid_start(self): if self._values['date_valid_start'] is None: return None if self._values['date_valid_start'] in ['1970-1-1:00:00:00', 'epoch']: return 'epoch' result = self._convert_datetime(self._values['date_valid_start']) if self._values['date_valid_end']: if self._values['date_valid_end'] not in ['2038-1-18:19:14:07', 'indefinite']: self._compare_date_time(result, self.date_valid_end) return result @property def days_of_week(self): if self._values['days_of_week'] is None: return None if 'all' in self._values['days_of_week']: if len(self._values['days_of_week']) > 1 and self._values['days_of_week'] is list: raise F5ModuleError( "The 'all' value must not be specified with other choices." ) week = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'] return week return self._values['days_of_week'] class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): def _convert_datetime(self, value): if value is None: return None p = r'(\d{4})-(\d{1,2})-(\d{1,2})[:, T](\d{2}):(\d{2}):(\d{2})' match = re.match(p, value) if match: date = '{0}-{1}-{2}:{3}:{4}:{5}'.format(*match.group(1, 2, 3, 4, 5, 6)) return date @property def date_valid_end(self): result = self._convert_datetime(self._values['date_valid_end']) return result @property def date_valid_start(self): result = self._convert_datetime(self._values['date_valid_start']) return result @property def days_of_week(self): if self._values['days_of_week'] is None: return None if len(self._values['days_of_week']) == 7: return 'all' class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def description(self): return cmp_str_with_none(self.want.description, self.have.description) @property def days_of_week(self): return cmp_simple_list(self.want.days_of_week, self.have.days_of_week) class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = 
ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def present(self): if self.exists(): return self.update() else: return self.create() def absent(self): if self.exists(): return self.remove() return False def should_update(self): result = self._update_changed_options() if result: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def exists(self): uri = "https://{0}:{1}/mgmt/tm/security/firewall/schedule/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/security/firewall/schedule/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 409]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/security/firewall/schedule/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def remove_from_device(self): uri = 
"https://{0}:{1}/mgmt/tm/security/firewall/schedule/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) response = self.client.api.delete(uri) if response.status == 200: return True raise F5ModuleError(response.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/security/firewall/schedule/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict( required=True ), description=dict(), daily_hour_end=dict(), daily_hour_start=dict(), date_valid_end=dict(), date_valid_start=dict(), days_of_week=dict( type='list', choices=[ 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all', ] ), state=dict(default='present', choices=['absent', 'present']), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
gpl-3.0
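A note on the datetime handling in the module above: ModuleParameters._convert_datetime accepts '2019-03-01:15:30:00' (with a colon, comma, space or 'T' between date and time) and rewrites it into the Zulu form the BIG-IP REST API stores. A minimal standalone sketch of the same conversion, using a hypothetical helper name but the module's own regex:

import re

DT_PATTERN = r'(\d{4})-(\d{1,2})-(\d{1,2})[:, T](\d{2}):(\d{2}):(\d{2})'

def to_api_datetime(value):
    # Same pattern as ModuleParameters._convert_datetime above.
    match = re.match(DT_PATTERN, value)
    if not match:
        raise ValueError('Invalid datetime provided.')
    return '{0}-{1}-{2}T{3}:{4}:{5}Z'.format(*match.groups())

print(to_api_datetime('2019-03-01:15:30:00'))  # 2019-03-01T15:30:00Z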
mesoscloud/events
0.6.1/docker.py
2
6182
import concurrent.futures import datetime import http.client import json import pickle import select import socket __all__ = [] class HTTPConnection(http.client.HTTPConnection): def __init__(self): http.client.HTTPConnection.__init__(self, 'localhost') def connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect('/var/run/docker.sock') self.sock = sock class HTTPError(Exception): def __init__(self, status, reason): self.status = status self.reason = reason def get(path, async=False): conn = HTTPConnection() try: conn.request('GET', path) resp = conn.getresponse() if resp.status != 200: raise HTTPError(resp.status, resp.reason) except Exception: conn.close() raise try: if async: return resp elif resp.headers.get('Content-Type') == 'application/json': return json.loads(resp.read().decode('utf-8')) else: return resp.read() finally: if not async: conn.close() def containers(): return [Container(c['Id'], c['Created']) for c in get('/containers/json')] class Container(object): executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) since = 0 def __init__(self, id_, created): self.id_ = id_ self.created = created self.logs = None self.logs_fd = None self.logs_stream = 'stdout' self.stats = None self.stats_fd = None self._info = None def __repr__(self): return "<Container %s created=%r>" % (self.id_, self.created) def __str__(self): return "%.12s" % self.id_ def __eq__(self, other): if self.id_ == other.id_ and self.created == other.created: return True def inspect(self): return get('/containers/%s/json' % self.id_) def logs_start(self, epoll): try: info = self.inspect() except HTTPError as exc: raise url = '/containers/%s/logs?follow=1&stdout=1&stderr=1&since=%s&timestamps=1' % (self.id_, Container.since) print(self, url) self.logs = Container.executor.submit(get, url, async=True) def logs_stop(self, epoll): # Let's attempt to cancel the future just in case self.logs.cancel() try: logs = self.logs.result(timeout=0) except (concurrent.futures.CancelledError, concurrent.futures.TimeoutError, HTTPError) as exc: print(self, 'logs', exc) return try: fd = logs.fileno() epoll.unregister(fd) print(self, 'logs', "unregistered (fd=%s)." % fd) except FileNotFoundError: pass logs.close() def logs_check(self, epoll): if self.logs_fd is not None: return try: logs = self.logs.result(timeout=0) except (concurrent.futures.TimeoutError, HTTPError) as exc: print(self, 'logs', exc) return print(self, 'logs', logs) self.logs_fd = logs.fileno() print(self, 'logs', self.logs_fd) try: epoll.register(self.logs_fd, select.EPOLLIN) print(self, 'logs', "registered (fd=%s)." % self.logs_fd) except FileExistsError: return def stats_start(self, epoll): try: info = self.inspect() except HTTPError as exc: raise url = '/containers/%s/stats' % self.id_ print(self, url) self.stats = Container.executor.submit(get, url, async=True) def stats_stop(self, epoll): # Let's attempt to cancel the future just in case self.stats.cancel() try: stats = self.stats.result(timeout=0) except (concurrent.futures.CancelledError, concurrent.futures.TimeoutError, HTTPError) as exc: print(self, 'stats', exc) return try: fd = stats.fileno() epoll.unregister(fd) print(self, 'stats', "unregistered (fd=%s)." 
% fd) except FileNotFoundError: pass stats.close() def stats_check(self, epoll): if self.stats_fd is not None: return try: stats = self.stats.result(timeout=0) except (concurrent.futures.TimeoutError, HTTPError) as exc: print(self, 'stats', exc) return print(self, 'stats', stats) self.stats_fd = stats.fileno() print(self, 'stats', self.stats_fd) try: epoll.register(self.stats_fd, select.EPOLLIN) print(self, 'stats', "registered (fd=%s)." % self.stats_fd) except FileExistsError: return def parse(data): """Parse stream >>> parse(b'80\\r\\n{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7","time":1445856406}\\n\\r\\n') (b'', b'{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7","time":1445856406}\\n') >>> parse(b'80\\r\\n{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7","time":1445856406}\\n') (b'80\\r\\n{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7","time":1445856406}\\n', b'') >>> parse(b'80\\r\\n{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7"') (b'80\\r\\n{"status":"create","id":"46e344569d70e9cf849a217701d5ef2e866dff122c1d5f1641b490e680c15c5d","from":"centos:7"', b'') """ if data.count(b'\r\n') < 2: return data, b'' i = data.find(b'\r\n') x = data[:i] y = int(x, 16) data = data[i + 2:] if len(data) < y + 2: return data, b'' line = data[:y] data = data[y + 2:] return data, line
mit
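Two things are worth spelling out in docker.py. First, 'async' became a reserved word in Python 3.7, so the 'async=' keyword argument restricts this module to Python 3.6 and older. Second, parse() is an incremental HTTP chunked-transfer decoder: it peels one complete '<hex length>\r\n<payload>\r\n' frame off the front of the buffer and returns the remainder, or returns the input untouched while a frame is still partial. A small driver loop (hypothetical framing, same function) shows the intended feeding pattern:

buf = b''
for chunk in (b'4\r\nabcd\r\n', b'5\r\nhel', b'lo\r\n'):  # arbitrary read boundaries
    buf += chunk
    while True:
        buf, line = parse(buf)
        if not line:
            break
        print(line)  # b'abcd', then b'hello'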
richtermondt/inithub-web
inithub/manager/views/interest_manager.py
1
1278
'''
@author: rtermondt
'''
from django.shortcuts import render_to_response
from django.template import RequestContext
from manager.models import Interest, Interests
from django.contrib.auth.decorators import login_required


@login_required()
def interest_manager(request):
    system_message = None
    if request.POST:
        il = request.POST.getlist('interest_list')
        Interests.objects.filter(
            profile_id=request.session['profile_id']).delete()
        for interest in il:
            Interests.objects.create(profile_id=request.session[
                'profile_id'], interest_id=interest)
        system_message = 'Interests updated'
        # return render_to_response('interest_manager.html', {
        #     'system_message': 'Update complete',
        # }, context_instance=RequestContext(request))
    i = Interest.objects.all()
    interest_selected = Interests.objects.get_query_set(
    ).filter(profile_id=request.session['profile_id'])
    return render_to_response('interest_manager.html', {
        'interest_list': i,
        'interest_selected': interest_selected,
        'system_message': system_message
    }, RequestContext(request))
mit
hassaanm/stock-trading
pybrain-pybrain-87c7ac3/examples/rl/environments/linear_fa/xor.py
4
2197
""" Toy example for RL with linear function approximation. This illustrates how a 'AND'-state-space can be solved, but not an 'XOR' space. """ __author__ = 'Tom Schaul, [email protected]' from pybrain.rl.learners.valuebased.linearfa import Q_LinFA from pybrain.rl.environments.classic.xor import XORTask from pybrain.rl.experiments.experiment import Experiment from pybrain.rl.agents.learning import LearningAgent from random import random, randint class LinFA_QAgent(LearningAgent): """ Customization of the Agent class for linear function approximation learners. """ epsilon = 0.1 logging = False def __init__(self, learner): self.learner = learner self.previousobs = None def getAction(self): if random() < self.epsilon: a = randint(0, self.learner.num_actions-1) else: a = self.learner._greedyAction(self.lastobs) self.lastaction = a return a def giveReward(self, r): LearningAgent.giveReward(self, r) if self.previousobs is not None: #print self.previousobs, a, self.lastreward, self.lastobs self.learner._updateWeights(self.previousobs, self.previousaction, self.previousreward, self.lastobs) self.previousobs = self.lastobs self.previousaction = self.lastaction self.previousreward = self.lastreward def runExp(gamma=0, epsilon=0.1, xor=False, lr = 0.02): if xor: print "Attempting the XOR task" else: print "Attempting the AND task" task = XORTask() task.and_task = not xor l = Q_LinFA(task.nactions, task.nsenses) l.rewardDiscount = gamma l.learningRate = lr agent = LinFA_QAgent(l) agent.epsilon = epsilon exp = Experiment(task, agent) sofar = 0 for i in range(30): exp.doInteractions(100) print exp.task.cumreward - sofar, if i%10 == 9: print sofar = exp.task.cumreward l._decayLearningRate() if __name__ == "__main__": runExp(xor=False) print runExp(xor=True) print runExp(xor=True)
apache-2.0
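The reason the XOR run above fails while AND succeeds is linear separability: a greedy policy over Q-values that are linear in the inputs draws a linear decision boundary, and no such boundary realizes XOR. That fact is independent of the RL machinery and can be checked with a plain perceptron; this sketch assumes NumPy and is not part of the example above:

import numpy as np

def perceptron_accuracy(X, y, epochs=50):
    # The perceptron converges exactly when the data are linearly separable.
    Xb = np.hstack([X, np.ones((len(X), 1))])  # bias column
    w = np.zeros(Xb.shape[1])
    for _ in range(epochs):
        for xi, yi in zip(Xb, y):
            pred = 1.0 if xi @ w > 0 else 0.0
            w += (yi - pred) * xi
    return ((Xb @ w > 0) == (y == 1)).mean()

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
print('AND:', perceptron_accuracy(X, np.array([0., 0., 0., 1.])))  # 1.0
print('XOR:', perceptron_accuracy(X, np.array([0., 1., 1., 0.])))  # stuck below 1.0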
dmrtsvetkov/flowercoin
share/rpcuser/rpcuser.py
115
1110
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac

if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    sys.exit(0)

username = sys.argv[1]

# This uses os.urandom() underneath
cryptogen = SystemRandom()

# Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])

# Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))

digestmod = hashlib.sha256
if sys.version_info.major >= 3:
    password = password.decode('utf-8')
    digestmod = 'SHA256'

m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()

print("String to be appended to bitcoin.conf:")
print("rpcauth=" + username + ":" + salt + "$" + result)
print("Your password:\n" + password)
mit
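The printed rpcauth line has the form 'rpcauth=<user>:<salt>$<hmac>'. The server never stores the password; it recomputes the HMAC-SHA256 of the submitted password keyed with the stored salt and compares digests. A sketch of that verification side (hypothetical helper, Python 3, mirroring the generation code above):

import hashlib
import hmac

def check_rpcauth(rpcauth_line, username, password):
    # rpcauth_line: 'rpcauth=<user>:<hex salt>$<hex hmac>'
    user, rest = rpcauth_line.split('=', 1)[1].split(':', 1)
    salt, expected = rest.split('$', 1)
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'),
                      hashlib.sha256).hexdigest()
    return user == username and hmac.compare_digest(digest, expected)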
trabacus-softapps/openerp-8.0-cc
openerp/addons/sale_crm/__openerp__.py
5
2304
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Opportunity to Quotation', 'version': '1.0', 'category': 'Hidden', 'description': """ This module adds a shortcut on one or several opportunity cases in the CRM. =========================================================================== This shortcut allows you to generate a sales order based on the selected case. If different cases are open (a list), it generates one sale order by case. The case is then closed and linked to the generated sales order. We suggest you to install this module, if you installed both the sale and the crm modules. """, 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'images': ['images/crm_statistics_dashboard.jpeg', 'images/opportunity_to_quote.jpeg'], 'depends': ['sale', 'crm', 'web_kanban_gauge'], 'data': [ 'wizard/crm_make_sale_view.xml', 'sale_crm_view.xml', 'sale_crm_data.xml', 'process/sale_crm_process.xml', 'security/sale_crm_security.xml', 'security/ir.model.access.csv', 'report/sale_report_view.xml', ], 'js': [ 'static/src/js/sale_crm.js', ], 'demo': ['sale_crm_demo.xml'], 'test': ['test/sale_crm.yml'], 'installable': True, 'auto_install': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
darcyliu/storyboard
boto/rds/dbsnapshot.py
29
2730
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class DBSnapshot(object): """ Represents a RDS DB Snapshot """ def __init__(self, connection=None, id=None): self.connection = connection self.id = id self.engine = None self.snapshot_create_time = None self.instance_create_time = None self.port = None self.status = None self.availability_zone = None self.master_username = None self.allocated_storage = None self.instance_id = None self.availability_zone = None def __repr__(self): return 'DBSnapshot:%s' % self.id def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'Engine': self.engine = value elif name == 'InstanceCreateTime': self.instance_create_time = value elif name == 'SnapshotCreateTime': self.snapshot_create_time = value elif name == 'DBInstanceIdentifier': self.instance_id = value elif name == 'DBSnapshotIdentifier': self.id = value elif name == 'Port': self.port = int(value) elif name == 'Status': self.status = value elif name == 'AvailabilityZone': self.availability_zone = value elif name == 'MasterUsername': self.master_username = value elif name == 'AllocatedStorage': self.allocated_storage = int(value) elif name == 'SnapshotTime': self.time = value else: setattr(self, name, value)
mit
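DBSnapshot itself is inert: boto's XML response handler drives it, calling endElement() once per closing tag, which is why the class is little more than a tag-to-attribute mapping plus a catch-all setattr for unrecognized tags. Retrieval goes through the RDS connection; a hedged sketch, assuming classic boto with AWS credentials configured in the environment:

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')
for snap in conn.get_all_dbsnapshots():
    print('%s %s %s %sGB' % (snap.id, snap.instance_id, snap.status,
                             snap.allocated_storage))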
probablytom/tomwallis.net
venv/lib/python2.7/site-packages/django/contrib/gis/measure.py
93
12310
# Copyright (c) 2007, Robert Coup <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Distance nor the names of its contributors may be used # to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ Distance and Area objects to allow for sensible and convenient calculation and conversions. Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio Inspired by GeoPy (http://exogen.case.edu/projects/geopy/) and Geoff Biggs' PhD work on dimensioned units for robotics. 
""" __all__ = ['A', 'Area', 'D', 'Distance'] from decimal import Decimal from django.utils.functional import total_ordering from django.utils import six NUMERIC_TYPES = six.integer_types + (float, Decimal) AREA_PREFIX = "sq_" def pretty_name(obj): return obj.__name__ if obj.__class__ == type else obj.__class__.__name__ @total_ordering class MeasureBase(object): STANDARD_UNIT = None ALIAS = {} UNITS = {} LALIAS = {} def __init__(self, default_unit=None, **kwargs): value, self._default_unit = self.default_units(kwargs) setattr(self, self.STANDARD_UNIT, value) if default_unit and isinstance(default_unit, six.string_types): self._default_unit = default_unit def _get_standard(self): return getattr(self, self.STANDARD_UNIT) def _set_standard(self, value): setattr(self, self.STANDARD_UNIT, value) standard = property(_get_standard, _set_standard) def __getattr__(self, name): if name in self.UNITS: return self.standard / self.UNITS[name] else: raise AttributeError('Unknown unit type: %s' % name) def __repr__(self): return '%s(%s=%s)' % (pretty_name(self), self._default_unit, getattr(self, self._default_unit)) def __str__(self): return '%s %s' % (getattr(self, self._default_unit), self._default_unit) # **** Comparison methods **** def __eq__(self, other): if isinstance(other, self.__class__): return self.standard == other.standard else: return NotImplemented def __lt__(self, other): if isinstance(other, self.__class__): return self.standard < other.standard else: return NotImplemented # **** Operators methods **** def __add__(self, other): if isinstance(other, self.__class__): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard + other.standard)}) else: raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)}) def __iadd__(self, other): if isinstance(other, self.__class__): self.standard += other.standard return self else: raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)}) def __sub__(self, other): if isinstance(other, self.__class__): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard - other.standard)}) else: raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)}) def __isub__(self, other): if isinstance(other, self.__class__): self.standard -= other.standard return self else: raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)}) def __mul__(self, other): if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard * other)}) else: raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)}) def __imul__(self, other): if isinstance(other, NUMERIC_TYPES): self.standard *= float(other) return self else: raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)}) def __rmul__(self, other): return self * other def __truediv__(self, other): if isinstance(other, self.__class__): return self.standard / other.standard if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard / other)}) else: raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)}) def __div__(self, other): # Python 2 compatibility return type(self).__truediv__(self, other) def __itruediv__(self, other): if isinstance(other, NUMERIC_TYPES): self.standard /= float(other) 
return self else: raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)}) def __idiv__(self, other): # Python 2 compatibility return type(self).__itruediv__(self, other) def __bool__(self): return bool(self.standard) def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def default_units(self, kwargs): """ Return the unit value and the default units specified from the given keyword arguments dictionary. """ val = 0.0 default_unit = self.STANDARD_UNIT for unit, value in six.iteritems(kwargs): if not isinstance(value, float): value = float(value) if unit in self.UNITS: val += self.UNITS[unit] * value default_unit = unit elif unit in self.ALIAS: u = self.ALIAS[unit] val += self.UNITS[u] * value default_unit = u else: lower = unit.lower() if lower in self.UNITS: val += self.UNITS[lower] * value default_unit = lower elif lower in self.LALIAS: u = self.LALIAS[lower] val += self.UNITS[u] * value default_unit = u else: raise AttributeError('Unknown unit type: %s' % unit) return val, default_unit @classmethod def unit_attname(cls, unit_str): """ Retrieves the unit attribute name for the given unit string. For example, if the given unit string is 'metre', 'm' would be returned. An exception is raised if an attribute cannot be found. """ lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception('Could not find a unit keyword associated with "%s"' % unit_str) class Distance(MeasureBase): STANDARD_UNIT = "m" UNITS = { 'chain': 20.1168, 'chain_benoit': 20.116782, 'chain_sears': 20.1167645, 'british_chain_benoit': 20.1167824944, 'british_chain_sears': 20.1167651216, 'british_chain_sears_truncated': 20.116756, 'cm': 0.01, 'british_ft': 0.304799471539, 'british_yd': 0.914398414616, 'clarke_ft': 0.3047972654, 'clarke_link': 0.201166195164, 'fathom': 1.8288, 'ft': 0.3048, 'german_m': 1.0000135965, 'gold_coast_ft': 0.304799710181508, 'indian_yd': 0.914398530744, 'inch': 0.0254, 'km': 1000.0, 'link': 0.201168, 'link_benoit': 0.20116782, 'link_sears': 0.20116765, 'm': 1.0, 'mi': 1609.344, 'mm': 0.001, 'nm': 1852.0, 'nm_uk': 1853.184, 'rod': 5.0292, 'sears_yd': 0.91439841, 'survey_ft': 0.304800609601, 'um': 0.000001, 'yd': 0.9144, } # Unit aliases for `UNIT` terms encountered in Spatial Reference WKT. ALIAS = { 'centimeter': 'cm', 'foot': 'ft', 'inches': 'inch', 'kilometer': 'km', 'kilometre': 'km', 'meter': 'm', 'metre': 'm', 'micrometer': 'um', 'micrometre': 'um', 'millimeter': 'mm', 'millimetre': 'mm', 'mile': 'mi', 'yard': 'yd', 'British chain (Benoit 1895 B)': 'british_chain_benoit', 'British chain (Sears 1922)': 'british_chain_sears', 'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated', 'British foot (Sears 1922)': 'british_ft', 'British foot': 'british_ft', 'British yard (Sears 1922)': 'british_yd', 'British yard': 'british_yd', "Clarke's Foot": 'clarke_ft', "Clarke's link": 'clarke_link', 'Chain (Benoit)': 'chain_benoit', 'Chain (Sears)': 'chain_sears', 'Foot (International)': 'ft', 'German legal metre': 'german_m', 'Gold Coast foot': 'gold_coast_ft', 'Indian yard': 'indian_yd', 'Link (Benoit)': 'link_benoit', 'Link (Sears)': 'link_sears', 'Nautical Mile': 'nm', 'Nautical Mile (UK)': 'nm_uk', 'US survey foot': 'survey_ft', 'U.S. 
Foot': 'survey_ft', 'Yard (Indian)': 'indian_yd', 'Yard (Sears)': 'sears_yd' } LALIAS = dict((k.lower(), v) for k, v in ALIAS.items()) def __mul__(self, other): if isinstance(other, self.__class__): return Area(default_unit=AREA_PREFIX + self._default_unit, **{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)}) elif isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard * other)}) else: raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % { "distance": pretty_name(self.__class__), }) class Area(MeasureBase): STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT # Getting the square units values and the alias dictionary. UNITS = dict(('%s%s' % (AREA_PREFIX, k), v ** 2) for k, v in Distance.UNITS.items()) ALIAS = dict((k, '%s%s' % (AREA_PREFIX, v)) for k, v in Distance.ALIAS.items()) LALIAS = dict((k.lower(), v) for k, v in ALIAS.items()) def __truediv__(self, other): if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard / other)}) else: raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)}) def __div__(self, other): # Python 2 compatibility return type(self).__truediv__(self, other) # Shortcuts D = Distance A = Area
artistic-2.0
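Concretely, the measure API reads like this: every quantity is stored in the STANDARD_UNIT (metres or square metres), any registered unit or alias works as a constructor keyword or attribute, and multiplying two Distances promotes the result to an Area. For example:

from django.contrib.gis.measure import A, D

d = D(mi=1)
print(d.km)              # 1.609344
print((d + D(km=1)).m)   # 2609.344 -- arithmetic happens in the standard unit
print(D(m=5) * D(m=4))   # 20.0 sq_m -- Distance * Distance yields an Area
print(A(sq_km=1).sq_m)   # 1000000.0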
krux/adspygoogle
examples/adspygoogle/dfp/v201203/create_labels.py
2
1778
#!/usr/bin/python # # Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code example creates new labels. To determine which labels exist, run get_all_labels.py. This feature is only available to DFP premium solution networks.""" __author__ = '[email protected] (Jeff Sham)' # Locate the client library. If module was installed via "setup.py" script, then # the following two lines are not needed. import os import sys sys.path.insert(0, os.path.join('..', '..', '..', '..')) # Import appropriate classes from the client library. from adspygoogle import DfpClient # Initialize client object. client = DfpClient(path=os.path.join('..', '..', '..', '..')) # Initialize appropriate service. label_service = client.GetService( 'LabelService', 'https://www.google.com', 'v201203') # Create label objects. labels = [] for i in xrange(5): label = { 'name': 'Label #%d' % i, 'isActive': 'True', 'types': ['COMPETITIVE_EXCLUSION'] } labels.append(label) # Add Labels. labels = label_service.CreateLabels(labels) # Display results. for label in labels: print ('Label with id \'%s\', name \'%s\', and types {%s} was found.' % (label['id'], label['name'], ','.join(label['types'])))
apache-2.0
compas-dev/compas
src/compas_rhino/conduits/faces.py
1
2740
from __future__ import print_function from __future__ import absolute_import from __future__ import division try: basestring except NameError: basestring = str from System.Drawing.Color import FromArgb from Rhino.Geometry import Point3d from compas.utilities import color_to_rgb from compas_rhino.conduits.base import BaseConduit __all__ = ['FacesConduit'] class FacesConduit(BaseConduit): """A Rhino display conduit for faces. Parameters ---------- vertices : list of list of float The coordinates of the vertices of the faces. faces : list of list of int The faces defined as lists of indices in ``vertices``. color : list of str or 3-tuple, optional The colors of the faces. Default is ``None``, in which case the default color is used for all faces. Attributes ---------- color : list of RGB colors The color specification per face. vertices : list of list of float The coordinates of the vertices of the faces. faces : list of list of int The faces defined as lists of indices in ``vertices``. Examples -------- .. code-block:: python from compas.geometry import Polyhedron from compas_rhino.conduits import FacesConduit polyhedron = Polyhedron.generate(6) faces = polyhedron.faces vertices = polyhedron.vertices polygons = [[vertices[index] for index in face] for face in faces] conduit = FacesConduit(polygons) with conduit.enabled(): conduit.redraw(pause=5.0) """ def __init__(self, vertices, faces, color=None, **kwargs): super(FacesConduit, self).__init__(**kwargs) self._default_color = FromArgb(255, 255, 255) self._color = None self.vertices = vertices or [] self.faces = faces or [] self.color = color @property def color(self): return self._color @color.setter def color(self, color): if not color: return f = len(self.faces) if isinstance(color, (basestring, tuple)): color = [color for _ in range(f)] color = [FromArgb(* color_to_rgb(c)) for c in color] c = len(color) if c < f: color += [self._default_color for _ in range(f - c)] elif c > f: color[:] = color[:f] self._color = color def DrawForeground(self, e): for i, face in enumerate(self.faces): points = [Point3d(* self.vertices[key]) for key in face] if self.color: e.Display.DrawPolygon(points, self.color[i], True) else: e.Display.DrawPolygon(points, self._default_color, True)
mit
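The only subtle part of FacesConduit is the color setter: a single color (string or RGB tuple) is broadcast to every face, a short list is padded with the default color, and a long list is truncated, so DrawForeground can always index color[i] safely. The same normalization in isolation, as a hypothetical helper over plain RGB tuples instead of Rhino colors:

def normalize_colors(color, n, default=(255, 255, 255)):
    # Broadcast one color, pad short lists, truncate long ones.
    if isinstance(color, (str, tuple)):
        color = [color] * n
    color = list(color)[:n]
    color += [default] * (n - len(color))
    return color

print(normalize_colors((255, 0, 0), 3))        # broadcast to 3 faces
print(normalize_colors([(0, 0, 0)] * 5, 3))    # truncated to 3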
cym13/anime_verif
anime_verif.py
1
4001
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2013 Cédric Picard # # LICENSE # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # END_OF_LICENSE # """ Check size anomalies and episode numbers sequence. Usage: anime_verif.py [-a N] [-s|-n] DIRECTORY Options: -a, --accuracy N Do not show files whose size deviation is less than N times the standart deviation. Default is 3. -s, --size Check size only -n, --numbers Check episode numbers only If -s and -n are missing or if -s and -n are used together, size and numbers are both checked. """ import os import math import re from docopt import docopt def extract_numbers(filename): """ Extract the numbers present in `filename'.. """ numbers = re.compile(r'[0-9]+') return [ int(x) for x in numbers.findall(filename) ] def size_check(size_list, file_list, accuracy): """ Detect 0-sized files and size anomalies. """ if 0 in size_list: print('Presence of files of size 0') return False # Smooth data to the MB order size_list = [ math.floor(x / 1024**2) for x in size_list ] # Set the average size and the variance for statistical size study average_size = sum(size_list) / len(size_list) var = sum({(x - average_size) ** 2 for x in size_list}) / len(size_list) # Detect size anomalies file_token = 0 for size in size_list: if (size - average_size) ** 2 > accuracy * var and size < average_size: print('Size anomaly detected: ' + \ file_list[file_token].encode('utf-8')) return False file_token += 1 # If the directory passed all size tests: return True def ep_numbers_check(file_list): """ Check that all episode numbers are following each other. Rely on alphanumeric naming order. """ file_list.sort() for index in range(1, len(file_list)): prev_numbers = extract_numbers(file_list[index - 1]) follow = False for each in extract_numbers(file_list[index]): if (each - 1) in prev_numbers: follow = True if not follow: return False return True if __name__ == '__main__': arguments = docopt(__doc__) # Set default options if arguments['--accuracy']: accuracy = float(arguments['--accuracy']) else: accuracy = 5 if not arguments['--size'] and not arguments['--numbers']: arguments['--size'] = True arguments['--numbers'] = True target_dir = arguments['DIRECTORY'].decode('utf-8') if not os.path.isdir(target_dir): print('This is not a directory: ' + target_dir.encode('utf-8')) os.sys.exit(1) os.chdir(target_dir) size_list = [] file_list = [] for each in os.listdir('.'): if os.path.isfile(each.decode('utf-8')): size_list.append(os.path.getsize(each.decode('utf-8'))) file_list.append(each.decode('utf-8')) if size_list == []: print('No file found in directory: ' + target_dir.encode('utf-8')) os.sys.exit(1) if arguments['--size'] and not size_check(size_list, file_list, accuracy): os.sys.exit(1) if arguments['--numbers'] and not ep_numbers_check(file_list): print('Some episodes may be missing:' + target_dir.encode('utf-8')) os.sys.exit(1)
gpl-3.0
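size_check() reports a file only when its squared deviation from the mean exceeds accuracy times the population variance and the file is smaller than average, so oversized specials are tolerated while undersized (likely truncated) episodes are flagged. The same test on toy data, mirroring the maths above:

sizes = [350.0, 348.0, 352.0, 351.0, 120.0]   # sizes in MB; the last file looks truncated
mean = sum(sizes) / len(sizes)
var = sum((s - mean) ** 2 for s in sizes) / len(sizes)
accuracy = 3
flagged = [s for s in sizes if (s - mean) ** 2 > accuracy * var and s < mean]
print(flagged)   # [120.0]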
openjck/kuma
scripts/clone_db.py
2
10579
#!/usr/bin/env python2.7 """ This script performs all the steps needed to produce an anonymized DB dump: * Produce a dump of the original DB * Import the dump into a temporary DB * Run anonymize.sql on the temporary DB * Produce an anonymized dump from the temporary DB * Drop the temporary DB * Delete the dump of the original DB """ from datetime import datetime import os import os.path import subprocess import sys from textwrap import dedent from optparse import OptionParser # # Whenever a new table is created, add appropriate steps to anonymize.sql and # then add the table here. anonymize.sql may be run independantly, instead of # this script, so make sure anonymize.sql performs sanitization as well. # # To remove a table from the anonymized database: # Remove it from TABLES_TO_DUMP # Add DROP TABLE IF EXISTS {table name}; to anonymize.sql # # To ensure an empty table in the anonymized database: # Add to TABLES_TO_DUMP # Add TRUNCATE {table name}; to anonymize.sql # # To anonymize records: # Add to TABLES_TO_DUMP # Add UPDATE {table name} ...; to anonymize.sql # # To keep production records: # Add to TABLES_TO_DUMP # Add a comment to anonymize.sql so future devs know you considered the table # TABLES_TO_DUMP=[x.strip() for x in """ account_emailaddress account_emailconfirmation actioncounters_actioncounterunique actioncounters_testmodel attachments_attachment attachments_attachmentrevision attachments_documentattachment auth_group auth_group_permissions auth_message auth_permission auth_user auth_user_groups auth_user_user_permissions authkeys_key authkeys_keyaction celery_taskmeta celery_tasksetmeta constance_config contentflagging_contentflag core_ipban demos_submission django_admin_log django_cache django_content_type django_migrations django_session django_site djcelery_crontabschedule djcelery_intervalschedule djcelery_periodictask djcelery_periodictasks djcelery_taskstate djcelery_workerstate feeder_bundle feeder_bundle_feeds feeder_entry feeder_feed search_filter search_filtergroup search_index search_outdatedobject soapbox_message socialaccount_socialaccount socialaccount_socialapp socialaccount_socialapp_sites socialaccount_socialtoken tagging_tag tagging_taggeditem taggit_tag taggit_taggeditem tidings_watch tidings_watchfilter users_userban waffle_flag waffle_flag_groups waffle_flag_users waffle_sample waffle_switch wiki_document wiki_documentdeletionlog wiki_documenttag wiki_documentzone wiki_editortoolbar wiki_localizationtag wiki_localizationtaggedrevision wiki_reviewtag wiki_reviewtaggedrevision wiki_revision wiki_revisionip wiki_taggeddocument """.splitlines() if x.strip()] def print_info(s): if not opts.quiet: print s def print_debug(s): if not opts.quiet and opts.debug: print s class NotFound(Exception): pass def sysprint(command): """ Helper to print all system commands in debug mode """ print_debug("command: %s" % command) output = subprocess.check_output( command, shell=True, stderr=subprocess.STDOUT) for line in output.splitlines(): if line.endswith("command not found"): raise NotFound(output) elif line == ( 'Warning: Using a password on the command line interface can' ' be insecure.'): pass else: print(line) def main(): now = datetime.now().strftime('%Y%m%d') usage = """\ %%prog [options] DB_NAME Performs the steps needed to produce an anonymized DB dump. Examples: %%prog mdn_prod Connect to 127.0.0.1 as root, no password. Dump "mdn_prod", import to temporary table. Produce mdn_prod-anon-%(now)s.sql.gz Both the temporary table and the input dump are deleted. 
%%prog -i downloaded-dump.sql.gz Connect to 127.0.0.1 as root, no password. Import downloaded-dump.sql.gz. Produce mdn_prod-anon-%(now)s.sql.gz Input dump is not deleted. """ % dict( now=now ) options = OptionParser(dedent(usage.rstrip())) options.add_option('-u', '--user', default='root', help='MySQL user name') options.add_option('-p', '--password', default='', help='MySQL password') options.add_option('-H', '--host', default='127.0.0.1', help='MySQL host') options.add_option('-S', '--socket', help='MySQL socket') options.add_option('-i', '--input', help='Input SQL dump filename') options.add_option('-o', '--output', help='Output SQL dump filename') options.add_option('-q', '--quiet', action='store_true', help='Quiet all output') options.add_option('-D', '--debug', action='store_true', help='Enable debug output') options.add_option('--skip-input-dump', action='store_true', help='Skip the initial input DB dump') options.add_option('--skip-temp-create', action='store_true', help='Skip creation of the temporary DB') options.add_option('--skip-temp-import', action='store_true', help='Skip import of the input DB dump into the ' 'temporary DB') options.add_option('--skip-anonymize', action='store_true', help='Skip anonymization of the temporary DB') options.add_option('--skip-output-dump', action='store_true', help='Skip the post-anonymization DB dump') options.add_option('--skip-drop-temp-db', action='store_true', help='Skip dropping the temporary DB') options.add_option('--skip-delete-input', action='store_true', help='Skip deleting the input DB dump') global opts, args (opts, args) = options.parse_args() if len(args) < 1: options.error("Need an input DB name") input_db = args[0] base_dir = os.path.dirname(__file__) mysql_conn = '-u%(user)s %(password)s -h%(host)s' % dict( user=opts.user, password=opts.password and ('-p%s' % opts.password) or '', host=opts.host, ) if opts.socket: mysql_conn = '-u%(user)s %(password)s -S%(socket)s' % dict( user=opts.user, password=opts.password and ('-p%s' % opts.password) or '', socket=opts.socket, ) else: mysql_conn = '-u%(user)s %(password)s -h%(host)s' % dict( user=opts.user, password=opts.password and ('-p%s' % opts.password) or '', host=opts.host, ) if opts.input: input_dump_fn = opts.input else: input_dump_fn = '%s-%s.sql.gz' % (input_db, now) output_dump_fn = '%s-anon-%s.sql.gz' % (input_db, now) # TODO: replace dump, create, import with mysqldbcopy # https://dev.mysql.com/doc/mysql-utilities/1.3/en/mysqldbcopy.html if not opts.skip_input_dump and not opts.input: print_info("Dumping input DB to %s" % input_dump_fn) dump_cmd = ('mysqldump %(mysql_conn)s %(input_db)s %(tables)s | ' 'gzip > %(input_dump_fn)s' % dict( mysql_conn=mysql_conn, input_db=input_db, tables=' '.join(TABLES_TO_DUMP), input_dump_fn=input_dump_fn )) print_debug('\t%s' % dump_cmd) sysprint(dump_cmd) temp_db = '%s_anontmp_%s' % (input_db, now) if not opts.skip_temp_create: print_info('Creating temporary DB %s' % temp_db) sysprint(('mysql %(mysql_conn)s -e' '"DROP DATABASE IF EXISTS %(temp_db)s;"') % dict(mysql_conn=mysql_conn, temp_db=temp_db)) sysprint('mysqladmin %(mysql_conn)s create %(temp_db)s' % dict(mysql_conn=mysql_conn, temp_db=temp_db)) if not opts.skip_temp_import: print_info('Importing the input dump into the temporary DB') sysprint('cat %(input_dump_fn)s | gzip -dc | mysql %(mysql_conn)s ' '%(temp_db)s' % dict( input_dump_fn=input_dump_fn, mysql_conn=mysql_conn, temp_db=temp_db )) if not opts.skip_anonymize: anon_sql_fn = os.path.join(base_dir, 'anonymize.sql') 
print_info('Applying %s to the temporary DB' % anon_sql_fn) sysprint('cat %(anon_sql_fn)s | mysql %(mysql_conn)s ' '%(temp_db)s' % dict( anon_sql_fn=anon_sql_fn, mysql_conn=mysql_conn, temp_db=temp_db )) if not opts.skip_output_dump: print_info("Dumping temporary DB to %s" % output_dump_fn) dump_cmd = ('mysqldump %(mysql_conn)s %(temp_db)s | ' 'gzip > %(output_dump_fn)s' % dict( mysql_conn=mysql_conn, temp_db=temp_db, output_dump_fn=output_dump_fn )) print_debug('\t%s' % dump_cmd) sysprint(dump_cmd) if not opts.skip_drop_temp_db: print_info("Dropping temporary db %s" % temp_db) sysprint('mysqladmin %(mysql_conn)s -f drop %(temp_db)s' % dict(mysql_conn=mysql_conn, temp_db=temp_db)) if not opts.skip_delete_input and not opts.input: print_info('Deleting input DB dump %s' % input_dump_fn) os.remove(input_dump_fn) if __name__ == '__main__': retcode = None error = None try: main() except subprocess.CalledProcessError as e: if e.retcode < 0: error = "Command was terminated by signal" retcode = -e.retcode else: error = "Command errored with code %s" % e.retcode retcode = e.retcode except (NotFound, OSError) as e: error = "Command failed: %s" % e retcode = 127 if error: print >>sys.stderr, error print >>sys.stderr, "Clone FAILED." sys.exit(retcode) else: print >>sys.stderr, "Clone complete."
mpl-2.0
insomnia-lab/calibre
src/calibre/ebooks/mobi/writer2/serializer.py
9
14926
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' import re, unicodedata from calibre.ebooks.oeb.base import (OEB_DOCS, XHTML, XHTML_NS, XML_NS, namespace, prefixname, urlnormalize) from calibre.ebooks.mobi.mobiml import MBP_NS from calibre.ebooks.mobi.utils import is_guide_ref_start from collections import defaultdict from urlparse import urldefrag from cStringIO import StringIO class Serializer(object): NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'} def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True): ''' Write all the HTML markup in oeb into a single in memory buffer containing a single html document with links replaced by offsets into the buffer. :param oeb: OEBBook object that encapsulates the document to be processed. :param images: Mapping of image hrefs (urlnormalized) to image record indices. :param write_page_breaks_after_item: If True a MOBIpocket pagebreak tag is written after every element of the spine in ``oeb``. ''' self.oeb = oeb # Map of image hrefs to image index in the MOBI file self.images = images self.used_images = set() self.logger = oeb.logger self.is_periodical = is_periodical self.write_page_breaks_after_item = write_page_breaks_after_item # If not None, this is a number pointing to the location at which to # open the MOBI file on the Kindle self.start_offset = None # Mapping of hrefs (urlnormalized) to the offset in the buffer where # the resource pointed to by the href lives. Used at the end to fill in # the correct values into all filepos="..." links. self.id_offsets = {} # Mapping of hrefs (urlnormalized) to a list of offsets into the buffer # where filepos="..." elements are written corresponding to links that # point to the href. This is used at the end to fill in the correct values. self.href_offsets = defaultdict(list) # List of offsets in the buffer of non linear items in the spine. These # become uncrossable breaks in the MOBI self.breaks = [] self.find_blocks() def find_blocks(self): ''' Mark every item in the spine if it is the start/end of a section/article, so that it can be wrapped in divs appropriately. ''' for item in self.oeb.spine: item.is_section_start = item.is_section_end = False item.is_article_start = item.is_article_end = False def spine_item(tocitem): href = urldefrag(tocitem.href)[0] for item in self.oeb.spine: if item.href == href: return item for item in self.oeb.toc.iterdescendants(): if item.klass == 'section': articles = list(item) if not articles: continue spine_item(item).is_section_start = True for i, article in enumerate(articles): si = spine_item(article) if si is not None: si.is_article_start = True items = list(self.oeb.spine) in_sec = in_art = False for i, item in enumerate(items): try: prev_item = items[i-1] except: prev_item = None if in_art and item.is_article_start == True: prev_item.is_article_end = True in_art = False if in_sec and item.is_section_start == True: prev_item.is_section_end = True in_sec = False if item.is_section_start: in_sec = True if item.is_article_start: in_art = True item.is_section_end = item.is_article_end = True def __call__(self): ''' Return the document serialized as a single UTF-8 encoded bytestring. 
''' buf = self.buf = StringIO() buf.write(b'<html>') self.serialize_head() self.serialize_body() buf.write(b'</html>') self.end_offset = buf.tell() self.fixup_links() if self.start_offset is None and not self.is_periodical: # If we don't set a start offset, the stupid Kindle will # open the book at the location of the first IndexEntry, which # could be anywhere. So ensure the book is always opened at the # beginning, instead. self.start_offset = self.body_start_offset return buf.getvalue() def serialize_head(self): buf = self.buf buf.write(b'<head>') if len(self.oeb.guide) > 0: self.serialize_guide() buf.write(b'</head>') def serialize_guide(self): ''' The Kindle decides where to open a book based on the presence of an item in the guide that looks like <reference type="text" title="Start" href="chapter-one.xhtml"/> Similarly an item with type="toc" controls where the Goto Table of Contents operation on the kindle goes. ''' buf = self.buf hrefs = self.oeb.manifest.hrefs buf.write(b'<guide>') for ref in self.oeb.guide.values(): path = urldefrag(ref.href)[0] if path not in hrefs or hrefs[path].media_type not in OEB_DOCS: continue buf.write(b'<reference type="') if ref.type.startswith('other.') : self.serialize_text(ref.type.replace('other.',''), quot=True) else: self.serialize_text(ref.type, quot=True) buf.write(b'" ') if ref.title is not None: buf.write(b'title="') self.serialize_text(ref.title, quot=True) buf.write(b'" ') if is_guide_ref_start(ref): self._start_href = ref.href self.serialize_href(ref.href) # Space required or won't work, I kid you not buf.write(b' />') buf.write(b'</guide>') def serialize_href(self, href, base=None): ''' Serialize the href attribute of an <a> or <reference> tag. It is serialized as filepos="000000000" and a pointer to its location is stored in self.href_offsets so that the correct value can be filled in at the end. ''' hrefs = self.oeb.manifest.hrefs try: path, frag = urldefrag(urlnormalize(href)) except ValueError: # Unparseable URL return False if path and base: path = base.abshref(path) if path and path not in hrefs: return False buf = self.buf item = hrefs[path] if path else None if item and item.spine_position is None: return False path = item.href if item else base.href href = '#'.join((path, frag)) if frag else path buf.write(b'filepos=') self.href_offsets[href].append(buf.tell()) buf.write(b'0000000000') return True def serialize_body(self): ''' Serialize all items in the spine of the document. Non linear items are moved to the end. ''' buf = self.buf def serialize_toc_level(tocref, href=None): # add the provided toc level to the output stream # if href is provided add a link ref to the toc level output (e.g. feed_0/index.html) if href is not None: # resolve the section url in id_offsets buf.write('<mbp:pagebreak />') self.id_offsets[urlnormalize(href)] = buf.tell() if tocref.klass == "periodical": buf.write('<div> <div height="1em"></div>') else: t = tocref.title if isinstance(t, unicode): t = t.encode('utf-8') buf.write('<div></div> <div> <h2 height="1em"><font size="+2"><b>' +t+'</b></font></h2> <div height="1em"></div>') buf.write('<ul>') for tocitem in tocref.nodes: buf.write('<li><a filepos=') itemhref = tocitem.href if tocref.klass == 'periodical': # This is a section node. # For periodical tocs, the section urls are like r'feed_\d+/index.html' # We dont want to point to the start of the first article # so we change the href. 
itemhref = re.sub(r'article_\d+/', '', itemhref) self.href_offsets[itemhref].append(buf.tell()) buf.write('0000000000') buf.write(' ><font size="+1"><b><u>') t = tocitem.title if isinstance(t, unicode): t = t.encode('utf-8') buf.write(t) buf.write('</u></b></font></a></li>') buf.write('</ul><div height="1em"></div></div><mbp:pagebreak />') self.anchor_offset = buf.tell() buf.write(b'<body>') self.body_start_offset = buf.tell() if self.is_periodical: top_toc = self.oeb.toc.nodes[0] serialize_toc_level(top_toc) spine = [item for item in self.oeb.spine if item.linear] spine.extend([item for item in self.oeb.spine if not item.linear]) for item in spine: if self.is_periodical and item.is_section_start: for section_toc in top_toc.nodes: if urlnormalize(item.href) == section_toc.href: # create section url of the form r'feed_\d+/index.html' section_url = re.sub(r'article_\d+/', '', section_toc.href) serialize_toc_level(section_toc, section_url) section_toc.href = section_url break self.serialize_item(item) self.body_end_offset = buf.tell() buf.write(b'</body>') def serialize_item(self, item): ''' Serialize an individual item from the spine of the input document. A reference to this item is stored in self.href_offsets ''' buf = self.buf if not item.linear: self.breaks.append(buf.tell() - 1) self.id_offsets[urlnormalize(item.href)] = buf.tell() if item.is_section_start: buf.write(b'<a ></a> ') if item.is_article_start: buf.write(b'<a ></a> <a ></a>') for elem in item.data.find(XHTML('body')): self.serialize_elem(elem, item) if self.write_page_breaks_after_item: buf.write(b'<mbp:pagebreak/>') if item.is_article_end: # Kindle periodical article end marker buf.write(b'<a ></a> <a ></a>') if item.is_section_end: buf.write(b' <a ></a>') self.anchor_offset = None def serialize_elem(self, elem, item, nsrmap=NSRMAP): buf = self.buf if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) not in nsrmap: return tag = prefixname(elem.tag, nsrmap) # Previous layers take care of @name id_ = elem.attrib.pop('id', None) if id_: href = '#'.join((item.href, id_)) offset = self.anchor_offset or buf.tell() key = urlnormalize(href) # Only set this id_offset if it wasn't previously seen self.id_offsets[key] = self.id_offsets.get(key, offset) if self.anchor_offset is not None and \ tag == 'a' and not elem.attrib and \ not len(elem) and not elem.text: return self.anchor_offset = buf.tell() buf.write(b'<') buf.write(tag.encode('utf-8')) if elem.attrib: for attr, val in elem.attrib.items(): if namespace(attr) not in nsrmap: continue attr = prefixname(attr, nsrmap) buf.write(b' ') if attr == 'href': if self.serialize_href(val, item): continue elif attr == 'src': href = urlnormalize(item.abshref(val)) if href in self.images: index = self.images[href] self.used_images.add(href) buf.write(b'recindex="%05d"' % index) continue buf.write(attr.encode('utf-8')) buf.write(b'="') self.serialize_text(val, quot=True) buf.write(b'"') buf.write(b'>') if elem.text or len(elem) > 0: if elem.text: self.anchor_offset = None self.serialize_text(elem.text) for child in elem: self.serialize_elem(child, item) if child.tail: self.anchor_offset = None self.serialize_text(child.tail) buf.write(b'</%s>' % tag.encode('utf-8')) def serialize_text(self, text, quot=False): text = text.replace('&', '&amp;') text = text.replace('<', '&lt;') text = text.replace('>', '&gt;') text = text.replace(u'\u00AD', '') # Soft-hyphen if quot: text = text.replace('"', '&quot;') if isinstance(text, unicode): text = unicodedata.normalize('NFC', text) 
self.buf.write(text.encode('utf-8')) def fixup_links(self): ''' Fill in the correct values for all filepos="..." links with the offsets of the linked to content (as stored in id_offsets). ''' buf = self.buf id_offsets = self.id_offsets start_href = getattr(self, '_start_href', None) for href, hoffs in self.href_offsets.items(): is_start = (href and href == start_href) # Iterate over all filepos items if href not in id_offsets: self.logger.warn('Hyperlink target %r not found' % href) # Link to the top of the document, better than just ignoring href, _ = urldefrag(href) if href in self.id_offsets: ioff = self.id_offsets[href] if is_start: self.start_offset = ioff for hoff in hoffs: buf.seek(hoff) buf.write(b'%010d' % ioff)
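# Editorial note on the filepos scheme used above (not part of the original
# calibre source): serialize_href() always writes the fixed-width placeholder
# filepos=0000000000 and records the placeholder's byte offset in
# self.href_offsets. Once the whole stream is serialized, fixup_links() seeks
# back to each recorded offset and overwrites the placeholder in place:
#
#     buf.seek(hoff)
#     buf.write(b'%010d' % ioff)    # e.g. filepos=0000012345
#
# The fixed 10-digit width is what makes in-place patching possible without
# shifting any other offsets in the already-serialized buffer.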
gpl-3.0
GbalsaC/bitnamiP
lms/djangoapps/instructor/tests/test_registration_codes.py
42
12260
""" Test for the registration code status information. """ from courseware.tests.factories import InstructorFactory from xmodule.modulestore.tests.factories import CourseFactory from django.utils.translation import ugettext as _ from shoppingcart.models import ( Invoice, CourseRegistrationCodeInvoiceItem, CourseRegistrationCode, CourseRegCodeItem, Order, RegistrationCodeRedemption ) from student.models import CourseEnrollment from student.roles import CourseSalesAdminRole from nose.plugins.attrib import attr import json from student.tests.factories import UserFactory, CourseModeFactory from django.core.urlresolvers import reverse from django.test.utils import override_settings from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase @attr('shard_1') @override_settings(REGISTRATION_CODE_LENGTH=8) class TestCourseRegistrationCodeStatus(ModuleStoreTestCase): """ Test registration code status. """ def setUp(self): super(TestCourseRegistrationCodeStatus, self).setUp() self.course = CourseFactory.create() CourseModeFactory.create(course_id=self.course.id, min_price=50) self.instructor = InstructorFactory(course_key=self.course.id) self.client.login(username=self.instructor.username, password='test') CourseSalesAdminRole(self.course.id).add_users(self.instructor) # create testing invoice self.sale_invoice = Invoice.objects.create( total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='[email protected]', recipient_name='Testw', recipient_email='[email protected]', customer_reference_number='2Fwe23S', internal_reference="A", course_id=self.course.id, is_valid=True ) self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create( invoice=self.sale_invoice, qty=1, unit_price=1234.32, course_id=self.course.id ) self.lookup_code_url = reverse('look_up_registration_code', kwargs={'course_id': unicode(self.course.id)}) self.registration_code_detail_url = reverse('registration_code_details', kwargs={'course_id': unicode(self.course.id)}) url = reverse('generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}) data = { 'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': '[email protected]', 'company_contact_email': '[email protected]', 'unit_price': 122.45, 'recipient_name': 'Test123', 'recipient_email': '[email protected]', 'address_line_1': 'Portland Street', 'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '', 'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': '' } response = self.client.post(url, data) self.assertEqual(response.status_code, 200, response.content) def test_look_up_invalid_registration_code(self): """ Verify the view returns HTTP status 400 if an invalid registration code is passed. Also, verify the data returned includes a message indicating the error, and the is_registration_code_valid is set to False. 
""" data = { 'registration_code': 'invalid_reg_code' } response = self.client.get(self.lookup_code_url, data) self.assertEqual(response.status_code, 400) json_dict = json.loads(response.content) message = _('The enrollment code ({code}) was not found for the {course_name} course.').format( course_name=self.course.display_name, code=data['registration_code'] ) self.assertEqual(message, json_dict['message']) self.assertFalse(json_dict['is_registration_code_valid']) self.assertFalse(json_dict['is_registration_code_redeemed']) def test_look_up_valid_registration_code(self): """ test lookup for the valid registration code and that registration code has been redeemed by user and then mark the registration code as in_valid when marking as invalidate, it also lookup for registration redemption entry and also delete that redemption entry and un_enroll the student who used that registration code for their enrollment. """ for i in range(2): CourseRegistrationCode.objects.create( code='reg_code{}'.format(i), course_id=unicode(self.course.id), created_by=self.instructor, invoice=self.sale_invoice, invoice_item=self.invoice_item, mode_slug='honor' ) reg_code = CourseRegistrationCode.objects.all()[0] student = UserFactory() enrollment = CourseEnrollment.enroll(student, self.course.id) RegistrationCodeRedemption.objects.create( registration_code=reg_code, redeemed_by=student, course_enrollment=enrollment ) data = { 'registration_code': reg_code.code } response = self.client.get(self.lookup_code_url, data) self.assertEqual(response.status_code, 200) json_dict = json.loads(response.content) self.assertTrue(json_dict['is_registration_code_valid']) self.assertTrue(json_dict['is_registration_code_redeemed']) # now mark that registration code as invalid data = { 'registration_code': reg_code.code, 'action_type': 'invalidate_registration_code' } response = self.client.post(self.registration_code_detail_url, data) self.assertEqual(response.status_code, 200) json_dict = json.loads(response.content) message = _('This enrollment code has been canceled. It can no longer be used.') self.assertEqual(message, json_dict['message']) # now check that the registration code should be marked as invalid in the db. reg_code = CourseRegistrationCode.objects.get(code=reg_code.code) self.assertEqual(reg_code.is_valid, False) redemption = RegistrationCodeRedemption.get_registration_code_redemption(reg_code.code, self.course.id) self.assertIsNone(redemption) # now the student course enrollment should be false. enrollment = CourseEnrollment.get_enrollment(student, self.course.id) self.assertEqual(enrollment.is_active, False) def test_lookup_valid_redeemed_registration_code(self): """ test to lookup for the valid and redeemed registration code and then mark that registration code as un_redeemed which will unenroll the user and delete the redemption entry from the database. 
""" student = UserFactory() self.client.login(username=student.username, password='test') cart = Order.get_cart_for_user(student) cart.order_type = 'business' cart.save() CourseRegCodeItem.add_to_order(cart, self.course.id, 2) cart.purchase() reg_code = CourseRegistrationCode.objects.filter(order=cart)[0] enrollment = CourseEnrollment.enroll(student, self.course.id) RegistrationCodeRedemption.objects.create( registration_code=reg_code, redeemed_by=student, course_enrollment=enrollment ) self.client.login(username=self.instructor.username, password='test') data = { 'registration_code': reg_code.code } response = self.client.get(self.lookup_code_url, data) self.assertEqual(response.status_code, 200) json_dict = json.loads(response.content) self.assertTrue(json_dict['is_registration_code_valid']) self.assertTrue(json_dict['is_registration_code_redeemed']) # now mark the registration code as unredeemed # this will unenroll the user and removed the redemption entry from # the database. data = { 'registration_code': reg_code.code, 'action_type': 'unredeem_registration_code' } response = self.client.post(self.registration_code_detail_url, data) self.assertEqual(response.status_code, 200) json_dict = json.loads(response.content) message = _('This enrollment code has been marked as unused.') self.assertEqual(message, json_dict['message']) redemption = RegistrationCodeRedemption.get_registration_code_redemption(reg_code.code, self.course.id) self.assertIsNone(redemption) # now the student course enrollment should be false. enrollment = CourseEnrollment.get_enrollment(student, self.course.id) self.assertEqual(enrollment.is_active, False) def test_apply_invalid_reg_code_when_updating_code_information(self): """ test to apply an invalid registration code when updating the registration code information. """ data = { 'registration_code': 'invalid_registration_code', 'action_type': 'unredeem_registration_code' } response = self.client.post(self.registration_code_detail_url, data) self.assertEqual(response.status_code, 400) json_dict = json.loads(response.content) message = _('The enrollment code ({code}) was not found for the {course_name} course.').format( course_name=self.course.display_name, code=data['registration_code'] ) self.assertEqual(message, json_dict['message']) def test_mark_registration_code_as_valid(self): """ test to mark the invalid registration code as valid """ for i in range(2): CourseRegistrationCode.objects.create( code='reg_code{}'.format(i), course_id=self.course.id.to_deprecated_string(), created_by=self.instructor, invoice=self.sale_invoice, invoice_item=self.invoice_item, mode_slug='honor', is_valid=False ) reg_code = CourseRegistrationCode.objects.all()[0] data = { 'registration_code': reg_code.code, 'action_type': 'validate_registration_code' } response = self.client.post(self.registration_code_detail_url, data) self.assertEqual(response.status_code, 200) json_dict = json.loads(response.content) message = _('The enrollment code has been restored.') self.assertEqual(message, json_dict['message']) # now check that the registration code should be marked as valid in the db. reg_code = CourseRegistrationCode.objects.get(code=reg_code.code) self.assertEqual(reg_code.is_valid, True) def test_returns_error_when_unredeeming_already_unredeemed_registration_code_redemption(self): """ test to mark the already unredeemed registration code as unredeemed. 
""" for i in range(2): CourseRegistrationCode.objects.create( code='reg_code{}'.format(i), course_id=self.course.id.to_deprecated_string(), created_by=self.instructor, invoice=self.sale_invoice, invoice_item=self.invoice_item, mode_slug='honor' ) reg_code = CourseRegistrationCode.objects.all()[0] data = { 'registration_code': reg_code.code, 'action_type': 'unredeem_registration_code' } response = self.client.post(self.registration_code_detail_url, data) self.assertEqual(response.status_code, 400) json_dict = json.loads(response.content) message = _('The redemption does not exist against enrollment code ({code}).').format(code=reg_code.code) self.assertEqual(message, json_dict['message'])
agpl-3.0
hanlind/nova
nova/conf/service_token.py
1
2573
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import loading as ks_loading
from oslo_config import cfg

SERVICE_USER_GROUP = 'service_user'

service_user = cfg.OptGroup(
    SERVICE_USER_GROUP,
    title='Service token authentication type options',
    help="""
Configuration options for service to service authentication using a service
token. These options allow sending a service token along with the user's
token when contacting external REST APIs.
""")

service_user_opts = [
    cfg.BoolOpt('send_service_user_token',
                default=False,
                help="""
When True, if sending a user token to a REST API, also send a service token.

Nova often reuses the user token provided to the nova-api to talk to other
REST APIs, such as Cinder. It is possible that while the user token was valid
when the request was made to Nova, the token may expire before it reaches the
other service. To avoid any failures, and to make it clear it is Nova calling
the service on the user's behalf, we include a service token along with the
user token. Should the user's token have expired, a valid service token
ensures the REST API request will still be accepted by the keystone
middleware.

This feature is currently experimental, and as such is turned off by default
while full testing and performance tuning of this feature is completed.
"""),
]


def register_opts(conf):
    conf.register_group(service_user)
    conf.register_opts(service_user_opts, group=service_user)
    ks_loading.register_session_conf_options(conf, SERVICE_USER_GROUP)
    ks_loading.register_auth_conf_options(conf, SERVICE_USER_GROUP)


def list_opts():
    return {
        service_user: (
            service_user_opts +
            ks_loading.get_session_conf_options() +
            ks_loading.get_auth_common_conf_options() +
            ks_loading.get_auth_plugin_conf_options('password') +
            ks_loading.get_auth_plugin_conf_options('v2password') +
            ks_loading.get_auth_plugin_conf_options('v3password'))
    }
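# Illustrative nova.conf fragment enabling this feature (editorial sketch,
# not part of the original module). The [service_user] auth options come from
# keystoneauth1's standard 'password' plugin registered above; the exact
# values are deployment-specific assumptions:
#
#     [service_user]
#     send_service_user_token = True
#     auth_type = password
#     auth_url = https://keystone.example.com/v3
#     username = nova
#     password = secret
#     user_domain_name = Default
#     project_name = service
#     project_domain_name = Default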
apache-2.0
marrow/schema
test/transform/test_boolean.py
1
1445
from marrow.schema.testing import TransformTest from marrow.schema.transform.type import Boolean, boolean, WebBoolean, web_boolean class TestBooleanNative(TransformTest): transform = boolean.native invalid = ('x', ) @property def valid(self): yield None, None if boolean.none: yield '', None for i in boolean.truthy + ('Y', 'True', True, 1, ['foo']): yield i, True for i in boolean.falsy + ('n', 'False', False, 0, []): yield i, False class TestBooleanForeign(TransformTest): transform = boolean.foreign @property def valid(self): if boolean.none: yield None, '' for i in (0, 1, False, True, [], [0]): yield i, boolean.truthy[boolean.use] if bool(i) else boolean.falsy[boolean.use] for i in boolean.truthy: yield i, boolean.truthy[boolean.use] for i in boolean.falsy: yield i, boolean.falsy[boolean.use] class TestBooleanNoNoneNative(TransformTest): transform = Boolean(none=False).native valid = ((None, False), ) invalid = ('', 'bob') class TestBooleanNoNoneForeign(TransformTest): transform = Boolean(none=False).foreign valid = ((None, 'false'), ('foo', 'true'), ('', 'false')) class TestWebBooleanNative(TransformTest): transform = web_boolean.native valid = ( (['', 'true'], True), ([''], False), ('', False), ) class TestWebBooleanForeign(TransformTest): transform = web_boolean.foreign valid = [(i, bool(i)) for i in (0, 1, False, True)]
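# Rough usage sketch (editorial; assumes the Boolean transform's default
# truthy/falsy token lists include 'true'/'false', as the tests above imply):
#
#     boolean.native('true')            # -> True
#     boolean.native('')                # -> None (when boolean.none is set)
#     boolean.foreign(True)             # -> boolean.truthy[boolean.use], e.g. 'true'
#     web_boolean.native(['', 'true'])  # -> True (hidden-field + checkbox idiom)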
mit
messi2050/android_kernel_huawei_msm8610
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python

"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call
time are provided.

Usage:
    Be sure that you have CONFIG_FUNCTION_TRACER
    # mount -t debugfs nodev /sys/kernel/debug
    # echo function > /sys/kernel/debug/tracing/current_tracer
    $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
    Wait for a while, but not too long; the script is a bit slow.
    Break the pipe (Ctrl + Z)
    $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
    Then you have your drawn trace in draw_functrace
"""


import sys, re

class CallTree:
    """ This class provides a tree representation of the functions
        call stack. If a function has no parent in the kernel (interrupt,
        syscall, kernel thread...) then it is attached to a virtual parent
        called ROOT.
    """
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
            into the tree at the appropriate place.
            @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
            has the name given by func. If no such parent exists,
            create the function as a new child of root.
            @return: A reference to the parent.
        """
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch + " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch + " |", True)
            i += 1
        return s

class BrokenLineException(Exception):
    """ If the last line is not complete because the pipe broke,
        we want to stop the processing and ignore this line.
    """
    pass

class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
        just ignore it.
    """
    pass


def parseLine(line):
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))


def main():
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT

if __name__ == "__main__":
    main()
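# Example of what parseLine() extracts (the trace line below is hypothetical,
# shown only to illustrate the regex "[^]]+?\] +([0-9.]+): (\w+) <-(\w+)"):
#
#     line = "bash-4251 [001] 13284.253232: sys_close <-system_call_fastpath"
#     parseLine(line)  # -> ("13284.253232", "sys_close", "system_call_fastpath")
#
# i.e. a (call time, callee, caller) tuple, which main() then feeds into the
# CallTree to reconstruct the call hierarchy.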
gpl-2.0
r-owen/TUI
TUI/Models/PermsModel.py
1
1598
#!/usr/bin/env python
"""A model of the state of the perms actor.

It contains instance variables that are KeyVariables
or sets of KeyVariables. All are directly associated
with status keywords.

History:
2003-12-10 ROwen
2003-12-17 ROwen    Moved KeyVarFactory to RO.KeyVariable.
2004-05-18 ROwen    Eliminated unused testMode argument.
2004-07-22 ROwen    Stopped importing three unused modules.
"""
import RO.KeyVariable
import TUI.TUIModel

_theModel = None

def getModel():
    global _theModel
    if _theModel is None:
        _theModel = _Model()
    return _theModel

class _Model(object):
    def __init__(self):
        self.dispatcher = TUI.TUIModel.getModel().dispatcher

        keyVarFact = RO.KeyVariable.KeyVarFactory(
            actor = "perms",
            dispatcher = self.dispatcher,
            refreshCmd = "status",
            nval = (0, None),
            converters = str,
        )

        self.actors = keyVarFact(
            keyword = "actors",
            description = "Actors controlled by perms",
        )

        self.authList = keyVarFact(
            keyword = "authList",
            nval = (1, None),
            description = "Program and 0 or more authorized actors",
            refreshCmd = None, # no authLists if no programs yet registered
        )

        self.lockedActors = keyVarFact(
            keyword = "lockedActors",
            description = "Actors locked out by APO",
        )

        self.programs = keyVarFact(
            keyword = "programs",
            description = "Programs registered with perms",
        )
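# Hypothetical usage sketch (editorial, not part of the original module).
# It assumes the usual RO.KeyVariable callback convention of
# callFunc(valueList, isCurrent, keyVar); check the RO package docs before
# relying on the exact signature:
#
#     import TUI.Models.PermsModel
#
#     def _actorsCallback(valueList, isCurrent, keyVar=None):
#         if isCurrent:
#             print "perms-controlled actors:", valueList
#
#     permsModel = TUI.Models.PermsModel.getModel()
#     permsModel.actors.addCallback(_actorsCallback)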
bsd-3-clause
tony810430/flink
flink-python/pyflink/fn_execution/beam/beam_operations_slow.py
2
4637
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ from abc import abstractmethod from apache_beam.runners.worker.operations import Operation from apache_beam.utils.windowed_value import WindowedValue from pyflink.fn_execution.operations import BundleOperation class FunctionOperation(Operation): """ Base class of function operation that will execute StatelessFunction or StatefulFunction for each input element. """ def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls): super(FunctionOperation, self).__init__(name, spec, counter_factory, sampler) self.consumer = consumers['output'][0] self._value_coder_impl = self.consumer.windowed_coder.wrapped_value_coder.get_impl() self.operation_cls = operation_cls self.operation = self.generate_operation() self.process_element = self.operation.process_element self.operation.open() def setup(self): super(FunctionOperation, self).setup() def start(self): with self.scoped_start_state: super(FunctionOperation, self).start() def finish(self): with self.scoped_finish_state: super(FunctionOperation, self).finish() self.operation.finish() def needs_finalization(self): return False def reset(self): super(FunctionOperation, self).reset() def teardown(self): with self.scoped_finish_state: self.operation.close() def progress_metrics(self): metrics = super(FunctionOperation, self).progress_metrics() metrics.processed_elements.measured.output_element_counts.clear() tag = None receiver = self.receivers[0] metrics.processed_elements.measured.output_element_counts[ str(tag)] = receiver.opcounter.element_counter.value() return metrics def process(self, o: WindowedValue): with self.scoped_process_state: output_stream = self.consumer.output_stream if isinstance(self.operation, BundleOperation): for value in o.value: self.process_element(value) self._value_coder_impl.encode_to_stream( self.operation.finish_bundle(), output_stream, True) output_stream.maybe_flush() else: for value in o.value: self._value_coder_impl.encode_to_stream( self.process_element(value), output_stream, True) output_stream.maybe_flush() def monitoring_infos(self, transform_id, tag_to_pcollection_id): """ Only pass user metric to Java :param tag_to_pcollection_id: useless for user metric """ return super().user_monitoring_infos(transform_id) @abstractmethod def generate_operation(self): pass class StatelessFunctionOperation(FunctionOperation): def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls): super(StatelessFunctionOperation, self).__init__( name, spec, counter_factory, sampler, consumers, operation_cls) def generate_operation(self): return self.operation_cls(self.spec) 
class StatefulFunctionOperation(FunctionOperation): def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls, keyed_state_backend): self.keyed_state_backend = keyed_state_backend super(StatefulFunctionOperation, self).__init__( name, spec, counter_factory, sampler, consumers, operation_cls) def generate_operation(self): return self.operation_cls(self.spec, self.keyed_state_backend)
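# Design note (editorial): FunctionOperation is a template method; the only
# hook subclasses override is generate_operation(). A hypothetical further
# variant would follow the same shape:
#
#     class MyFunctionOperation(FunctionOperation):
#         def generate_operation(self):
#             # e.g. hand extra collaborators to the wrapped operation here
#             return self.operation_cls(self.spec)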
apache-2.0
viacoin/viacoin
test/functional/p2p_segwit.py
1
92836
#!/usr/bin/env python3 # Copyright (c) 2016-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test segwit transactions and blocks on P2P network.""" from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER from test_framework.key import CECKey, CPubKey import time import random from binascii import hexlify # The versionbit bit used to signal activation of SegWit VB_WITNESS_BIT = 1 VB_PERIOD = 144 VB_TOP_BITS = 0x20000000 MAX_SIGOP_COST = 8000 # Calculate the virtual size of a witness block: # (base + witness/4) def get_virtual_size(witness_block): base_size = len(witness_block.serialize(with_witness=False)) total_size = len(witness_block.serialize(with_witness=True)) # the "+3" is so we round up vsize = int((3*base_size + total_size + 3)/4) return vsize def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None): """Send a transaction to the node and check that it's accepted to the mempool - Submit the transaction over the p2p interface - use the getrawmempool rpc to check for acceptance.""" tx_message = msg_tx(tx) if with_witness: tx_message = msg_witness_tx(tx) p2p.send_message(tx_message) p2p.sync_with_ping() assert_equal(tx.hash in rpc.getrawmempool(), accepted) if (reason != None and not accepted): # Check the rejection reason as well. with mininode_lock: assert_equal(p2p.last_message["reject"].reason, reason) def test_witness_block(rpc, p2p, block, accepted, with_witness=True): """Send a block to the node and check that it's accepted - Submit the block over the p2p interface - use the getbestblockhash rpc to check for acceptance.""" if with_witness: p2p.send_message(msg_witness_block(block)) else: p2p.send_message(msg_block(block)) p2p.sync_with_ping() assert_equal(rpc.getbestblockhash() == block.hash, accepted) class TestNode(P2PInterface): def __init__(self): super().__init__() self.getdataset = set() def on_getdata(self, message): for inv in message.inv: self.getdataset.add(inv.hash) def announce_tx_and_wait_for_getdata(self, tx, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) self.wait_for_getdata(timeout) def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) self.last_message.pop("getheaders", None) msg = msg_headers() msg.headers = [ CBlockHeader(block) ] if use_header: self.send_message(msg) else: self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) self.wait_for_getheaders() self.send_message(msg) self.wait_for_getdata() def request_block(self, blockhash, inv_type, timeout=60): with mininode_lock: self.last_message.pop("block", None) self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) self.wait_for_block(blockhash, timeout) return self.last_message["block"].block # Used to keep track of anyone-can-spend outputs that we can use in the tests class UTXO(): def __init__(self, sha256, n, nValue): self.sha256 = sha256 self.n = n self.nValue = nValue # Helper for getting the script associated with a P2PKH def GetP2PKHScript(pubkeyhash): return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, 
CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) # Add signature for a P2PK witness program. def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key): tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value) signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1') txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script] txTo.rehash() class SegWitTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) self.sync_all() ''' Helpers ''' # Build a block on top of node0's tip. def build_next_block(self, nVersion=VB_TOP_BITS): tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() + 1 block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 block = create_block(int(tip, 16), create_coinbase(height), block_time) block.nVersion = nVersion block.rehash() return block # Adds list of transactions to block, adds witness commitment, then solves. def update_witness_block_with_transactions(self, block, tx_list, nonce=0): block.vtx.extend(tx_list) add_witness_commitment(block, nonce) block.solve() return ''' Individual tests ''' def test_witness_services(self): self.log.info("Verifying NODE_WITNESS service bit") assert((self.test_node.nServices & NODE_WITNESS) != 0) # See if sending a regular transaction works, and create a utxo # to use in later tests. def test_non_witness_transaction(self): # Mine a block with an anyone-can-spend coinbase, # let it mature, then try to spend it. self.log.info("Testing non-witness transaction") block = self.build_next_block(nVersion=1) block.solve() self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() # make sure the block was processed txid = block.vtx[0].sha256 self.nodes[0].generate(99) # let the block mature # Create a transaction that spends the coinbase tx = CTransaction() tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx.calc_sha256() # Check that serializing it with or without witness is the same # This is a sanity check of our testing framework. assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) self.test_node.send_message(msg_witness_tx(tx)) self.test_node.sync_with_ping() # make sure the tx was processed assert(tx.hash in self.nodes[0].getrawmempool()) # Save this transaction for later self.utxo.append(UTXO(tx.sha256, 0, 49*100000000)) self.nodes[0].generate(1) # Verify that blocks with witnesses are rejected before activation. 
def test_unnecessary_witness_before_segwit_activation(self): self.log.info("Testing behavior of unnecessary witnesses") # For now, rely on earlier tests to have created at least one utxo for # us to use assert(len(self.utxo) > 0) assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active') tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] # Verify the hash with witness differs from the txid # (otherwise our testing framework must be broken!) tx.rehash() assert(tx.sha256 != tx.calc_sha256(with_witness=True)) # Construct a segwit-signaling block that includes the transaction. block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT))) self.update_witness_block_with_transactions(block, [tx]) # Sending witness data before activation is not allowed (anti-spam # rule). test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # TODO: fix synchronization so we can test reject reason # Right now, bitcoind delays sending reject messages for blocks # until the future, making synchronization here difficult. #assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness") # But it should not be permanently marked bad... # Resend without witness information. self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() assert_equal(self.nodes[0].getbestblockhash(), block.hash) sync_blocks(self.nodes) # Create a p2sh output -- this is so we can pass the standardness # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped # in P2SH). p2sh_program = CScript([OP_TRUE]) p2sh_pubkey = hash160(p2sh_program) scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # Now check that unnecessary witnesses can't be used to blind a node # to a transaction, eg by violating standardness checks. tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey)) tx2.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True) self.nodes[0].generate(1) sync_blocks(self.nodes) # We'll add an unnecessary witness to this transaction that would cause # it to be non-standard, to test that violating policy with a witness before # segwit activation doesn't blind a node to a transaction. Transactions # rejected for having a witness before segwit activation shouldn't be added # to the rejection cache. tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey)) tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000] tx3.rehash() # Note that this should be rejected for the premature witness reason, # rather than a policy check, since segwit hasn't activated yet. test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet') # If we send without witness, it should be accepted. test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True) # Now create a new anyone-can-spend utxo for the next test. 
tx4 = CTransaction() tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program]))) tx4.vout.append(CTxOut(tx3.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx4.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True) test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Update our utxo list; we spent the first entry. self.utxo.pop(0) self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue)) # Mine enough blocks for segwit's vb state to be 'started'. def advance_to_segwit_started(self): height = self.nodes[0].getblockcount() # Will need to rewrite the tests here if we are past the first period assert(height < VB_PERIOD - 1) # Genesis block is 'defined'. assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined') # Advance to end of period, status should now be 'started' self.nodes[0].generate(VB_PERIOD-height-1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') # Mine enough blocks to lock in segwit, but don't activate. # TODO: we could verify that lockin only happens at the right threshold of # signalling blocks, rather than just at the right period boundary. def advance_to_segwit_lockin(self): height = self.nodes[0].getblockcount() assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') # Advance to end of period, and verify lock-in happens at the end self.nodes[0].generate(VB_PERIOD-1) height = self.nodes[0].getblockcount() assert((height % VB_PERIOD) == VB_PERIOD - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') # Mine enough blocks to activate segwit. # TODO: we could verify that activation only happens at the right threshold # of signalling blocks, rather than just at the right period boundary. def advance_to_segwit_active(self): assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') height = self.nodes[0].getblockcount() self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active') # This test can only be run after segwit has activated def test_witness_commitments(self): self.log.info("Testing witness commitments") # First try a correct witness commitment. block = self.build_next_block() add_witness_commitment(block) block.solve() # Test the test -- witness serialization should be different assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) # This empty block should be valid. test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Try to tweak the nonce block_2 = self.build_next_block() add_witness_commitment(block_2, nonce=28) block_2.solve() # The commitment should have changed! assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]) # This should also be valid. 
test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True) # Now test commitments with actual transactions assert (len(self.utxo) > 0) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) # Let's construct a witness program witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() # tx2 will spend tx1, and send back to a regular anyone-can-spend address tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) # Add an extra OP_RETURN output that matches the witness commitment template, # even though it has extra data after the incorrect commitment. # This block should fail. block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() block_3.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) # Add a different commitment with different nonce, but in the # right location, and with some funds burned(!). # This should succeed (nValue shouldn't affect finding the # witness commitment). add_witness_commitment(block_3, nonce=0) block_3.vtx[0].vout[0].nValue -= 1 block_3.vtx[0].vout[-1].nValue += 1 block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns block_3.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True) # Finally test that a block with no witness transactions can # omit the commitment. block_4 = self.build_next_block() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx3.rehash() block_4.vtx.append(tx3) block_4.hashMerkleRoot = block_4.calc_merkle_root() block_4.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True) # Update available utxo's for use in later test. self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) def test_block_malleability(self): self.log.info("Testing witness block malleability") # Make sure that a block that has too big a virtual size # because of a too-large coinbase witness is not permanently # marked bad. block = self.build_next_block() add_witness_commitment(block) block.solve() block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000) assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE) # We can't send over the p2p network, because this is too big to relay # TODO: repeat this test with a block that can be relayed self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE) self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() == block.hash) # Now make sure that malleating the witness nonce doesn't # result in a block permanently marked bad. 
block = self.build_next_block() add_witness_commitment(block) block.solve() # Change the nonce -- should not cause the block to be permanently # failed block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Changing the witness nonce doesn't change the block hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) def test_witness_block_size(self): self.log.info("Testing witness block size limit") # TODO: Test that non-witness carrying blocks can't exceed 1MB # Skipping this test for now; this is covered in p2p-fullblocktest.py # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. block = self.build_next_block() assert(len(self.utxo) > 0) # Create a P2WSH transaction. # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE. # This should give us plenty of room to tweak the spending tx's # virtual size. NUM_DROPS = 10 # 201 max ops per script! NUM_OUTPUTS = 50 witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE]) witness_hash = uint256_from_str(sha256(witness_program)) scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)]) prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) value = self.utxo[0].nValue parent_tx = CTransaction() parent_tx.vin.append(CTxIn(prevout, b"")) child_value = int(value/NUM_OUTPUTS) for i in range(NUM_OUTPUTS): parent_tx.vout.append(CTxOut(child_value, scriptPubKey)) parent_tx.vout[0].nValue -= 50000 assert(parent_tx.vout[0].nValue > 0) parent_tx.rehash() child_tx = CTransaction() for i in range(NUM_OUTPUTS): child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] for i in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program] child_tx.rehash() self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) vsize = get_virtual_size(block) additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4 i = 0 while additional_bytes > 0: # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 extra_bytes = min(additional_bytes+1, 55) block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes) additional_bytes -= extra_bytes i += 1 block.vtx[0].vout.pop() # Remove old commitment add_witness_commitment(block) block.solve() vsize = get_virtual_size(block) assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) # Make sure that our test case would exceed the old max-network-message # limit # Viacoin: scale to blocksize proportion assert(len(block.serialize(True)) > 2*1024*1024 // 16.66) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now resize the second transaction to make the block fit. cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1) block.vtx[0].vout.pop() add_witness_commitment(block) block.solve() assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Update available utxo's self.utxo.pop(0) self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) # submitblock will try to add the nonce automatically, so that mining # software doesn't need to worry about doing so itself. 
def test_submit_block(self): block = self.build_next_block() # Try using a custom nonce and then don't supply it. # This shouldn't possibly work. add_witness_commitment(block, nonce=1) block.vtx[0].wit = CTxWitness() # drop the nonce block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) # Now redo commitment with the standard nonce, but let bitcoind fill it in. add_witness_commitment(block, nonce=0) block.vtx[0].wit = CTxWitness() block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert_equal(self.nodes[0].getbestblockhash(), block.hash) # This time, add a tx with non-empty witness, but don't supply # the commitment. block_2 = self.build_next_block() add_witness_commitment(block_2) block_2.solve() # Drop commitment and nonce -- submitblock should not fill in. block_2.vtx[0].vout.pop() block_2.vtx[0].wit = CTxWitness() self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True))) # Tip should not advance! assert(self.nodes[0].getbestblockhash() != block_2.hash) # Consensus tests of extra witness data in a transaction. def test_extra_witness_data(self): self.log.info("Testing extra witness data in tx") assert(len(self.utxo) > 0) block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # First try extra witness data on a tx that doesn't require a witness tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey)) tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] tx.rehash() self.update_witness_block_with_transactions(block, [tx]) # Extra witness data should not be allowed. test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Try extra signature data. Ok if we're not spending a witness output. block.vtx[1].wit.vtxinwit = [] block.vtx[1].vin[0].scriptSig = CScript([OP_0]) block.vtx[1].rehash() add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Now try extra witness/signature data on an input that DOES require a # witness tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ] tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) # This has extra witness data, so it should fail. test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now get rid of the extra witness, but add extra scriptSig data tx2.vin[0].scriptSig = CScript([OP_TRUE]) tx2.vin[1].scriptSig = CScript([OP_TRUE]) tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) tx2.wit.vtxinwit[1].scriptWitness.stack = [] tx2.rehash() add_witness_commitment(block) block.solve() # This has extra signature data for a witness input, so it should fail. 
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now get rid of the extra scriptsig on the witness input, and verify # success (even with extra scriptsig data in the non-witness input) tx2.vin[0].scriptSig = b"" tx2.rehash() add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Update utxo for later tests self.utxo.pop(0) self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_max_witness_push_length(self): ''' Should only allow up to 520 byte pushes in witness stack ''' self.log.info("Testing maximum witness push size") MAX_SCRIPT_ELEMENT_SIZE = 520 assert(len(self.utxo)) block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) # First try a 521-byte stack element tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now reduce the length of the stack element tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE) add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Update the utxo for later tests self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_max_witness_program_length(self): # Can create witness outputs that are long, but can't be greater than # 10k bytes to successfully spend self.log.info("Testing maximum witness program length") assert(len(self.utxo)) MAX_PROGRAM_LENGTH = 10000 # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE]) assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1) long_witness_hash = sha256(long_witness_program) long_scriptPubKey = CScript([OP_0, long_witness_hash]) block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Try again with one less byte in the witness program witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE]) assert(len(witness_program) == MAX_PROGRAM_LENGTH) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey) tx.rehash() tx2.vin[0].prevout.hash = tx.sha256 tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program] tx2.rehash() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_witness_input_length(self): ''' Ensure that vin length must match vtxinwit length ''' self.log.info("Testing witness input length") assert(len(self.utxo)) witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # Create a transaction that splits our utxo into many outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) nValue = self.utxo[0].nValue for i in range(10): tx.vout.append(CTxOut(int(nValue/10), scriptPubKey)) tx.vout[0].nValue -= 1000 assert(tx.vout[0].nValue >= 0) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Try various ways to spend tx that should all break. # This "broken" transaction serializer will not normalize # the length of vtxinwit. 
class BrokenCTransaction(CTransaction): def serialize_with_witness(self): flags = 0 if not self.wit.is_null(): flags |= 1 r = b"" r += struct.pack("<i", self.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(self.vin) r += ser_vector(self.vout) if flags & 1: r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) return r tx2 = BrokenCTransaction() for i in range(10): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE]))) # First try using a too long vtxinwit for i in range(11): tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now try using a too short vtxinwit tx2.wit.vtxinwit.pop() tx2.wit.vtxinwit.pop() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now make one of the intermediate witnesses be incorrect tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program] tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Fix the broken witness and the block should be accepted. tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) def test_witness_tx_relay_before_segwit_activation(self): self.log.info("Testing relay of witness transactions") # Generate a transaction that doesn't require a witness, but send it # with a witness. Should be rejected for premature-witness, but should # not be added to recently rejected list. assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ] tx.rehash() tx_hash = tx.sha256 tx_value = tx.vout[0].nValue # Verify that if a peer doesn't set nServices to include NODE_WITNESS, # the getdata is just for the non-witness portion. self.old_node.announce_tx_and_wait_for_getdata(tx) assert(self.old_node.last_message["getdata"].inv[0].type == 1) # Since we haven't delivered the tx yet, inv'ing the same tx from # a witness transaction ought not result in a getdata. 
        try:
            self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
            self.log.error("Error: duplicate tx getdata!")
            assert(False)
        except AssertionError:
            pass

        # Delivering this transaction with witness should fail (no matter who
        # it's from)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0].rpc, self.old_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)

        # But eliminating the witness should fix it
        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)

        # Cleanup: mine the first transaction and update utxo
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))

    # After segwit activates, verify that mempool:
    # - rejects transactions with unnecessary/extra witnesses
    # - accepts transactions with valid witnesses
    # and that witness transactions are relayed to non-upgraded peers.
    def test_tx_relay_after_segwit_activation(self):
        self.log.info("Testing relay of witness transactions")
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness. Should be rejected because we can't use a witness
        # when spending a non-witness output.
        assert(len(self.utxo))
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
        tx.rehash()

        tx_hash = tx.sha256

        # Verify that unnecessary witnesses are rejected.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)

        # Verify that removing the witness succeeds.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)

        # Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey)) tx2.rehash() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) # Add too-large for IsStandard witness and check that it does not enter reject filter p2sh_program = CScript([OP_TRUE]) p2sh_pubkey = hash160(p2sh_program) witness_program2 = CScript([b'a'*400000]) tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]))) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] tx3.rehash() # Node will not be blinded to the transaction self.std_node.announce_tx_and_wait_for_getdata(tx3) test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size') self.std_node.announce_tx_and_wait_for_getdata(tx3) test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size') # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ] tx3.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False) # Get rid of the extra witness, and verify acceptance. tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] # Also check that old_node gets a tx announcement, even though this is # a witness transaction. self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True) self.old_node.wait_for_inv([CInv(1, tx3.sha256)]) # Test that getrawtransaction returns correct witness information # hash, size, vsize raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4 assert_equal(raw_tx["vsize"], vsize) assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii')) assert(vsize != raw_tx["size"]) # Cleanup: mine the transactions and update utxo for next test self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) # Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG # This is true regardless of segwit activation. # Also test that we don't ask for blocks from unupgraded peers def test_block_relay(self, segwit_activated): self.log.info("Testing block relay") blocktype = 2|MSG_WITNESS_FLAG # test_node has set NODE_WITNESS, so all getdata requests should be for # witness blocks. 
# Test announcing a block via inv results in a getdata, and that # announcing a version 4 or random VB block with a header results in a getdata block1 = self.build_next_block() block1.solve() self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) test_witness_block(self.nodes[0].rpc, self.test_node, block1, True) # Viacoin: Blocks with nVersion < VB_TOP_BITS are rejected # self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True) # assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) # self.test_node.test_witness_block(block2, True) block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15))) block3.solve() self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) test_witness_block(self.nodes[0].rpc, self.test_node, block3, True) # Check that we can getdata for witness blocks or regular blocks, # and the right thing happens. if segwit_activated == False: # Before activation, we should be able to request old blocks with # or without witness, and they should be the same. chain_height = self.nodes[0].getblockcount() # Pick 10 random blocks on main chain, and verify that getdata's # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal. all_heights = list(range(chain_height+1)) random.shuffle(all_heights) all_heights = all_heights[0:10] for height in all_heights: block_hash = self.nodes[0].getblockhash(height) rpc_block = self.nodes[0].getblock(block_hash, False) block_hash = int(block_hash, 16) block = self.test_node.request_block(block_hash, 2) wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG) assert_equal(block.serialize(True), wit_block.serialize(True)) assert_equal(block.serialize(), hex_str_to_bytes(rpc_block)) else: # After activation, witness blocks and non-witness blocks should # be different. Verify rpc getblock() returns witness blocks, while # getdata respects the requested type. block = self.build_next_block() self.update_witness_block_with_transactions(block, []) # This gives us a witness commitment. assert(len(block.vtx[0].wit.vtxinwit) == 1) assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Now try to retrieve it... 
rpc_block = self.nodes[0].getblock(block.hash, False) non_wit_block = self.test_node.request_block(block.sha256, 2) wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG) assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block)) assert_equal(wit_block.serialize(False), non_wit_block.serialize()) assert_equal(wit_block.serialize(True), block.serialize(True)) # Test size, vsize, weight rpc_details = self.nodes[0].getblock(block.hash, True) assert_equal(rpc_details["size"], len(block.serialize(True))) assert_equal(rpc_details["strippedsize"], len(block.serialize(False))) weight = 3*len(block.serialize(False)) + len(block.serialize(True)) assert_equal(rpc_details["weight"], weight) # Upgraded node should not ask for blocks from unupgraded # Viacoin: Blocks with nVersion < VB_TOP_BITS are rejected block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15))) block4.solve() self.old_node.getdataset = set() # Blocks can be requested via direct-fetch (immediately upon processing the announcement) # or via parallel download (with an indeterminate delay from processing the announcement) # so to test that a block is NOT requested, we could guess a time period to sleep for, # and then check. We can avoid the sleep() by taking advantage of transaction getdata's # being processed after block getdata's, and announce a transaction as well, # and then check to see if that particular getdata has been received. # Since 0.14, inv's will only be responded to with a getheaders, so send a header # to announce this block. msg = msg_headers() msg.headers = [ CBlockHeader(block4) ] self.old_node.send_message(msg) self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0]) assert(block4.sha256 not in self.old_node.getdataset) # V0 segwit outputs should be standard after activation, but not before. def test_standardness_v0(self, segwit_activated): self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before")) assert(len(self.utxo)) witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) p2sh_pubkey = hash160(witness_program) p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # First prepare a p2sh output (so that spending it will pass standardness) p2sh_tx = CTransaction() p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)] p2sh_tx.rehash() # Mine it on test_node to create the confirmed output. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_tx, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Now test standardness of v0 P2WSH outputs. # Start by creating a transaction with two outputs. tx = CTransaction() tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)] tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later tx.rehash() test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=segwit_activated) # Now create something that looks like a P2PKH output. This won't be spendable. scriptPubKey = CScript([OP_0, hash160(witness_hash)]) tx2 = CTransaction() if segwit_activated: # if tx was accepted, then we spend the second output. 
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] tx2.vout = [CTxOut(700000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] else: # if tx wasn't accepted, we just re-spend the p2sh output we started with. tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)] tx2.rehash() test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=segwit_activated) # Now update self.utxo for later tests. tx3 = CTransaction() if segwit_activated: # tx and tx2 were both accepted. Don't bother trying to reclaim the # P2PKH output; just send tx's first output back to an anyone-can-spend. sync_mempools([self.nodes[0], self.nodes[1]]) tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx3.vout = [CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))] tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx3.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True) else: # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output. tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))] tx3.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) assert_equal(len(self.nodes[1].getrawmempool()), 0) # Verify that future segwit upgraded transactions are non-standard, # but valid in blocks. Can run this before and after segwit activation. def test_segwit_versions(self): self.log.info("Testing standardness/consensus for segwit versions (0-16)") assert(len(self.utxo)) NUM_TESTS = 17 # will test OP_0, OP1, ..., OP_16 if (len(self.utxo) < NUM_TESTS): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) tx.rehash() block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) self.utxo.pop(0) for i in range(NUM_TESTS): self.utxo.append(UTXO(tx.sha256, i, split_value)) sync_blocks(self.nodes) temp_utxo = [] tx = CTransaction() count = 0 witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) assert_equal(len(self.nodes[1].getrawmempool()), 0) for version in list(range(OP_1, OP_16+1)) + [OP_0]: count += 1 # First try to spend to a future version segwit scriptPubKey. 
scriptPubKey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)] tx.rehash() test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=False) test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True) self.utxo.pop(0) temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) self.nodes[0].generate(1) # Mine all the transactions sync_blocks(self.nodes) assert(len(self.nodes[0].getrawmempool()) == 0) # Finally, verify that version 0 -> version 1 transactions # are non-standard scriptPubKey = CScript([CScriptOp(OP_1), witness_hash]) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] tx2.rehash() # Gets accepted to test_node, because standardness of outputs isn't # checked with fRequireStandard test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False) temp_utxo.pop() # last entry in temp_utxo was the output we just spent temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) # Spend everything in temp_utxo back to an OP_TRUE output. tx3 = CTransaction() total_value = 0 for i in temp_utxo: tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE]))) tx3.rehash() # Spending a higher version witness output is not allowed by policy, # even with fRequireStandard=false. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False) self.test_node.sync_with_ping() with mininode_lock: assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason) # Building a block with the transaction must be valid, however. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2, tx3]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) sync_blocks(self.nodes) # Add utxo to our list self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) def test_premature_coinbase_witness_spend(self): self.log.info("Testing premature coinbase witness spend") block = self.build_next_block() # Change the output of the block to be a witness output. witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) block.vtx[0].vout[0].scriptPubKey = scriptPubKey # This next line will rehash the coinbase and update the merkle # root, and solve. self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) spend_tx = CTransaction() spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] spend_tx.rehash() # Now test a premature spend. 
self.nodes[0].generate(98) sync_blocks(self.nodes) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False) # Advancing one more block should allow the spend. self.nodes[0].generate(1) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True) sync_blocks(self.nodes) def test_signature_version_1(self): self.log.info("Testing segwit signature hash version 1") key = CECKey() key.set_secretbytes(b"9") pubkey = CPubKey(key.get_pubkey()) witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) # First create a witness output for use in the tests. assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey)) tx.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True) # Mine this transaction in preparation for following tests. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) sync_blocks(self.nodes) self.utxo.pop(0) # Test each hashtype prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) for sigflag in [ 0, SIGHASH_ANYONECANPAY ]: for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: hashtype |= sigflag block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) # Too-large input value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key) self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Too-small input value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key) block.vtx.pop() # remove last tx self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Now try correct value sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) block.vtx.pop() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) # Test combinations of signature hashes. # Split the utxo into a lot of outputs. # Randomly choose up to 10 to spend, sign with different hashtypes, and # output to a random number of outputs. Repeat NUM_TESTS times. # Ensure that we've tested a situation where we use SIGHASH_SINGLE with # an input index > number of outputs. 
NUM_TESTS = 300 temp_utxos = [] tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) for i in range(NUM_TESTS): temp_utxos.append(UTXO(tx.sha256, i, split_value)) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) block = self.build_next_block() used_sighash_single_out_of_bounds = False for i in range(NUM_TESTS): # Ping regularly to keep the connection alive if (not i % 100): self.test_node.sync_with_ping() # Choose random number of inputs to use. num_inputs = random.randint(1, 10) # Create a slight bias for producing more utxos num_outputs = random.randint(1, 11) random.shuffle(temp_utxos) assert(len(temp_utxos) > num_inputs) tx = CTransaction() total_value = 0 for i in range(num_inputs): tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) total_value += temp_utxos[i].nValue split_value = total_value // num_outputs for i in range(num_outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) for i in range(num_inputs): # Now try to sign each input, using a random hashtype. anyonecanpay = 0 if random.randint(0, 1): anyonecanpay = SIGHASH_ANYONECANPAY hashtype = random.randint(1, 3) | anyonecanpay sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) if (hashtype == SIGHASH_SINGLE and i >= num_outputs): used_sighash_single_out_of_bounds = True tx.rehash() for i in range(num_outputs): temp_utxos.append(UTXO(tx.sha256, i, split_value)) temp_utxos = temp_utxos[num_inputs:] # Test the block periodically, if we're close to maxblocksize if (get_virtual_size(block) >= MAX_BLOCK_BASE_SIZE - len(tx.serialize_with_witness())): self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) block = self.build_next_block() block.vtx.append(tx) if (not used_sighash_single_out_of_bounds): self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") # Test the transactions we've added to the block if (len(block.vtx) > 1): self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Now test witness version 0 P2PKH transactions pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH)) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) script = GetP2PKHScript(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL # Check that we can't have a scriptSig tx2.vin[0].scriptSig = CScript([signature, pubkey]) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) # Move the 
signature to the witness. block.vtx.pop() tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] tx2.vin[0].scriptSig = b"" tx2.rehash() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) temp_utxos.pop(0) # Update self.utxos for later tests by creating two outputs # that consolidate all the coins in temp_utxos. output_value = sum(i.nValue for i in temp_utxos) // 2 tx = CTransaction() index = 0 # Just spend to our usual anyone-can-spend output tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2 for i in temp_utxos: # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up # the signatures as we go. tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_ALL|SIGHASH_ANYONECANPAY, i.nValue, key) index += 1 block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) assert_greater_than_or_equal(MAX_BLOCK_BASE_SIZE, get_virtual_size(block)) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) for i in range(len(tx.vout)): self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue)) # Test P2SH wrapped witness programs. def test_p2sh_witness(self, segwit_activated): self.log.info("Testing P2SH witness transactions") assert(len(self.utxo)) # Prepare the p2sh-wrapped witness output witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) p2wsh_pubkey = CScript([OP_0, witness_hash]) p2sh_witness_hash = hash160(p2wsh_pubkey) scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script # Fund the P2SH output tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey)) tx.rehash() # Verify mempool acceptance and block validity test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=segwit_activated) sync_blocks(self.nodes) # Now test attempts to spend the output. spend_tx = CTransaction() spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig)) spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))) spend_tx.rehash() # This transaction should not be accepted into the mempool pre- or # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which # will require a witness to spend a witness program regardless of # segwit activation. Note that older bitcoind's that are not # segwit-aware would also reject this for failing CLEANSTACK. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False) # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False) # Now put the witness script in the witness, should succeed after # segwit activates. 
spend_tx.vin[0].scriptSig = scriptSig spend_tx.rehash() spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ] # Verify mempool acceptance test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=segwit_activated) block = self.build_next_block() self.update_witness_block_with_transactions(block, [spend_tx]) # If we're before activation, then sending this without witnesses # should be valid. If we're after activation, then sending this with # witnesses should be valid. if segwit_activated: test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) else: test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=False) # Update self.utxo self.utxo.pop(0) self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue)) # Test the behavior of starting up a segwit-aware node after the softfork # has activated. As segwit requires different block data than pre-segwit # nodes would have stored, this requires special handling. # To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to # the test. def test_upgrade_after_activation(self, node_id): self.log.info("Testing software upgrade after softfork activation") assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind # Make sure the nodes are all up sync_blocks(self.nodes) # Restart with the new binary self.stop_node(node_id) self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"]) connect_nodes(self.nodes[0], node_id) sync_blocks(self.nodes) # Make sure that this peer thinks segwit has activated. assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active") # Make sure this peers blocks match those of node0. height = self.nodes[node_id].getblockcount() while height >= 0: block_hash = self.nodes[node_id].getblockhash(height) assert_equal(block_hash, self.nodes[0].getblockhash(height)) assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash)) height -= 1 def test_witness_sigops(self): '''Ensure sigop counting is correct inside witnesses.''' self.log.info("Testing sigops limit") assert(len(self.utxo)) # Keep this under MAX_OPS_PER_SCRIPT (201) witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF]) witness_hash = sha256(witness_program) scriptPubKey = CScript([OP_0, witness_hash]) sigops_per_script = 20*5 + 193*1 # We'll produce 2 extra outputs, one with a program that would take us # over max sig ops, and one with a program that would exactly reach max # sig ops outputs = (MAX_SIGOP_COST // sigops_per_script) + 2 extra_sigops_available = MAX_SIGOP_COST % sigops_per_script # We chose the number of checkmultisigs/checksigs to make this work: assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT # This script, when spent with the first # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction, # would push us just over the block sigop limit. witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF]) witness_hash_toomany = sha256(witness_program_toomany) scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany]) # If we spend this script instead, we would exactly reach our sigop # limit (for witness sigops). 
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF]) witness_hash_justright = sha256(witness_program_justright) scriptPubKey_justright = CScript([OP_0, witness_hash_justright]) # First split our available utxo into a bunch of outputs split_value = self.utxo[0].nValue // outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) for i in range(outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.vout[-2].scriptPubKey = scriptPubKey_toomany tx.vout[-1].scriptPubKey = scriptPubKey_justright tx.rehash() block_1 = self.build_next_block() self.update_witness_block_with_transactions(block_1, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True) tx2 = CTransaction() # If we try to spend the first n-1 outputs from tx, that should be # too many sigops. total_value = 0 for i in range(outputs-1): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ] total_value += tx.vout[i].nValue tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ] tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) tx2.rehash() block_2 = self.build_next_block() self.update_witness_block_with_transactions(block_2, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False) # Try dropping the last input in tx2, and add an output that has # too many sigops (contributing to legacy sigop count). checksig_count = (extra_sigops_available // 4) + 1 scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count) tx2.vout.append(CTxOut(0, scriptPubKey_checksigs)) tx2.vin.pop() tx2.wit.vtxinwit.pop() tx2.vout[0].nValue -= tx.vout[-2].nValue tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) # If we drop the last checksig in this output, the tx should succeed. block_4 = self.build_next_block() tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1)) tx2.rehash() self.update_witness_block_with_transactions(block_4, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True) # Reset the tip back down for the next test sync_blocks(self.nodes) for x in self.nodes: x.invalidateblock(block_4.hash) # Try replacing the last input of tx2 to be spending the last # output of tx block_5 = self.build_next_block() tx2.vout.pop() tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ] tx2.rehash() self.update_witness_block_with_transactions(block_5, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True) # TODO: test p2sh sigop counting def test_getblocktemplate_before_lockin(self): self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)") # Node0 is segwit aware, node2 is not. for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate() block_version = gbt_results['version'] # If we're not indicating segwit support, we will still be # signalling for segwit activation. assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0]) # If we don't specify the segwit rule, then we won't get a default # commitment. 
assert('default_witness_commitment' not in gbt_results) # Workaround: # Can either change the tip, or change the mempool and wait 5 seconds # to trigger a recomputation of getblocktemplate. txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) # Using mocktime lets us avoid sleep() sync_mempools(self.nodes) self.nodes[0].setmocktime(int(time.time())+10) self.nodes[2].setmocktime(int(time.time())+10) for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate({"rules" : ["segwit"]}) block_version = gbt_results['version'] if node == self.nodes[2]: # If this is a non-segwit node, we should still not get a witness # commitment, nor a version bit signalling segwit. assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) assert('default_witness_commitment' not in gbt_results) else: # For segwit-aware nodes, check the version bit and the witness # commitment are correct. assert(block_version & (1 << VB_WITNESS_BIT) != 0) assert('default_witness_commitment' in gbt_results) witness_commitment = gbt_results['default_witness_commitment'] # Check that default_witness_commitment is present. witness_root = CBlock.get_merkle_root([ser_uint256(0), ser_uint256(txid)]) script = get_witness_script(witness_root, 0) assert_equal(witness_commitment, bytes_to_hex_str(script)) # undo mocktime self.nodes[0].setmocktime(0) self.nodes[2].setmocktime(0) # Uncompressed pubkeys are no longer supported in default relay policy, # but (for now) are still valid in blocks. def test_uncompressed_pubkey(self): self.log.info("Testing uncompressed pubkeys") # Segwit transactions using uncompressed pubkeys are not accepted # under default policy, but should still pass consensus. key = CECKey() key.set_secretbytes(b"9") key.set_compressed(False) pubkey = CPubKey(key.get_pubkey()) assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey assert(len(self.utxo) > 0) utxo = self.utxo.pop(0) # Test 1: P2WPKH # First create a P2WPKH output that uses an uncompressed pubkey pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH)) tx.rehash() # Confirm it in a block. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Now try to spend it. Send it to a P2WSH output, which we'll # use in the next test. witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) scriptWSH = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH)) script = GetP2PKHScript(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ] tx2.rehash() # Should fail policy test. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Test 2: P2WSH # Try to spend the P2WSH output created in last test. 
# Send it to a P2SH(P2WSH) output, which we'll use in the next test. p2sh_witness_hash = hash160(scriptWSH) scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) scriptSig = CScript([scriptWSH]) tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH)) tx3.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx3]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Test 3: P2SH(P2WSH) # Try to spend the P2SH output created in the last test. # Send it to a P2PKH output, which we'll use in the next test. scriptPubKey = GetP2PKHScript(pubkeyhash) tx4 = CTransaction() tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig)) tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey)) tx4.wit.vtxinwit.append(CTxInWitness()) sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx4]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) # Test 4: Uncompressed pubkeys should still be valid in non-segwit # transactions. tx5 = CTransaction() tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE]))) (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx5.vin[0].scriptSig = CScript([signature, pubkey]) tx5.rehash() # Should pass policy and consensus. test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx5]) test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) def test_non_standard_witness(self): self.log.info("Testing detection of non-standard P2WSH witness") pad = chr(1).encode('latin-1') # Create scripts for tests scripts = [] scripts.append(CScript([OP_DROP] * 100)) scripts.append(CScript([OP_DROP] * 99)) scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) p2wsh_scripts = [] assert(len(self.utxo)) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) # For each script, generate a pair of P2WSH and P2SH-P2WSH output. 
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2) for i in scripts: p2wsh = CScript([OP_0, sha256(i)]) p2sh = hash160(p2wsh) p2wsh_scripts.append(p2wsh) tx.vout.append(CTxOut(outputvalue, p2wsh)) tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) tx.rehash() txid = tx.sha256 test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Creating transactions for tests p2wsh_txs = [] p2sh_txs = [] for i in range(len(scripts)): p2wsh_tx = CTransaction() p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2))) p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) p2wsh_tx.rehash() p2wsh_txs.append(p2wsh_tx) p2sh_tx = CTransaction() p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]]))) p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2sh_tx.wit.vtxinwit.append(CTxInWitness()) p2sh_tx.rehash() p2sh_txs.append(p2sh_tx) # Testing native P2WSH # Witness stack size, excluding witnessScript, over 100 is non-standard p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True) # Stack element size over 80 bytes is non-standard p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True) # Standard nodes should accept if element size is not over 80 bytes p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True) # witnessScript size at 3600 bytes is standard p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True) test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True) # witnessScript size at 3601 bytes is non-standard p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True) # Repeating the same tests with P2SH-P2WSH p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1].rpc, 
self.std_node, p2sh_txs[1], True, True) p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True) test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True) p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True) self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node # Valid but non-standard transactions in a block should be accepted by standard node sync_blocks(self.nodes) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) self.utxo.pop(0) def run_test(self): # Setup the p2p connections and start up the network thread. # self.test_node sets NODE_WITNESS|NODE_NETWORK self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) # self.old_node sets only NODE_NETWORK self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK) # self.std_node is for testing node1 (fRequireStandard=true) self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) network_thread_start() # Keep a place to store utxo's that can be used in later tests self.utxo = [] # Test logic begins here self.test_node.wait_for_verack() self.log.info("Starting tests before segwit lock in:") self.test_witness_services() # Verifies NODE_WITNESS self.test_non_witness_transaction() # non-witness tx's are accepted self.test_unnecessary_witness_before_segwit_activation() self.test_block_relay(segwit_activated=False) # Advance to segwit being 'started' self.advance_to_segwit_started() sync_blocks(self.nodes) self.test_getblocktemplate_before_lockin() sync_blocks(self.nodes) # At lockin, nothing should change. self.log.info("Testing behavior post lockin, pre-activation") self.advance_to_segwit_lockin() # Retest unnecessary witnesses self.test_unnecessary_witness_before_segwit_activation() self.test_witness_tx_relay_before_segwit_activation() self.test_block_relay(segwit_activated=False) self.test_p2sh_witness(segwit_activated=False) self.test_standardness_v0(segwit_activated=False) sync_blocks(self.nodes) # Now activate segwit self.log.info("Testing behavior after segwit activation") self.advance_to_segwit_active() sync_blocks(self.nodes) # Test P2SH witness handling again self.test_p2sh_witness(segwit_activated=True) self.test_witness_commitments() self.test_block_malleability() self.test_witness_block_size() self.test_submit_block() self.test_extra_witness_data() self.test_max_witness_push_length() self.test_max_witness_program_length() self.test_witness_input_length() self.test_block_relay(segwit_activated=True) self.test_tx_relay_after_segwit_activation() self.test_standardness_v0(segwit_activated=True) self.test_segwit_versions() self.test_premature_coinbase_witness_spend() self.test_uncompressed_pubkey() self.test_signature_version_1() # Viacoin: Disable test due to occasional travis issue #self.test_non_standard_witness() sync_blocks(self.nodes) self.test_upgrade_after_activation(node_id=2) self.test_witness_sigops() if __name__ == '__main__': SegWitTest().main()
mit
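The size/vsize/weight assertions in the test above all derive from BIP141's weight formula; a minimal standalone sketch of that arithmetic, with plain byte strings standing in for the framework's serialized transactions:

def tx_weight(with_witness, without_witness):
    # BIP141: weight = 3 * base (witness-stripped) size + total size
    return 3 * len(without_witness) + len(with_witness)

def tx_vsize(with_witness, without_witness):
    # Virtual size is the weight divided by 4, rounded up
    return (tx_weight(with_witness, without_witness) + 3) // 4

For a transaction carrying no witness data the two serializations are identical, so vsize equals size; any witness bytes make vsize strictly smaller than size, which is what the test checks with assert(vsize != raw_tx["size"]).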
mihail911/nupic
nupic/regions/PictureSensorExplorers/block1DOF.py
8
2863
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""
This file defines Block1DOFPictureExplorer, an explorer for
PictureSensor.
"""

from nupic.regions.PictureSensor import PictureSensor


class Block1DOFPictureExplorer(PictureSensor.PictureExplorer):
  """
  Presents each category at an Nx1 "block" of shifted positions
  centered upon the centroid of the canvas, where N is 2R+1
  (where R is the radialLength); each such presentation is
  spaced radialStep pixels apart in both X and Y dimensions.
  """

  @classmethod
  def queryRelevantParams(klass):
    """
    Returns a sequence of parameter names that are relevant to
    the operation of the explorer.

    May be extended or overridden by sub-classes as appropriate.
    """
    return ('radialLength', 'radialStep',)

  def initSequence(self, state, params):
    self._presentNextBlockPosn(state, params)

  def updateSequence(self, state, params):
    self._presentNextBlockPosn(state, params)

  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Internal helper method(s)

  def _presentNextBlockPosn(self, state, params):
    """
    Compute the appropriate category and block position
    deterministically based on the current iteration count.
    """
    # Compute iteration indices
    edgeLen = 2 * params['radialLength'] + 1
    numBlocksPerCat = edgeLen
    numCats = self._getNumCategories()
    numBlocks = numBlocksPerCat * numCats
    blockCounter = self._getIterCount() % numBlocks
    catIndex = blockCounter // numBlocksPerCat
    blockCatIndex = blockCounter % numBlocksPerCat
    # Compute position within onion block
    posnX = ((blockCatIndex % edgeLen) - params['radialLength']) \
        * params['radialStep']

    # Override default state
    state['posnX'] = posnX
    state['posnY'] = 0
    state['velocityX'] = 0
    state['velocityY'] = 0
    state['angularPosn'] = 0
    state['angularVelocity'] = 0
    state['catIndex'] = catIndex
gpl-3.0
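The explorer's position logic is compact enough to re-derive in isolation; the sketch below mirrors the arithmetic of _presentNextBlockPosn (the function name and parameters here are illustrative stand-ins, not part of the nupic API):

def block_1dof_posn(iter_count, radial_length, radial_step, num_categories):
    # One edge of 2R+1 shifted positions is presented per category
    # before the explorer moves on to the next category.
    edge_len = 2 * radial_length + 1
    block_counter = iter_count % (edge_len * num_categories)
    cat_index = block_counter // edge_len
    block_cat_index = block_counter % edge_len
    posn_x = (block_cat_index - radial_length) * radial_step
    return cat_index, posn_x

# With radialLength=2, radialStep=3 and two categories, category 0 is swept
# across x = -6, -3, 0, 3, 6 before category 1 starts over at x = -6.
assert [block_1dof_posn(i, 2, 3, 2) for i in range(6)] == \
    [(0, -6), (0, -3), (0, 0), (0, 3), (0, 6), (1, -6)]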
SlimRemix/android_external_chromium_org
tools/telemetry/telemetry/core/browser_options.py
26
12934
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import logging import optparse import os import shlex import sys from telemetry.core import browser_finder from telemetry.core import profile_types from telemetry.core import util from telemetry.core import wpr_modes from telemetry.core.platform.profiler import profiler_finder util.AddDirToPythonPath( util.GetChromiumSrcDir(), 'third_party', 'webpagereplay') import net_configs # pylint: disable=F0401 class BrowserFinderOptions(optparse.Values): """Options to be used for discovering a browser.""" def __init__(self, browser_type=None): optparse.Values.__init__(self) self.browser_type = browser_type self.browser_executable = None self.chrome_root = None self.android_device = None self.cros_ssh_identity = None self.extensions_to_load = [] # If set, copy the generated profile to this path on exit. self.output_profile_path = None self.cros_remote = None self.profiler = None self.verbosity = 0 self.browser_options = BrowserOptions() self.output_file = None self.android_rndis = False self.no_performance_mode = False def __repr__(self): return str(sorted(self.__dict__.items())) def Copy(self): return copy.deepcopy(self) def CreateParser(self, *args, **kwargs): parser = optparse.OptionParser(*args, **kwargs) # Selection group group = optparse.OptionGroup(parser, 'Which browser to use') group.add_option('--browser', dest='browser_type', default=None, help='Browser type to run, ' 'in order of priority. Supported values: list,%s' % ','.join(browser_finder.FindAllBrowserTypes(self))) group.add_option('--browser-executable', dest='browser_executable', help='The exact browser to run.') group.add_option('--chrome-root', dest='chrome_root', help='Where to look for chrome builds.' 'Defaults to searching parent dirs by default.') group.add_option('--device', dest='android_device', help='The android device ID to use' 'If not specified, only 0 or 1 connected devices are supported.') group.add_option('--target-arch', dest='target_arch', help='The target architecture of the browser. Options available are: ' 'x64, x86_64, arm, arm64 and mips. ' 'Defaults to the default architecture of the platform if omitted.') group.add_option( '--remote', dest='cros_remote', help='The IP address of a remote ChromeOS device to use.') identity = None testing_rsa = os.path.join( util.GetChromiumSrcDir(), 'third_party', 'chromite', 'ssh_keys', 'testing_rsa') if os.path.exists(testing_rsa): identity = testing_rsa group.add_option('--identity', dest='cros_ssh_identity', default=identity, help='The identity file to use when ssh\'ing into the ChromeOS device') parser.add_option_group(group) # Debugging options group = optparse.OptionGroup(parser, 'When things go wrong') profiler_choices = profiler_finder.GetAllAvailableProfilers() group.add_option( '--profiler', default=None, type='choice', choices=profiler_choices, help='Record profiling data using this tool. 
Supported values: ' + ', '.join(profiler_choices)) group.add_option( '--interactive', dest='interactive', action='store_true', help='Let the user interact with the page; the actions specified for ' 'the page are not run.') group.add_option( '-v', '--verbose', action='count', dest='verbosity', help='Increase verbosity level (repeat as needed)') group.add_option('--print-bootstrap-deps', action='store_true', help='Output bootstrap deps list.') parser.add_option_group(group) # Platform options group = optparse.OptionGroup(parser, 'Platform options') group.add_option('--no-performance-mode', action='store_true', help='Some platforms run on "full performance mode" where the ' 'test is executed at maximum CPU speed in order to minimize noise ' '(specially important for dashboards / continuous builds). ' 'This option prevents Telemetry from tweaking such platform settings.') group.add_option('--android-rndis', dest='android_rndis', default=False, action='store_true', help='Use RNDIS forwarding on Android.') group.add_option('--no-android-rndis', dest='android_rndis', action='store_false', help='Do not use RNDIS forwarding on Android.' ' [default]') parser.add_option_group(group) # Browser options. self.browser_options.AddCommandLineArgs(parser) real_parse = parser.parse_args def ParseArgs(args=None): defaults = parser.get_default_values() for k, v in defaults.__dict__.items(): if k in self.__dict__ and self.__dict__[k] != None: continue self.__dict__[k] = v ret = real_parse(args, self) # pylint: disable=E1121 if self.verbosity >= 2: logging.getLogger().setLevel(logging.DEBUG) elif self.verbosity: logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if self.browser_executable and not self.browser_type: self.browser_type = 'exact' if self.browser_type == 'list': try: types = browser_finder.GetAllAvailableBrowserTypes(self) except browser_finder.BrowserFinderException, ex: sys.stderr.write('ERROR: ' + str(ex)) sys.exit(1) sys.stdout.write('Available browsers:\n') sys.stdout.write(' %s\n' % '\n '.join(types)) sys.exit(0) # Parse browser options. self.browser_options.UpdateFromParseResults(self) return ret parser.parse_args = ParseArgs return parser def AppendExtraBrowserArgs(self, args): self.browser_options.AppendExtraBrowserArgs(args) def MergeDefaultValues(self, defaults): for k, v in defaults.__dict__.items(): self.ensure_value(k, v) class BrowserOptions(object): """Options to be used for launching a browser.""" def __init__(self): self.browser_type = None self.show_stdout = False # When set to True, the browser will use the default profile. Telemetry # will not provide an alternate profile directory. self.dont_override_profile = False self.profile_dir = None self.profile_type = None self._extra_browser_args = set() self.extra_wpr_args = [] self.wpr_mode = wpr_modes.WPR_OFF self.netsim = None self.disable_background_networking = True self.no_proxy_server = False self.browser_user_agent_type = None self.clear_sytem_cache_for_browser_and_profile_on_start = False self.startup_url = 'about:blank' # Background pages of built-in component extensions can interfere with # performance measurements. self.disable_component_extensions_with_background_pages = True # Whether to use the new code path for choosing an ephemeral port for # DevTools. The bots set this to true. When Chrome 37 reaches stable, # remove this setting and the old code path. 
http://crbug.com/379980 self.use_devtools_active_port = False def __repr__(self): return str(sorted(self.__dict__.items())) @classmethod def AddCommandLineArgs(cls, parser): ############################################################################ # Please do not add any more options here without first discussing with # # a telemetry owner. This is not the right place for platform-specific # # options. # ############################################################################ group = optparse.OptionGroup(parser, 'Browser options') profile_choices = profile_types.GetProfileTypes() group.add_option('--profile-type', dest='profile_type', type='choice', default='clean', choices=profile_choices, help=('The user profile to use. A clean profile is used by default. ' 'Supported values: ' + ', '.join(profile_choices))) group.add_option('--profile-dir', dest='profile_dir', help='Profile directory to launch the browser with. ' 'A clean profile is used by default') group.add_option('--extra-browser-args', dest='extra_browser_args_as_string', help='Additional arguments to pass to the browser when it starts') group.add_option('--extra-wpr-args', dest='extra_wpr_args_as_string', help=('Additional arguments to pass to Web Page Replay. ' 'See third_party/webpagereplay/replay.py for usage.')) group.add_option('--netsim', default=None, type='choice', choices=net_configs.NET_CONFIG_NAMES, help=('Run benchmark under simulated network conditions. ' 'Will prompt for sudo. Supported values: ' + ', '.join(net_configs.NET_CONFIG_NAMES))) group.add_option('--show-stdout', action='store_true', help='When possible, will display the stdout of the process') # This hidden option is to be removed, and the older code path deleted, # once Chrome 37 reaches Stable. http://crbug.com/379980 group.add_option('--use-devtools-active-port', action='store_true', help=optparse.SUPPRESS_HELP) parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Compatibility options') group.add_option('--gtest_output', help='Ignored argument for compatibility with runtest.py harness') parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Synthetic gesture options') synthetic_gesture_source_type_choices = [ 'default', 'mouse', 'touch' ] group.add_option('--synthetic-gesture-source-type', dest='synthetic_gesture_source_type', default='default', type='choice', choices=synthetic_gesture_source_type_choices, help='Specify the source type for synthetic gestures. Note that some ' + 'actions only support a specific source type. 
' + 'Supported values: ' + ', '.join(synthetic_gesture_source_type_choices)) parser.add_option_group(group) def UpdateFromParseResults(self, finder_options): """Copies our options from finder_options""" browser_options_list = [ 'extra_browser_args_as_string', 'extra_wpr_args_as_string', 'netsim', 'profile_dir', 'profile_type', 'show_stdout', 'synthetic_gesture_source_type', 'use_devtools_active_port', ] for o in browser_options_list: a = getattr(finder_options, o, None) if a is not None: setattr(self, o, a) delattr(finder_options, o) self.browser_type = finder_options.browser_type if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101 tmp = shlex.split( self.extra_browser_args_as_string) # pylint: disable=E1101 self.AppendExtraBrowserArgs(tmp) delattr(self, 'extra_browser_args_as_string') if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101 tmp = shlex.split( self.extra_wpr_args_as_string) # pylint: disable=E1101 self.extra_wpr_args.extend(tmp) delattr(self, 'extra_wpr_args_as_string') if self.profile_type == 'default': self.dont_override_profile = True if self.profile_dir and self.profile_type != 'clean': logging.critical( "It's illegal to specify both --profile-type and --profile-dir.\n" "For more information see: http://goo.gl/ngdGD5") sys.exit(1) if self.profile_dir and not os.path.isdir(self.profile_dir): logging.critical( "Directory specified by --profile-dir (%s) doesn't exist " "or isn't a directory.\n" "For more information see: http://goo.gl/ngdGD5" % self.profile_dir) sys.exit(1) if not self.profile_dir: self.profile_dir = profile_types.GetProfileDir(self.profile_type) # This deferred import is necessary because browser_options is imported in # telemetry/telemetry/__init__.py. from telemetry.core.backends.chrome import chrome_browser_options finder_options.browser_options = ( chrome_browser_options.CreateChromeBrowserOptions(self)) @property def extra_browser_args(self): return self._extra_browser_args def AppendExtraBrowserArgs(self, args): if isinstance(args, list): self._extra_browser_args.update(args) else: self._extra_browser_args.add(args)
bsd-3-clause
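BrowserFinderOptions wires its option groups into a stock optparse parser and then monkey-patches parse_args so that parsing mutates the options object itself; a minimal driver sketch (the --browser value and argv are made up for illustration, and this era of telemetry must be importable from a Chromium checkout):

from telemetry.core import browser_options

options = browser_options.BrowserFinderOptions()
parser = options.CreateParser(usage='%prog [options]')
# The patched parse_args copies parser defaults onto `options`, adjusts the
# logging level from the -v count, and folds browser-level flags into
# options.browser_options before returning optparse's usual (values, args).
_, extra_args = parser.parse_args(['--browser', 'system', '-v'])
assert options.browser_type == 'system'
assert options.verbosity == 1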
stack-of-tasks/rbdlpy
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/matrix_palette.py
9
1708
'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.GL import _types as _cs # End users want this... from OpenGL.raw.GL._types import * from OpenGL.raw.GL import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GL_ARB_matrix_palette' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_matrix_palette',error_checker=_errors._error_checker) GL_CURRENT_MATRIX_INDEX_ARB=_C('GL_CURRENT_MATRIX_INDEX_ARB',0x8845) GL_CURRENT_PALETTE_MATRIX_ARB=_C('GL_CURRENT_PALETTE_MATRIX_ARB',0x8843) GL_MATRIX_INDEX_ARRAY_ARB=_C('GL_MATRIX_INDEX_ARRAY_ARB',0x8844) GL_MATRIX_INDEX_ARRAY_POINTER_ARB=_C('GL_MATRIX_INDEX_ARRAY_POINTER_ARB',0x8849) GL_MATRIX_INDEX_ARRAY_SIZE_ARB=_C('GL_MATRIX_INDEX_ARRAY_SIZE_ARB',0x8846) GL_MATRIX_INDEX_ARRAY_STRIDE_ARB=_C('GL_MATRIX_INDEX_ARRAY_STRIDE_ARB',0x8848) GL_MATRIX_INDEX_ARRAY_TYPE_ARB=_C('GL_MATRIX_INDEX_ARRAY_TYPE_ARB',0x8847) GL_MATRIX_PALETTE_ARB=_C('GL_MATRIX_PALETTE_ARB',0x8840) GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB=_C('GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB',0x8841) GL_MAX_PALETTE_MATRICES_ARB=_C('GL_MAX_PALETTE_MATRICES_ARB',0x8842) @_f @_p.types(None,_cs.GLint) def glCurrentPaletteMatrixARB(index):pass @_f @_p.types(None,_cs.GLint,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p) def glMatrixIndexPointerARB(size,type,stride,pointer):pass @_f @_p.types(None,_cs.GLint,arrays.GLubyteArray) def glMatrixIndexubvARB(size,indices):pass @_f @_p.types(None,_cs.GLint,arrays.GLuintArray) def glMatrixIndexuivARB(size,indices):pass @_f @_p.types(None,_cs.GLint,arrays.GLushortArray) def glMatrixIndexusvARB(size,indices):pass
lgpl-3.0
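The module above is only the raw binding layer; nothing in it creates a GL context. A hedged usage sketch, assuming a current context already exists (e.g. one created by GLUT or pygame) and that the driver actually exposes the extension:

# Sketch only: every call below fails without a live GL context.
from OpenGL import extensions
from OpenGL.GL import glEnable
from OpenGL.GL.ARB.matrix_palette import (
    GL_MATRIX_PALETTE_ARB,
    glCurrentPaletteMatrixARB,
)

if extensions.hasGLExtension('GL_ARB_matrix_palette'):
    glEnable(GL_MATRIX_PALETTE_ARB)   # enable palette-skinning matrices
    glCurrentPaletteMatrixARB(0)      # make palette slot 0 current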
j-be/wien-geodatenviewer-exporter
convert_coordinates.py
1
1134
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-

from get_size import getSize
import os

# Fetch parameters from environment
start_major = int(os.environ['__VJ_START_MAJOR__'])
start_minor = int(os.environ['__VJ_START_MINOR__'])

# Specific to internet data
LINE_SHIFT = 10

def getNext((major, minor) = (None, None), n = 0):
    if (major is None):
        return (start_major, start_minor), 0

    # End of Line
    if not (n < getSize()[0] - 1):
        # Next line is low Minors
        if minor > 2:
            if (start_minor < 3):
                return (major + LINE_SHIFT - n / 2, start_minor), 0
            else:
                return (major + LINE_SHIFT - n / 2, (start_minor % 3) + 1), 0
        # Next line is high Minors
        else:
            if (start_minor < 3):
                return (major - n/2, start_minor + 2), 0
            else:
                return (major - n/2, start_minor), 0

    # Normal case
    n += 1
    # Odd Minors
    if (minor % 2 == 1):
        return (major, minor + 1), n
    # Even Minors
    if (minor % 2 == 0):
        return (major + 1, minor - 1), n

if __name__ == "__main__":
    size = getSize()
    x, n = getNext()
    for i in range(size[0] * size[1]):
        if (n == 0):
            print
        print str(x[0]) + "_" + str(x[1]),
        x, n = getNext(x, n)
mit
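getNext() above walks a grid of (major, minor) map-sheet IDs, zig-zagging between odd and even minors and jumping by LINE_SHIFT at the end of each row. An illustrative driver follows; the env values '100'/'1' are made up, and it assumes get_size is importable and that getSize() reports a row width of at least 4:

# Illustrative only (Python 2, like the record above).
import os
os.environ['__VJ_START_MAJOR__'] = '100'
os.environ['__VJ_START_MINOR__'] = '1'

from convert_coordinates import getNext

x, n = getNext()        # ((100, 1), 0): the configured start tile
x, n = getNext(x, n)    # ((100, 2), 1): odd minor -> same major, minor + 1
x, n = getNext(x, n)    # ((101, 1), 2): even minor -> major + 1, minor - 1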
wakatime/komodo-wakatime
components/wakatime/packages/pygments/lexers/dotnet.py
7
27664
# -*- coding: utf-8 -*- """ pygments.lexers.dotnet ~~~~~~~~~~~~~~~~~~~~~~ Lexers for .net languages. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \ using, this, default, words from pygments.token import Punctuation, \ Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other from pygments.util import get_choice_opt, iteritems from pygments import unistring as uni from pygments.lexers.html import XmlLexer __all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer'] class CSharpLexer(RegexLexer): """ For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_ source code. Additional options accepted: `unicodelevel` Determines which Unicode characters this lexer allows for identifiers. The possible values are: * ``none`` -- only the ASCII letters and numbers are allowed. This is the fastest selection. * ``basic`` -- all Unicode characters from the specification except category ``Lo`` are allowed. * ``full`` -- all Unicode characters as specified in the C# specs are allowed. Note that this means a considerable slowdown since the ``Lo`` category has more than 40,000 characters in it! The default value is ``basic``. .. versionadded:: 0.8 """ name = 'C#' aliases = ['csharp', 'c#'] filenames = ['*.cs'] mimetypes = ['text/x-csharp'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE # for the range of allowed unicode characters in identifiers, see # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf levels = { 'none': '@?[_a-zA-Z]\w*', 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), 'full': ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), } tokens = {} token_variants = True for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation), (r'[{}]', Punctuation), (r'@"(""|[^"])*"', String), (r'"(\\\\|\\"|[^"\n])*["\n]', String), (r"'\\.'|'[^\\]'", String.Char), (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?" 
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number), (r'#[ \t]*(if|endif|else|elif|define|undef|' r'line|error|warning|region|endregion|pragma)\b.*?\n', Comment.Preproc), (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text, Keyword)), (r'(abstract|as|async|await|base|break|by|case|catch|' r'checked|const|continue|default|delegate|' r'do|else|enum|event|explicit|extern|false|finally|' r'fixed|for|foreach|goto|if|implicit|in|interface|' r'internal|is|let|lock|new|null|on|operator|' r'out|override|params|private|protected|public|readonly|' r'ref|return|sealed|sizeof|stackalloc|static|' r'switch|this|throw|true|try|typeof|' r'unchecked|unsafe|virtual|void|while|' r'get|set|new|partial|yield|add|remove|value|alias|ascending|' r'descending|from|group|into|orderby|select|thenby|where|' r'join|equals)\b', Keyword), (r'(global)(::)', bygroups(Keyword, Punctuation)), (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|' r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type), (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'), (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'), (cs_ident, Name), ], 'class': [ (cs_ident, Name.Class, '#pop'), default('#pop'), ], 'namespace': [ (r'(?=\()', Text, '#pop'), # using (resource) ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'), ] } def __init__(self, **options): level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now self._tokens = self.__class__.process_tokendef(level) else: self._tokens = self._all_tokens[level] RegexLexer.__init__(self, **options) class NemerleLexer(RegexLexer): """ For `Nemerle <http://nemerle.org>`_ source code. Additional options accepted: `unicodelevel` Determines which Unicode characters this lexer allows for identifiers. The possible values are: * ``none`` -- only the ASCII letters and numbers are allowed. This is the fastest selection. * ``basic`` -- all Unicode characters from the specification except category ``Lo`` are allowed. * ``full`` -- all Unicode characters as specified in the C# specs are allowed. Note that this means a considerable slowdown since the ``Lo`` category has more than 40,000 characters in it! The default value is ``basic``. .. 
versionadded:: 1.5 """ name = 'Nemerle' aliases = ['nemerle'] filenames = ['*.n'] mimetypes = ['text/x-nemerle'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE # for the range of allowed unicode characters in identifiers, see # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf levels = { 'none': '@?[_a-zA-Z]\w*', 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), 'full': ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), } tokens = {} token_variants = True for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), (r'\$\s*"', String, 'splice-string'), (r'\$\s*<#', String, 'splice-string2'), (r'<#', String, 'recursive-string'), (r'(<\[)\s*(' + cs_ident + ':)?', Keyword), (r'\]\>', Keyword), # quasiquotation only (r'\$' + cs_ident, Name), (r'(\$)(\()', bygroups(Name, Punctuation), 'splice-string-content'), (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation), (r'[{}]', Punctuation), (r'@"(""|[^"])*"', String), (r'"(\\\\|\\"|[^"\n])*["\n]', String), (r"'\\.'|'[^\\]'", String.Char), (r"0[xX][0-9a-fA-F]+[Ll]?", Number), (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number), (r'#[ \t]*(if|endif|else|elif|define|undef|' r'line|error|warning|region|endregion|pragma)\b.*?\n', Comment.Preproc), (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text, Keyword)), (r'(abstract|and|as|base|catch|def|delegate|' r'enum|event|extern|false|finally|' r'fun|implements|interface|internal|' r'is|macro|match|matches|module|mutable|new|' r'null|out|override|params|partial|private|' r'protected|public|ref|sealed|static|' r'syntax|this|throw|true|try|type|typeof|' r'virtual|volatile|when|where|with|' r'assert|assert2|async|break|checked|continue|do|else|' r'ensures|for|foreach|if|late|lock|new|nolate|' r'otherwise|regexp|repeat|requires|return|surroundwith|' r'unchecked|unless|using|while|yield)\b', Keyword), (r'(global)(::)', bygroups(Keyword, Punctuation)), (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|' r'short|string|uint|ulong|ushort|void|array|list)\b\??', Keyword.Type), (r'(:>?)\s*(' + cs_ident + r'\??)', bygroups(Punctuation, Keyword.Type)), (r'(class|struct|variant|module)(\s+)', bygroups(Keyword, Text), 'class'), (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'), (cs_ident, Name), ], 'class': [ (cs_ident, Name.Class, '#pop') ], 'namespace': [ (r'(?=\()', Text, '#pop'), # using (resource) ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') ], 'splice-string': [ (r'[^"$]', String), (r'\$' + cs_ident, Name), (r'(\$)(\()', bygroups(Name, Punctuation), 'splice-string-content'), (r'\\"', String), (r'"', String, '#pop') ], 'splice-string2': [ (r'[^#<>$]', String), (r'\$' + cs_ident, Name), (r'(\$)(\()', bygroups(Name, Punctuation), 'splice-string-content'), (r'<#', String, '#push'), (r'#>', String, '#pop') ], 'recursive-string': [ (r'[^#<>]', String), (r'<#', String, '#push'), (r'#>', String, '#pop') ], 'splice-string-content': [ (r'if|match', 
Keyword), (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation), (cs_ident, Name), (r'\d+', Number), (r'\(', Punctuation, '#push'), (r'\)', Punctuation, '#pop') ] } def __init__(self, **options): level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now self._tokens = self.__class__.process_tokendef(level) else: self._tokens = self._all_tokens[level] RegexLexer.__init__(self, **options) class BooLexer(RegexLexer): """ For `Boo <http://boo.codehaus.org/>`_ source code. """ name = 'Boo' aliases = ['boo'] filenames = ['*.boo'] mimetypes = ['text/x-boo'] tokens = { 'root': [ (r'\s+', Text), (r'(#|//).*$', Comment.Single), (r'/[*]', Comment.Multiline, 'comment'), (r'[]{}:(),.;[]', Punctuation), (r'\\\n', Text), (r'\\', Text), (r'(in|is|and|or|not)\b', Operator.Word), (r'/(\\\\|\\/|[^/\s])/', String.Regex), (r'@/(\\\\|\\/|[^/])*/', String.Regex), (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator), (r'(as|abstract|callable|constructor|destructor|do|import|' r'enum|event|final|get|interface|internal|of|override|' r'partial|private|protected|public|return|set|static|' r'struct|transient|virtual|yield|super|and|break|cast|' r'continue|elif|else|ensure|except|for|given|goto|if|in|' r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|' r'while|from|as)\b', Keyword), (r'def(?=\s+\(.*?\))', Keyword), (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'), (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|' r'assert|checked|enumerate|filter|getter|len|lock|map|' r'matrix|max|min|normalArrayIndexing|print|property|range|' r'rawArrayIndexing|required|typeof|unchecked|using|' r'yieldAll|zip)\b', Name.Builtin), (r'"""(\\\\|\\"|.*?)"""', String.Double), (r'"(\\\\|\\"|[^"]*?)"', String.Double), (r"'(\\\\|\\'|[^']*?)'", String.Single), (r'[a-zA-Z_]\w*', Name), (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float), (r'[0-9][0-9.]*(ms?|d|h|s)', Number), (r'0\d+', Number.Oct), (r'0x[a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer), ], 'comment': [ ('/[*]', Comment.Multiline, '#push'), ('[*]/', Comment.Multiline, '#pop'), ('[^/*]', Comment.Multiline), ('[*/]', Comment.Multiline) ], 'funcname': [ ('[a-zA-Z_]\w*', Name.Function, '#pop') ], 'classname': [ ('[a-zA-Z_]\w*', Name.Class, '#pop') ], 'namespace': [ ('[a-zA-Z_][\w.]*', Name.Namespace, '#pop') ] } class VbNetLexer(RegexLexer): """ For `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_ source code. """ name = 'VB.net' aliases = ['vb.net', 'vbnet'] filenames = ['*.vb', '*.bas'] mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?) 
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \ '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*' flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'^\s*<.*?>', Name.Attribute), (r'\s+', Text), (r'\n', Text), (r'rem\b.*?\n', Comment), (r"'.*?\n", Comment), (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|' r'#ExternalSource.*?\n|#End\s+ExternalSource|' r'#Region.*?\n|#End\s+Region|#ExternalChecksum', Comment.Preproc), (r'[(){}!#,.:]', Punctuation), (r'Option\s+(Strict|Explicit|Compare)\s+' r'(On|Off|Binary|Text)', Keyword.Declaration), (words(( 'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case', 'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl', 'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng', 'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare', 'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else', 'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False', 'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo', 'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let', 'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase', 'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing', 'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator', 'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides', 'ParamArray', 'Partial', 'Private', 'Protected', 'Public', 'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume', 'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single', 'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To', 'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While', 'Widening', 'With', 'WithEvents', 'WriteOnly'), prefix='(?<!\.)', suffix=r'\b'), Keyword), (r'(?<!\.)End\b', Keyword, 'end'), (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'), (r'(?<!\.)(Function|Sub|Property)(\s+)', bygroups(Keyword, Text), 'funcname'), (r'(?<!\.)(Class|Structure|Enum)(\s+)', bygroups(Keyword, Text), 'classname'), (r'(?<!\.)(Module|Namespace|Imports)(\s+)', bygroups(Keyword, Text), 'namespace'), (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|' r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|' r'UShort)\b', Keyword.Type), (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|' r'Or|OrElse|TypeOf|Xor)\b', Operator.Word), (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|' r'<=|>=|<>|[-&*/\\^+=<>\[\]]', Operator), ('"', String, 'string'), (r'_\n', Text), # Line continuation (must be before Name) (uni_name + '[%&@!#$]?', Name), ('#.*?#', Literal.Date), (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float), (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer), ], 'string': [ (r'""', String), (r'"C?', String, '#pop'), (r'[^"]+', String), ], 'dim': [ (uni_name, Name.Variable, '#pop'), default('#pop'), # any other syntax ], 'funcname': [ (uni_name, Name.Function, '#pop'), ], 'classname': [ (uni_name, Name.Class, '#pop'), ], 'namespace': [ (uni_name, Name.Namespace), (r'\.', Name.Namespace), default('#pop'), ], 'end': [ (r'\s+', Text), (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b', Keyword, '#pop'), default('#pop'), ] } def analyse_text(text): if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE): return 0.5 class GenericAspxLexer(RegexLexer): """ Lexer for ASP.NET pages. 
""" name = 'aspx-gen' filenames = [] mimetypes = [] flags = re.DOTALL tokens = { 'root': [ (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)), (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer), Other, using(XmlLexer))), (r'(.+?)(?=<)', using(XmlLexer)), (r'.+', using(XmlLexer)), ], } # TODO support multiple languages within the same source file class CSharpAspxLexer(DelegatingLexer): """ Lexer for highlighting C# within ASP.NET pages. """ name = 'aspx-cs' aliases = ['aspx-cs'] filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'] mimetypes = [] def __init__(self, **options): super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer, **options) def analyse_text(text): if re.search(r'Page\s*Language="C#"', text, re.I) is not None: return 0.2 elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None: return 0.15 class VbNetAspxLexer(DelegatingLexer): """ Lexer for highlighting Visual Basic.net within ASP.NET pages. """ name = 'aspx-vb' aliases = ['aspx-vb'] filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'] mimetypes = [] def __init__(self, **options): super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer, **options) def analyse_text(text): if re.search(r'Page\s*Language="Vb"', text, re.I) is not None: return 0.2 elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None: return 0.15 # Very close to functional.OcamlLexer class FSharpLexer(RegexLexer): """ For the F# language (version 3.0). AAAAACK Strings http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775 .. versionadded:: 1.5 """ name = 'F#' aliases = ['fsharp'] filenames = ['*.fs', '*.fsi'] mimetypes = ['text/x-fsharp'] keywords = [ 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', 'while', 'with', 'yield!', 'yield', ] # Reserved words; cannot hurt to color them as keywords too. keywords += [ 'atomic', 'break', 'checked', 'component', 'const', 'constraint', 'constructor', 'continue', 'eager', 'event', 'external', 'fixed', 'functor', 'include', 'method', 'mixin', 'object', 'parallel', 'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait', 'virtual', 'volatile', ] keyopts = [ '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.', '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-', '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]', '_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>', ] operators = r'[!$%&*+\./:<=>?@^|~-]' word_operators = ['and', 'or', 'not'] prefix_syms = r'[!?~]' infix_syms = r'[=<>@^|&+\*/$%-]' primitives = [ 'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single', 'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string', 'list', 'exn', 'obj', 'enum', ] # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or # http://fsharp.org/about/files/spec.pdf for reference. Good luck. 
tokens = { 'escape-sequence': [ (r'\\[\\"\'ntbrafv]', String.Escape), (r'\\[0-9]{3}', String.Escape), (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\U[0-9a-fA-F]{8}', String.Escape), ], 'root': [ (r'\s+', Text), (r'\(\)|\[\]', Name.Builtin.Pseudo), (r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), (r'\b([A-Z][\w\']*)', Name), (r'///.*?\n', String.Doc), (r'//.*?\n', Comment.Single), (r'\(\*(?!\))', Comment, 'comment'), (r'@"', String, 'lstring'), (r'"""', String, 'tqs'), (r'"', String, 'string'), (r'\b(open|module)(\s+)([\w.]+)', bygroups(Keyword, Text, Name.Namespace)), (r'\b(let!?)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), (r'\b(type)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class)), (r'\b(member|override)(\s+)(\w+)(\.)(\w+)', bygroups(Keyword, Text, Name, Punctuation, Name.Function)), (r'\b(%s)\b' % '|'.join(keywords), Keyword), (r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name), (r'(%s)' % '|'.join(keyopts), Operator), (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n', Comment.Preproc), (r"[^\W\d][\w']*", Name), (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer), (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex), (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct), (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin), (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?', Number.Float), (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?", String.Char), (r"'.'", String.Char), (r"'", Keyword), # a stray quote is another syntax element (r'@?"', String.Double, 'string'), (r'[~?][a-z][\w\']*:', Name.Variable), ], 'dotted': [ (r'\s+', Text), (r'\.', Punctuation), (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), (r'[A-Z][\w\']*', Name, '#pop'), (r'[a-z_][\w\']*', Name, '#pop'), # e.g. dictionary index access default('#pop'), ], 'comment': [ (r'[^(*)@"]+', Comment), (r'\(\*', Comment, '#push'), (r'\*\)', Comment, '#pop'), # comments cannot be closed within strings in comments (r'@"', String, 'lstring'), (r'"""', String, 'tqs'), (r'"', String, 'string'), (r'[(*)@]', Comment), ], 'string': [ (r'[^\\"]+', String), include('escape-sequence'), (r'\\\n', String), (r'\n', String), # newlines are allowed in any string (r'"B?', String, '#pop'), ], 'lstring': [ (r'[^"]+', String), (r'\n', String), (r'""', String), (r'"B?', String, '#pop'), ], 'tqs': [ (r'[^"]+', String), (r'\n', String), (r'"""B?', String, '#pop'), (r'"', String), ], }
bsd-3-clause
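The CSharpLexer docstring above documents the unicodelevel option; here is a short, standard Pygments invocation that exercises it (the code sample is arbitrary):

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.dotnet import CSharpLexer

code = 'class Program { static void Main() { } }'
# 'none' restricts identifiers to ASCII and is the fastest level;
# 'basic' is the default, and 'full' is the slowest but most permissive.
print(highlight(code, CSharpLexer(unicodelevel='none'), TerminalFormatter()))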
ludmilamarian/invenio
invenio/modules/upgrader/upgrades/invenio_2013_03_18_aidPERSONIDDATA_last_updated.py
15
1685
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

import warnings
from invenio.legacy.dbquery import run_sql
from invenio.utils.text import wait_for_user

depends_on = ['invenio_release_1_1_0']

def info():
    return "Introduces aidPERSONIDDATA last_updated column and new table indexes"

def do_upgrade():
    column_exists = run_sql("SHOW COLUMNS FROM `aidPERSONIDDATA` LIKE 'last_updated'")
    if not column_exists:
        run_sql("""
            ALTER TABLE aidPERSONIDDATA
            ADD COLUMN last_updated TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
                NOT NULL DEFAULT CURRENT_TIMESTAMP AFTER opt3,
            ADD INDEX `timestamp-b` (`last_updated`)
        """)

    indexes = [i[2] for i in run_sql('SHOW INDEX FROM aidPERSONIDPAPERS')]
    if 'personid-flag-b' not in indexes:
        run_sql("""
            ALTER TABLE aidPERSONIDPAPERS
            ADD INDEX `personid-flag-b` (`personid`, `flag`)
        """)

def estimate():
    return 1
gpl-2.0
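do_upgrade() above is written to be re-runnable: it probes the live schema (SHOW COLUMNS, SHOW INDEX) before issuing each ALTER. The same guard extracted as a hypothetical helper — not part of Invenio, and the table/column names come from trusted migration code, never user input:

def add_column_once(table, column, ddl):
    # Probe first so running the upgrade twice is a no-op.
    exists = run_sql("SHOW COLUMNS FROM `%s` LIKE '%s'" % (table, column))
    if not exists:
        run_sql(ddl)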
John-Boik/Principled-Societies-Project
leddaApp/static/brython/src/Lib/test/test_shlex.py
113
5912
import io import shlex import string import unittest from test import support # The original test data set was from shellwords, by Hartmut Goebel. data = r"""x|x| foo bar|foo|bar| foo bar|foo|bar| foo bar |foo|bar| foo bar bla fasel|foo|bar|bla|fasel| x y z xxxx|x|y|z|xxxx| \x bar|\|x|bar| \ x bar|\|x|bar| \ bar|\|bar| foo \x bar|foo|\|x|bar| foo \ x bar|foo|\|x|bar| foo \ bar|foo|\|bar| foo "bar" bla|foo|"bar"|bla| "foo" "bar" "bla"|"foo"|"bar"|"bla"| "foo" bar "bla"|"foo"|bar|"bla"| "foo" bar bla|"foo"|bar|bla| foo 'bar' bla|foo|'bar'|bla| 'foo' 'bar' 'bla'|'foo'|'bar'|'bla'| 'foo' bar 'bla'|'foo'|bar|'bla'| 'foo' bar bla|'foo'|bar|bla| blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz| blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz| ""|""| ''|''| foo "" bar|foo|""|bar| foo '' bar|foo|''|bar| foo "" "" "" bar|foo|""|""|""|bar| foo '' '' '' bar|foo|''|''|''|bar| \""|\|""| "\"|"\"| "foo\ bar"|"foo\ bar"| "foo\\ bar"|"foo\\ bar"| "foo\\ bar\"|"foo\\ bar\"| "foo\\" bar\""|"foo\\"|bar|\|""| "foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"| "foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"| "foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"| "foo\x bar\" dfadf"|"foo\x bar\"|dfadf"| \''|\|''| 'foo\ bar'|'foo\ bar'| 'foo\\ bar'|'foo\\ bar'| "foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'| \"foo"|\|"foo"| \"foo"\x|\|"foo"|\|x| "foo\x"|"foo\x"| "foo\ "|"foo\ "| foo\ xx|foo|\|xx| foo\ x\x|foo|\|x|\|x| foo\ x\x\""|foo|\|x|\|x|\|""| "foo\ x\x"|"foo\ x\x"| "foo\ x\x\\"|"foo\ x\x\\"| "foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"| "foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"| "foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"| "foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'| 'foo\ bar'|'foo\ bar'| 'foo\\ bar'|'foo\\ bar'| foo\ bar|foo|\|bar| foo#bar\nbaz|foobaz| :-) ;-)|:|-|)|;|-|)| áéíóú|á|é|í|ó|ú| """ posix_data = r"""x|x| foo bar|foo|bar| foo bar|foo|bar| foo bar |foo|bar| foo bar bla fasel|foo|bar|bla|fasel| x y z xxxx|x|y|z|xxxx| \x bar|x|bar| \ x bar| x|bar| \ bar| bar| foo \x bar|foo|x|bar| foo \ x bar|foo| x|bar| foo \ bar|foo| bar| foo "bar" bla|foo|bar|bla| "foo" "bar" "bla"|foo|bar|bla| "foo" bar "bla"|foo|bar|bla| "foo" bar bla|foo|bar|bla| foo 'bar' bla|foo|bar|bla| 'foo' 'bar' 'bla'|foo|bar|bla| 'foo' bar 'bla'|foo|bar|bla| 'foo' bar bla|foo|bar|bla| blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz| blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz| ""|| ''|| foo "" bar|foo||bar| foo '' bar|foo||bar| foo "" "" "" bar|foo||||bar| foo '' '' '' bar|foo||||bar| \"|"| "\""|"| "foo\ bar"|foo\ bar| "foo\\ bar"|foo\ bar| "foo\\ bar\""|foo\ bar"| "foo\\" bar\"|foo\|bar"| "foo\\ bar\" dfadf"|foo\ bar" dfadf| "foo\\\ bar\" dfadf"|foo\\ bar" dfadf| "foo\\\x bar\" dfadf"|foo\\x bar" dfadf| "foo\x bar\" dfadf"|foo\x bar" dfadf| \'|'| 'foo\ bar'|foo\ bar| 'foo\\ bar'|foo\\ bar| "foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df| \"foo|"foo| \"foo\x|"foox| "foo\x"|foo\x| "foo\ "|foo\ | foo\ xx|foo xx| foo\ x\x|foo xx| foo\ x\x\"|foo xx"| "foo\ x\x"|foo\ x\x| "foo\ x\x\\"|foo\ x\x\| "foo\ x\x\\""foobar"|foo\ x\x\foobar| "foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar| "foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar| "foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't| "foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\| 'foo\ bar'|foo\ bar| 'foo\\ bar'|foo\\ bar| foo\ bar|foo bar| foo#bar\nbaz|foo|baz| :-) ;-)|:-)|;-)| áéíóú|áéíóú| """ class ShlexTest(unittest.TestCase): def setUp(self): self.data = [x.split("|")[:-1] for x in data.splitlines()] self.posix_data = 
[x.split("|")[:-1] for x in posix_data.splitlines()] for item in self.data: item[0] = item[0].replace(r"\n", "\n") for item in self.posix_data: item[0] = item[0].replace(r"\n", "\n") def splitTest(self, data, comments): for i in range(len(data)): l = shlex.split(data[i][0], comments=comments) self.assertEqual(l, data[i][1:], "%s: %s != %s" % (data[i][0], l, data[i][1:])) def oldSplit(self, s): ret = [] lex = shlex.shlex(io.StringIO(s)) tok = lex.get_token() while tok: ret.append(tok) tok = lex.get_token() return ret def testSplitPosix(self): """Test data splitting with posix parser""" self.splitTest(self.posix_data, comments=True) def testCompat(self): """Test compatibility interface""" for i in range(len(self.data)): l = self.oldSplit(self.data[i][0]) self.assertEqual(l, self.data[i][1:], "%s: %s != %s" % (self.data[i][0], l, self.data[i][1:])) def testQuote(self): safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./' unicode_sample = '\xe9\xe0\xdf' # e + acute accent, a + grave, sharp s unsafe = '"`$\\!' + unicode_sample self.assertEqual(shlex.quote(''), "''") self.assertEqual(shlex.quote(safeunquoted), safeunquoted) self.assertEqual(shlex.quote('test file name'), "'test file name'") for u in unsafe: self.assertEqual(shlex.quote('test%sname' % u), "'test%sname'" % u) for u in unsafe: self.assertEqual(shlex.quote("test%s'name'" % u), "'test%s'\"'\"'name'\"'\"''" % u) # Allow this test to be used with old shlex.py if not getattr(shlex, "split", None): for methname in dir(ShlexTest): if methname.startswith("test") and methname != "testCompat": delattr(ShlexTest, methname) def test_main(): support.run_unittest(ShlexTest) if __name__ == "__main__": test_main()
gpl-3.0
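The suite above pins down shlex's POSIX splitting, comment handling, and quote() escaping; the same behaviors in three lines of the standard library (shlex.quote requires Python 3.3+):

import shlex

print(shlex.split('foo "bar baz" qux'))       # ['foo', 'bar baz', 'qux']
print(shlex.split('foo#bar', comments=True))  # ['foo']  (comment stripped)
print(shlex.quote('test file name'))          # 'test file name'  (quoted)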
DNFcode/edx-platform
lms/djangoapps/courseware/tests/test_about.py
4
19162
""" Test the about xblock """ import datetime import pytz from django.conf import settings from django.core.urlresolvers import reverse from django.test.utils import override_settings from mock import patch from opaque_keys.edx.locations import SlashSeparatedCourseKey from course_modes.models import CourseMode from xmodule.modulestore.tests.django_utils import ( TEST_DATA_MOCK_MODULESTORE, TEST_DATA_MIXED_CLOSED_MODULESTORE ) from student.models import CourseEnrollment from student.tests.factories import UserFactory, CourseEnrollmentAllowedFactory from shoppingcart.models import Order, PaidCourseRegistration from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from .helpers import LoginEnrollmentTestCase # HTML for registration button REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">" SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course." @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) class AboutTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase): """ Tests about xblock. """ def setUp(self): self.course = CourseFactory.create() self.about = ItemFactory.create( category="about", parent_location=self.course.location, data="OOGIE BLOOGIE", display_name="overview" ) self.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE) self.about = ItemFactory.create( category="about", parent_location=self.course_without_about.location, data="WITHOUT ABOUT", display_name="overview" ) self.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT) self.about = ItemFactory.create( category="about", parent_location=self.course_with_about.location, data="WITH ABOUT", display_name="overview" ) self.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy') self.course_mode = CourseMode(course_id=self.purchase_course.id, mode_slug="honor", mode_display_name="honor cert", min_price=10) self.course_mode.save() def test_anonymous_user(self): """ This test asserts that a non-logged in user can visit the course about page """ url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("OOGIE BLOOGIE", resp.content) # Check that registration button is present self.assertIn(REG_STR, resp.content) def test_logged_in(self): """ This test asserts that a logged-in user can visit the course about page """ self.setup_user() url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("OOGIE BLOOGIE", resp.content) def test_already_enrolled(self): """ Asserts that the end user sees the appropriate messaging when he/she visits the course about page, but is already enrolled """ self.setup_user() self.enroll(self.course, True) url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("You are registered for this course", resp.content) self.assertIn("View Courseware", resp.content) @override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page") def test_visible_about_page_settings(self): """ Verify that the About Page honors the 
permission settings in the course module """ url = reverse('about_course', args=[self.course_with_about.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("WITH ABOUT", resp.content) url = reverse('about_course', args=[self.course_without_about.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 404) @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True}) def test_logged_in_marketing(self): self.setup_user() url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) # should be redirected self.assertEqual(resp.status_code, 302) # follow this time, and check we're redirected to the course info page resp = self.client.get(url, follow=True) target_url = resp.redirect_chain[-1][0] info_url = reverse('info', args=[self.course.id.to_deprecated_string()]) self.assertTrue(target_url.endswith(info_url)) @override_settings(MODULESTORE=TEST_DATA_MIXED_CLOSED_MODULESTORE) class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase): """ Tests for the course about page """ # The following XML test course (which lives at common/test/data/2014) # is closed; we're testing that an about page still appears when # the course is already closed xml_course_id = SlashSeparatedCourseKey('edX', 'detached_pages', '2014') # this text appears in that course's about page # common/test/data/2014/about/overview.html xml_data = "about page 463139" @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False}) def test_logged_in_xml(self): self.setup_user() url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn(self.xml_data, resp.content) @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False}) def test_anonymous_user_xml(self): url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn(self.xml_data, resp.content) @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase): """ This test case will check the About page when a course has a capped enrollment """ def setUp(self): """ Set up the tests """ self.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1}) self.about = ItemFactory.create( category="about", parent_location=self.course.location, data="OOGIE BLOOGIE", display_name="overview" ) def test_enrollment_cap(self): """ This test will make sure that enrollment caps are enforced """ self.setup_user() url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn('<a href="#" class="register">', resp.content) self.enroll(self.course, verify=True) # create a new account since the first account is already registered for the course self.email = '[email protected]' self.password = 'bar' self.username = 'test_second' self.create_account(self.username, self.email, self.password) self.activate_user(self.email) self.login(self.email, self.password) # Get the about page again and make sure that the page says that the course is full resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Course is full", resp.content) # Try to enroll as well result = self.enroll(self.course) self.assertFalse(result) # 
Check that registration button is not present self.assertNotIn(REG_STR, resp.content) @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) class AboutWithInvitationOnly(ModuleStoreTestCase): """ This test case will check the About page when a course is invitation only. """ def setUp(self): self.course = CourseFactory.create(metadata={"invitation_only": True}) self.about = ItemFactory.create( category="about", parent_location=self.course.location, display_name="overview" ) def test_invitation_only(self): """ Test for user not logged in, invitation only course. """ url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Enrollment in this course is by invitation only", resp.content) # Check that registration button is not present self.assertNotIn(REG_STR, resp.content) def test_invitation_only_but_allowed(self): """ Test for user logged in and allowed to enroll in invitation only course. """ # Course is invitation only, student is allowed to enroll and logged in user = UserFactory.create(username='allowed_student', password='test', email='[email protected]') CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id) self.client.login(username=user.username, password='test') url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn(u"Register for {}".format(self.course.id.course), resp.content) # Check that registration button is present self.assertIn(REG_STR, resp.content) @patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True}) @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) class AboutTestCaseShibCourse(LoginEnrollmentTestCase, ModuleStoreTestCase): """ Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses") """ def setUp(self): self.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/") self.about = ItemFactory.create( category="about", parent_location=self.course.location, data="OOGIE BLOOGIE", display_name="overview" ) def test_logged_in_shib_course(self): """ For shib courses, logged in users will see the register button, but get rejected once they click there """ self.setup_user() url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("OOGIE BLOOGIE", resp.content) self.assertIn(u"Register for {}".format(self.course.id.course), resp.content) self.assertIn(SHIB_ERROR_STR, resp.content) self.assertIn(REG_STR, resp.content) def test_anonymous_user_shib_course(self): """ For shib courses, anonymous users will also see the register button """ url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("OOGIE BLOOGIE", resp.content) self.assertIn(u"Register for {}".format(self.course.id.course), resp.content) self.assertIn(SHIB_ERROR_STR, resp.content) self.assertIn(REG_STR, resp.content) @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) class AboutWithClosedEnrollment(ModuleStoreTestCase): """ This test case will check the About page for a course that has enrollment start/end set but it is currently outside of that period. 
""" def setUp(self): super(AboutWithClosedEnrollment, self).setUp() self.course = CourseFactory.create(metadata={"invitation_only": False}) # Setup enrollment period to be in future now = datetime.datetime.now(pytz.UTC) tomorrow = now + datetime.timedelta(days=1) nextday = tomorrow + datetime.timedelta(days=1) self.course.enrollment_start = tomorrow self.course.enrollment_end = nextday self.course = self.update_course(self.course, self.user.id) self.about = ItemFactory.create( category="about", parent_location=self.course.location, display_name="overview" ) def test_closed_enrollmement(self): url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Enrollment is Closed", resp.content) # Check that registration button is not present self.assertNotIn(REG_STR, resp.content) @override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE) @patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True}) @patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True}) class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase): """ This test class runs through a suite of verifications regarding purchaseable courses """ def setUp(self): super(AboutPurchaseCourseTestCase, self).setUp() self.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy') self._set_ecomm(self.course) def _set_ecomm(self, course): """ Helper method to turn on ecommerce on the course """ course_mode = CourseMode( course_id=course.id, mode_slug="honor", mode_display_name="honor cert", min_price=10, ) course_mode.save() def test_anonymous_user(self): """ Make sure an anonymous user sees the purchase button """ url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Add buyme to Cart ($10)", resp.content) def test_logged_in(self): """ Make sure a logged in user sees the purchase button """ self.setup_user() url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Add buyme to Cart ($10)", resp.content) def test_already_in_cart(self): """ This makes sure if a user has this course in the cart, that the expected message appears """ self.setup_user() cart = Order.get_cart_for_user(self.user) PaidCourseRegistration.add_to_order(cart, self.course.id) url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("This course is in your", resp.content) self.assertNotIn("Add buyme to Cart ($10)", resp.content) def test_already_enrolled(self): """ This makes sure that the already enrolled message appears for paywalled courses """ self.setup_user() # note that we can't call self.enroll here since that goes through # the Django student views, which doesn't allow for enrollments # for paywalled courses CourseEnrollment.enroll(self.user, self.course.id) url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("You are registered for this course", resp.content) self.assertIn("View Courseware", resp.content) self.assertNotIn("Add buyme to Cart ($10)", resp.content) def test_closed_enrollment(self): """ This makes sure that paywalled courses also honor the registration window """ self.setup_user() 
now = datetime.datetime.now(pytz.UTC) tomorrow = now + datetime.timedelta(days=1) nextday = tomorrow + datetime.timedelta(days=1) self.course.enrollment_start = tomorrow self.course.enrollment_end = nextday self.course = self.update_course(self.course, self.user.id) url = reverse('about_course', args=[self.course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Enrollment is Closed", resp.content) self.assertNotIn("Add buyme to Cart ($10)", resp.content) def test_invitation_only(self): """ This makes sure that the invitation only restirction takes prescendence over any purchase enablements """ course = CourseFactory.create(metadata={"invitation_only": True}) self._set_ecomm(course) self.setup_user() url = reverse('about_course', args=[course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Enrollment in this course is by invitation only", resp.content) def test_enrollment_cap(self): """ Make sure that capped enrollments work even with paywalled courses """ course = CourseFactory.create( metadata={ "max_student_enrollments_allowed": 1, "display_coursenumber": "buyme", } ) self._set_ecomm(course) self.setup_user() url = reverse('about_course', args=[course.id.to_deprecated_string()]) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Add buyme to Cart ($10)", resp.content) # note that we can't call self.enroll here since that goes through # the Django student views, which doesn't allow for enrollments # for paywalled courses CourseEnrollment.enroll(self.user, course.id) # create a new account since the first account is already registered for the course email = '[email protected]' password = 'bar' username = 'test_second' self.create_account(username, email, password) self.activate_user(email) self.login(email, password) # Get the about page again and make sure that the page says that the course is full resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn("Course is full", resp.content) self.assertNotIn("Add buyme to Cart ($10)", resp.content)
agpl-3.0
UrLab/incubator
events/tests/test_forms.py
1
1547
from events.forms import EventForm
from datetime import datetime

import pytest

from users.models import User


@pytest.fixture(scope='function')
def user():
    user = User.objects.create(username="test", email="[email protected]",
                               first_name="Test", last_name="Test")
    return user.id


@pytest.mark.django_db
def test_only_title_and_state_required(user):
    form_data = {
        'title': 'wtf',
        'status': 'i',
        'organizer': user,
    }
    form = EventForm(data=form_data)
    assert form.is_valid(), form.errors


@pytest.mark.django_db
def test_no_stop_but_start(user):
    form_data = {
        'title': 'wtf',
        'status': 'i',
        'start': datetime(2000, 1, 1),
        'organizer': user,
    }
    form = EventForm(data=form_data)
    assert form.is_valid(), form.errors
    assert form.cleaned_data['start'] == form.cleaned_data['stop']
    assert form.cleaned_data['start'].year == 2000


def test_ready_must_have_date():
    form_data = {
        'title': 'wtf',
        'status': 'r',
    }
    form = EventForm(data=form_data)
    assert not form.is_valid(), form.errors
    assert 'Un événement prêt doit avoir une date de début' in form.errors['__all__']


def test_stop_must_be_after_start():
    form_data = {
        'title': 'wtf',
        'status': 'i',
        'start': datetime(2100, 1, 1),
        'stop': datetime(2000, 1, 1)
    }
    form = EventForm(data=form_data)
    assert not form.is_valid()
    assert 'La date de fin ne peut être avant la date de début' in form.errors['__all__']
agpl-3.0
gerrive/horizon
openstack_dashboard/test/integration_tests/tests/test_host_aggregates.py
14
2211
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages


class TestHostAggregates(helpers.AdminTestCase):
    HOST_AGGREGATE_NAME = helpers.gen_random_resource_name("host_aggregate")
    HOST_AGGREGATE_AVAILABILITY_ZONE = "nova"

    def test_host_aggregate_create(self):
        """tests the host aggregate creation and deletion functionalities:

        * creates a new host aggregate
        * verifies the host aggregate appears in the host aggregates table
        * deletes the newly created host aggregate
        * verifies the host aggregate does not appear in the table
          after deletion
        """
        hostaggregates_page = self.home_pg.go_to_system_hostaggregatespage()
        hostaggregates_page.create_host_aggregate(
            name=self.HOST_AGGREGATE_NAME,
            availability_zone=self.HOST_AGGREGATE_AVAILABILITY_ZONE)
        self.assertTrue(
            hostaggregates_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(hostaggregates_page.find_message_and_dismiss(
            messages.ERROR))
        self.assertTrue(hostaggregates_page.is_host_aggregate_present(
            self.HOST_AGGREGATE_NAME))

        hostaggregates_page.delete_host_aggregate(self.HOST_AGGREGATE_NAME)
        self.assertTrue(
            hostaggregates_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(hostaggregates_page.find_message_and_dismiss(
            messages.ERROR))
        self.assertFalse(hostaggregates_page.is_host_aggregate_present(
            self.HOST_AGGREGATE_NAME))
apache-2.0
zacoxicompton/damnvid
ui/dMainFrame/dMainFrame.py
12
32421
# -*- coding: utf-8 -*- from ..dUI import * from dConverter import * from dUpdater import * from ..dPrefEditor import * from ..dDoneDialog import * from ..dAddURLDialog import * from ..dAboutDialog import * from ..dReportBug import * from ..dBrowser import * from ..dVideoHistory import * from dMenubar import * from dMainListPanel import * from dMainSidePanel import * from dMainGaugePanel import * from dMainGoPanel import * class DamnMainFrame(DamnFrame): # The main window def __init__(self, parent, id, title): Damnlog('DamnMainFrame GUI building starting.') DamnFrame.__init__(self, parent, wx.ID_ANY, title, pos=wx.Display().GetGeometry()[:2], size=(780, 580), style=wx.DEFAULT_FRAME_STYLE) self.CreateStatusBar() self.SetMenuBar(DamnMainMenubar(self)) Damnlog('DamnMainFrame menu bar is up.') vbox = wx.BoxSizer(wx.VERTICAL) self.SetSizer(vbox) #vbox.Add((0,DV.border_padding)) #Actually, do NOT add a padding there, it looks better when stuck on the edge panel = wx.Panel(self, -1) vbox.Add(panel, 1, wx.EXPAND) self.setupGrid(panel) self.Bind(wx.EVT_CLOSE, self.onClose, self) self.Bind(wx.EVT_SIZE, self.onResize, self) self.Bind(wx.EVT_ICONIZE, self.onMinimize) self.Bind(DV.evt_prog, self.onProgress) self.Bind(DV.evt_load, self.onLoading) Damnlog('DamnMainFrame: All GUI is up.') self.clipboardtimer = wx.Timer(self, -1) self.clipboardtimer.Start(1000) self.Bind(wx.EVT_TIMER, self.onClipboardTimer, self.clipboardtimer) Damnlog('DaminMainFrame: Clipboard timer started.') DV.icon = wx.Icon(DV.images_path + 'icon.ico', wx.BITMAP_TYPE_ICO) #DV.icon2 = wx.Icon(DV.images_path + 'icon-alt.ico', wx.BITMAP_TYPE_ICO) DV.icon16 = wx.Icon(DV.images_path + 'icon16.ico', wx.BITMAP_TYPE_ICO) self.SetIcon(DV.icon) Damnlog('DamnMainFrame: init stage 1 done.') def setupGrid(self, panel): grid = wx.FlexGridSizer(2, 2, 8, 8) grid.Add(DamnMainListPanel(self), 1, wx.EXPAND) grid.Add(DamnMainSidePanel(self), 0, wx.EXPAND) grid.Add(DamnMainGaugePanel(self), 0, wx.EXPAND) grid.Add(DamnMainGoPanel(self), 0, wx.EXPAND) panel.SetSizer(grid) grid.AddGrowableRow(0, 1) grid.AddGrowableCol(0, 1) def getNiceDimensions(self): return self.niceDimensions def setNiceDimensions(self, dimensions): self.niceDimensions = dimensions def init2(self): Damnlog('Starting DamnMainFrame init stage 2.') if os.path.exists(DV.conf_file_directory + u'lastversion.' + DV.safeProduct): lastversion = DamnOpenFile(DV.conf_file_directory + u'lastversion.' 
+ DV.safeProduct, 'r') dvversion = lastversion.readline().strip() lastversion.close() del lastversion Damnlog('Version file found; version number read:',dvversion) else: dvversion = 'old' # This is not just an arbitrary erroneous value, it's actually handy in the concatenation on the wx.FileDialog line below Damnlog('No version file found.') Damnlog('Read version:',dvversion,';running version:',DV.version) if dvversion != DV.version: # Just updated to new version, ask what to do about the preferences #dlg = wx.MessageDialog(self, DV.l('DamnVid was updated to ') + DV.version + '.\n' + DV.l('locale:damnvid-updated-export-prefs'), DV.l('DamnVid was successfully updated'), wx.YES | wx.NO | wx.ICON_QUESTION) tmpprefs = DamnPrefs() try: checkupdates = tmpprefs.get('CheckForUpdates') locale = tmpprefs.get('locale') except: pass Damnlog('Check for updates preference is',checkupdates) if False: #dlg.ShowModal() == wx.ID_YES: dlg.Destroy() dlg = wx.FileDialog(self, DV.l('Where do you want to export DamnVid\'s configuration?'), tmpprefs.get('lastprefdir'), 'DamnVid-' + dvversion + '-configuration.ini', DV.l('locale:browse-ini-files'), wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() f = DamnOpenFile(path, 'w') tmpprefs.ini.write(f) f.close() dlg.Destroy() else: pass # Now, overwrite the preferences! del tmpprefs os.remove(DV.conf_file) shutil.copyfile(DV.curdir + 'conf' + DV.sep + 'conf.ini', DV.conf_file) lastversion = DamnOpenFile(DV.conf_file_directory + u'lastversion.' + DV.safeProduct, 'w') lastversion.write(DV.version.encode('utf8')) lastversion.close() del lastversion tmpprefs = DamnPrefs() try: tmpprefs.set('CheckForUpdates', checkupdates) tmpprefs.set('locale', locale) except: pass tmpprefs.save() del tmpprefs Damnlog('Local version check done, initializing DamnMainFrame properties.') self.videos = [] self.clippedvideos = [] self.resultlist = [] self.thisbatch = 0 self.thisvideo = [] self.meta = {} DV.prefs = DamnPrefs() self.converting = -1 self.isclosing = False self.searchopen = False self.addurl = None self.loadingvisible = 0 self.trayicon = None self.historyDialog = None self.onListSelect() Damnlog('DamnMainFrame properties OK, first run?',DV.first_run) if DV.first_run: if DV.os == 'mac': DV.prefs.set('CheckForUpdates', 'True') Damnlog('Skipping asking user for updates because this is a Mac and Mac users don\'t want to configure stuff.') else: dlg = wx.MessageDialog(self, DV.l('Welcome to DamnVid ') + DV.version + '!\n' + DV.l('Would you like DamnVid to check for updates every time it starts?'), DV.l('Welcome to DamnVid ') + DV.version + '!', wx.YES | wx.NO | wx.ICON_QUESTION) if dlg.ShowModal() == wx.ID_YES: DV.prefs.set('CheckForUpdates', 'True') else: DV.prefs.set('CheckForUpdates', 'False') if DV.prefs.get('CheckForUpdates') == 'True': Damnlog('DamnMainFrame checking for updates.') self.onCheckUpdates(None) self.SetStatusText(DV.l('DamnVid ready.')) windowpolicy = DV.prefs.get('windowpolicy') if not len(windowpolicy) or windowpolicy=='center': Damnlog('Window policy is centering.') self.Center() elif windowpolicy=='remember': Damnlog('Window policy is remember; trying to load saved window geometry.') allstuff=( DV.prefs.gets('damnvid-mainwindow','lastx'), DV.prefs.gets('damnvid-mainwindow','lasty'), DV.prefs.gets('damnvid-mainwindow','lastw'), DV.prefs.gets('damnvid-mainwindow','lasth'), DV.prefs.gets('damnvid-mainwindow','lastresw'), DV.prefs.gets('damnvid-mainwindow','lastresh') ) Damnlog('Old window geometry information:',allstuff) 
allstuff2=[] for i in allstuff: try: allstuff2.append(int(i)) except: allstuff2.append(-1) if -1 in allstuff2: Damnlog('Invalid information in old window geometry information; giving up on restoring window geometry.') else: try: screen = wx.Display().GetGeometry()[2:] if allstuff2[4] != screen[0] or allstuff2[5]!= screen[1]: Damnlog('Resolution information is different:',allstuff2[4:5],'vs',screen,'(current); giving up on restoring window geometry.') elif allstuff2[0] < 0 or allstuff2[0] + allstuff2[2] >= allstuff2[4] or allstuff2[1] < 0 or allstuff2[1] + allstuff2[3] >= allstuff2[5]: Damnlog('Window position is out of bounds; giving up.') else: Damnlog('All window geometry tests passed, attempting to restore window geometry.') try: self.SetSizeWH(allstuff2[2],allstuff2[3]) self.MoveXY(allstuff2[0],allstuff2[1]) Damnlog('Window geometry restored successfully.') except: Damnlog('Window manager refused to change window geometry.') except: Damnlog('Could not get screen resolution; giving up on restoring window geometry.') else: Damnlog('Window policy is',windowpolicy,'; doing nothing.') self.updateHistory() Damnlog('DamnMainFrame: Main window all ready,') def onMinimize(self, event): if DV.os == u'posix': return # Do not do anything on Linux, let the window manager handle it Damnlog('DamnMainFrame iconize event fired. Is being minimized?', event.Iconized()) if self.isclosing: Damnlog('DamnMainFrame being closed, not interfering.') return if not event.Iconized(): Damnlog('DamnMainFrame being restored, doing nothing.') return if DV.prefs.get('minimizetotray')=='True': Damnlog('Minimize to tray preference is True, creating tray icon.') self.trayicon = DamnTrayIcon(self) else: Damnlog('Minimize to tray preference is False, doing nothing.') def onExit(self, event): self.Close(True) def onListSelect(self, event=None): sel = self.list.getAllSelectedItems() gotstuff = bool(len(sel)) count = self.list.GetItemCount() self.btnRename.Enable(len(sel) == 1) self.profiledropdown.Enable(bool(count)) if not count: self.profiledropdown.SetItems([DV.l('(None)')]) videosAffected = range(count) if gotstuff: videosAffected = sel self.deletebutton.SetLabel(DV.l('Remove')) self.deletebutton.Enable(self.converting not in sel) self.btnMoveUp.Enable(sel[0]) self.btnMoveDown.Enable(sel[-1] != self.list.GetItemCount() - 1) else: self.deletebutton.SetLabel(DV.l('Remove all')) self.deletebutton.Enable(self.converting == -1) self.btnMoveUp.Disable() self.btnMoveDown.Disable() if len(videosAffected): choices = [] uniprofile = int(self.meta[self.videos[videosAffected[0]]]['profile']) for i in videosAffected: if int(self.meta[self.videos[i]]['profile']) != uniprofile: uniprofile = -2 for p in range(-1, DV.prefs.profiles): choices.append(DV.l(DV.prefs.getp(p, 'name'), warn=False)) if uniprofile == -2: choices.insert(0, DV.l('(Multiple)')) self.profiledropdown.SetItems(choices) if uniprofile == -2: self.profiledropdown.SetSelection(0) else: self.profiledropdown.SetSelection(uniprofile + 1) def onListKeyDown(self, event): keycode = event.GetKeyCode() if (keycode == wx.WXK_BACK or keycode == wx.WXK_DELETE) and self.list.GetSelectedItemCount(): self.onDelSelection(event) elif (keycode == wx.WXK_F2 or keycode == wx.WXK_NUMPAD_F2) and self.list.GetSelectedItemCount() == 1: self.onRename(event) def onAddFile(self, event): d = os.getcwd() if os.path.exists(DV.prefs.get('LastFileDir')): if os.path.isdir(DV.prefs.get('LastFileDir')): d = DV.prefs.get('LastFileDir') elif os.path.exists(DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?')): if 
os.path.isdir(DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?')): d = DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?') dlg = wx.FileDialog(self, DV.l('Choose a damn video.'), d, '', DV.l('locale:browse-video-files'), wx.OPEN | wx.FD_MULTIPLE) dlg.SetIcon(DV.icon) if dlg.ShowModal() == wx.ID_OK: vids = dlg.GetPaths() DV.prefs.set('LastFileDir', os.path.dirname(vids[0])) DV.prefs.save() self.addVid(vids) dlg.Destroy() def onOpenHistory(self, event=None): Damnlog('onOpenHistory event fired:', event) if self.historyDialog is None: self.historyDialog = DamnHistoryViewer(self) self.historyDialog.Show() def onCloseHistory(self, event=None): Damnlog('onCloseHistory event fired:', event) self.historyDialog = None def onAddURL(self, event=None): Damnlog('onAddURL event fired:',event) default = '' try: if not wx.TheClipboard.IsOpened(): if wx.TheClipboard.Open(): dataobject = wx.TextDataObject() wx.TheClipboard.GetData(dataobject) default = dataobject.GetText() wx.TheClipboard.Close() Damnlog('Text scavenged from clipboard:',default) if not self.validURI(default): default = '' # Only set that as default text if the clipboard's text content is not a URL except: default = '' try: wx.TheClipboard.Close() # In case there's been an error before the clipboard could be closed, try to close it now except: pass # There's probably wasn't any error, just pass self.addurl = DamnAddURLDialog(self, default) self.addurl.SetIcon(DV.icon) self.addurl.ShowModal() try: self.addurl.Destroy() except: pass # The addurl destroys itself, supposedly, and doing it again sometimes (sometimes!) generates errors. self.addurl = None def validURI(self, uri): if REGEX_HTTP_GENERIC.match(uri): for i in DamnIterModules(False): if i['class'](uri).validURI(): return 'Video site' return 'Online video' # Not necessarily true, but ffmpeg will tell elif os.path.exists(uri): return 'Local file' return None def getVidName(self, uri): try: html = DamnURLOpen(uri[3:]) for i in html: res = REGEX_HTTP_GENERIC_TITLE_EXTRACT.search(i) if res: return DamnHtmlEntities(res.group(1)).strip() except: pass # Can't grab this? 
Return Unknown title return DV.l('Unknown title') def onDropTargetClick(self, event): dlg = wx.MessageDialog(self, DV.l('This is a droptarget: You may drop video files and folders here (or in the big list as well).'), DV.l('DamnVid Droptarget'), wx.ICON_INFORMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() def toggleLoading(self, show): isvisible = self.loadingvisible > 0 self.loadingvisible = max((0, self.loadingvisible + int(show) * 2 - 1)) if (isvisible and not self.loadingvisible) or (not isvisible and self.loadingvisible): DV.postEvent(self, DamnLoadingEvent(DV.evt_loading, -1, {'show':bool(self.loadingvisible)})) def onLoading(self, event): info = event.GetInfo() if info.has_key('show'): if info['show']: self.droptarget.LoadFile(DV.images_path + 'droptargetloading.gif') self.droptarget.Play() else: self.droptarget.Stop() self.droptarget.LoadFile(DV.images_path + 'droptarget.gif') if info.has_key('status'): self.SetStatusText(info['status']) if info.has_key('dialog'): dlg = wx.MessageDialog(self, info['dialog'][1], info['dialog'][0], info['dialog'][2]) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() if info.has_key('meta'): self.addValid(info['meta']) if info.has_key('go') and self.converting == -1: if info['go']: self.onGo() if info.has_key('updateinfo'): if info['updateinfo'].has_key('verbose'): verbose = info['updateinfo']['verbose'] else: verbose = True if info['updateinfo'].has_key('main'): if info['updateinfo']['main'] is not None: msg = None if DamnVersionCompare(info['updateinfo']['main'], DV.version) == 1 and type(info['updateinfo']['main']) is type(''): if DV.os != 'posix': dlg = wx.MessageDialog(self, DV.l('A new version (') + info['updateinfo']['main'] + DV.l(') is available! You are running DamnVid ') + DV.version + '.\n' + DV.l('Want to go to the download page and download the update?'), DV.l('Update available!'), wx.YES | wx.NO | wx.YES_DEFAULT | wx.ICON_INFORMATION) dlg.SetIcon(DV.icon) if dlg.ShowModal() == wx.ID_YES: webbrowser.open(DV.url_download, 2) dlg.Destroy() elif verbose and type(info['updateinfo']['main']) is type(''): if DV.version != info['updateinfo']['main']: versionwarning = DV.l(' However, your version (') + DV.version + DV.l(') seems different than the latest version available online. Where would you get that?') else: versionwarning = '' msg = (DV.l('DamnVid is up-to-date.'), DV.l('DamnVid is up-to-date! The latest version is ') + info['updateinfo']['main'] + '.' + versionwarning, wx.ICON_INFORMATION) elif verbose: msg = (DV.l('Error!'), DV.l('There was a problem while checking for updates. 
You are running DamnVid ') + DV.version + '.\n' + DV.l('Make sure you are connected to the Internet, and that no firewall is blocking DamnVid.'), wx.ICON_INFORMATION) if msg is not None: dlg = wx.MessageDialog(self, msg[1], msg[0], msg[2]) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() if info['updateinfo'].has_key('modules'): msg = [] for i in info['updateinfo']['modules'].iterkeys(): if type(info['updateinfo']['modules'][i]) is type(()): msg.append((True, DV.modules[i]['title'] + DV.l(' was updated to version ') + info['updateinfo']['modules'][i][0] + '.')) elif type(info['updateinfo']['modules'][i]) is type('') and verbose: if info['updateinfo']['modules'][i] == 'error': msg.append((False, DV.modules[i]['title'] + DV.l(' is up-to-date (version ') + DV.modules[i]['version'] + ').')) if len(msg): msgs = [] for i in msg: if i[0]: msgs.append(i[1]) if not len(msg) and verbose: msgs = msg if len(msgs): msg = DV.l('DamnVid also checked for updates to its modules.') + '\n' for i in msgs: msg += '\n' + i dlg = wx.MessageDialog(self, msg, DV.l('Module updates'), wx.ICON_INFORMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() def updateHistory(self): if self.historyDialog is not None: self.historyDialog.update() def onAddHistoryVideo(self, uri, event=None): Damnlog('History video add event fired:', event,'with URI', uri) self.addVid([uri], DV.prefs.get('autoconvert') == 'True') def addVid(self, uris, thengo=False): if thengo is None: thengo = DV.prefs.get('autoconvert') == 'True' if type(uris) in (type(''), type(u'')): uris = [DamnUnicode(uris)] DV.videoLoader(self, uris, thengo).start() def addTohistory(self, uri, title, icon=None): uri = DamnUnicode(uri) title = DamnUnicode(title) icon = DamnUnicode(icon) Damnlog('Adding video to history:', uri, 'with title', title, 'and icon', icon) history = DV.prefs.geta('damnvid-videohistory', 'videos') histsize = int(DV.prefs.get('videohistorysize')) if not histsize: Damnlog('Histsize is zero, not touching anything.') return for i in history: tempvideo = i.split(DV.history_split) if len(tempvideo) != 3: Damnlog('Invalid entry in history:', i) continue if tempvideo[0].strip().lower() == uri.strip().lower(): Damnlog('URI',uri,'is already in history, not adding it to history again.') return history.reverse() while len(history) >= histsize: history = history[1:] history.append(DV.history_split.join([uri,title,icon])) history.reverse() DV.prefs.seta('damnvid-videohistory','videos',history) Damnlog('Video added successfully, rebuilding history menu.') self.updateHistory() def addValid(self, meta): Damnlog('Adding video to DamnList with meta:',meta) if type(meta['icon']) in (type(''), type(u'')): meta['icon'] = DamnGetListIcon(meta['icon']) self.addTohistory(meta['original'], meta['name'], DV.listicons.getHandle(meta['icon'])) curvid = len(self.videos) self.list.InsertStringItem(curvid, meta['name']) self.list.SetStringItem(curvid, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(meta['profile'], 'name'))) self.list.SetStringItem(curvid, ID_COL_VIDPATH, meta['dirname']) self.list.SetStringItem(curvid, ID_COL_VIDSTAT, meta['status']) self.list.SetItemImage(curvid, meta['icon'], meta['icon']) self.videos.append(meta['uri']) self.meta[meta['uri']] = meta self.SetStatusText(DV.l('Added ') + meta['name'] + '.') if self.addurl is not None: self.addurl.update(meta['original'], meta['name'], meta['icon']) self.onListSelect() def onProgress(self, event): info = event.GetInfo() if info.has_key('progress'): self.gauge1.SetValue(info['progress']) if 
info.has_key('statustext'): self.SetStatusText(info['statustext']) if info.has_key('status'): self.list.SetStringItem(self.converting, ID_COL_VIDSTAT, info['status']) if self.trayicon is not None: self.trayicon.setTooltip(DamnUnicode(self.meta[self.videos[self.converting]]['name'])+u': '+info['status']) if info.has_key('dialog'): dlg = wx.MessageDialog(self, info['dialog'][0], info['dialog'][1], info['dialog'][2]) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() if info.has_key('go'): self.go(info['go']) def go(self, aborted=False): Damnlog('Time to go. Aborted?', aborted) self.converting = -1 for i in range(len(self.videos)): if self.videos[i] not in self.thisvideo and self.meta[self.videos[i]]['status'] != DV.l('Success!'): self.converting = i break if self.converting != -1 and not aborted: # Let's go for the actual conversion... self.meta[self.videos[self.converting]]['status'] = DV.l('In progress...') self.list.SetStringItem(self.converting, ID_COL_VIDSTAT, DV.l('In progress...')) self.thisbatch = self.thisbatch + 1 self.thread = DamnConverter(parent=self) if self.trayicon is not None: self.trayicon.startAlternate() self.thread.start() else: if self.trayicon is not None: self.trayicon.stopAlternate() if not self.isclosing: self.SetStatusText(DV.l('DamnVid, waiting for instructions.')) dlg = DamnDoneDialog(content=self.resultlist, aborted=aborted, main=self) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() self.converting = -1 self.stopbutton.Disable() self.gobutton1.Enable() self.gauge1.SetValue(0.0) self.onListSelect() def onGo(self, event=None): if not len(self.videos): dlg = wx.MessageDialog(self, DV.l('Put some videos in the list first!'), DV.l('No videos!'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() elif self.converting != -1: dlg = wx.MessageDialog(self, DV.l('DamnVid is already converting!'), DV.l('Already converting!'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() else: success = 0 for i in self.videos: if self.meta[i]['status'] == DV.l('Success!'): success = success + 1 if success == len(self.videos): dlg = wx.MessageDialog(self, DV.l('All videos in the list have already been processed!'), DV.l('Already done'), wx.OK | wx.ICON_INFORMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() else: self.thisbatch = 0 self.thisvideo = [] self.resultlist = [] self.stopbutton.Enable() self.gobutton1.Disable() self.go() self.onListSelect() def onStop(self, event): self.thread.abortProcess() def onRename(self, event): item = self.list.getAllSelectedItems() if len(item) > 1: dlg = wx.MessageDialog(self, DV.l('You can only rename one video at a time.'), DV.l('Multiple videos selected.'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() elif not len(item): dlg = wx.MessageDialog(self, DV.l('Select a video in order to rename it.'), DV.l('No videos selected'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() else: item = item[0] dlg = wx.TextEntryDialog(self, DV.l('Enter the new name for "') + self.meta[self.videos[item]]['name'] + '".', DV.l('Rename'), self.meta[self.videos[item]]['name']) dlg.SetIcon(DV.icon) dlg.ShowModal() newname = dlg.GetValue() self.meta[self.videos[item]]['name'] = newname self.list.SetStringItem(item, ID_COL_VIDNAME, newname) dlg.Destroy() history = DV.prefs.geta('damnvid-videohistory','videos') for i in range(len(history)): video = history[i].split(DV.history_split) if len(video) != 3: continue if video[0] == 
self.meta[self.videos[item]]['original']: video[1] = newname history[i] = DV.history_split.join(video) DV.prefs.seta('damnvid-videohistory','videos',history) DV.prefs.save() self.updateHistory() def onSearch(self, event): if not self.searchopen: self.searchopen = True self.searchdialog = DamnVidBrowser(self) self.searchdialog.Show() else: self.searchdialog.Raise() def invertVids(self, i1, i2): tmp = self.videos[i1] self.videos[i1] = self.videos[i2] self.videos[i2] = tmp tmp = self.list.IsSelected(i2) self.list.Select(i2, on=self.list.IsSelected(i1)) self.list.Select(i1, on=tmp) self.list.invertItems(i1, i2) if i1 == self.converting: self.converting = i2 elif i2 == self.converting: self.converting = i1 self.onListSelect() def onMoveUp(self, event): items = self.list.getAllSelectedItems() if len(items): if items[0]: for i in items: self.invertVids(i, i - 1) else: dlg = wx.MessageDialog(self, DV.l('You\'ve selected the first item in the list, which cannot be moved further up!'), DV.l('Invalid selection'), wx.OK | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() else: dlg = wx.MessageDialog(self, DV.l('Select some videos in the list first.'), DV.l('No videos selected!'), wx.OK | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() self.onListSelect() def onMoveDown(self, event): items = self.list.getAllSelectedItems() if len(items): if items[-1] < self.list.GetItemCount() - 1: for i in reversed(self.list.getAllSelectedItems()): self.invertVids(i, i + 1) else: dlg = wx.MessageDialog(self, DV.l('You\'ve selected the last item in the list, which cannot be moved further down!'), DV.l('Invalid selection'), wx.OK | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() else: dlg = wx.MessageDialog(self, DV.l('Select some videos in the list first.'), DV.l('No videos selected!'), wx.OK | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() self.onListSelect() def onChangeProfileDropdown(self, event): sel = self.profiledropdown.GetCurrentSelection() if self.profiledropdown.GetItems()[0] == '(Multiple)': sel -= 1 if sel != -1: self.onChangeProfile(sel - 1, event) def onChangeProfile(self, profile, event): items = self.list.getAllSelectedItems() if not len(items): items = range(self.list.GetItemCount()) for i in items: if self.meta[self.videos[i]]['profile'] != profile: self.meta[self.videos[i]]['profile'] = profile self.meta[self.videos[i]]['profilemodified'] = True self.list.SetStringItem(i, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(profile, 'name'))) self.onListSelect() def onResetStatus(self, event=None): items = self.list.getAllSelectedItems() for i in items: self.meta[self.videos[i]]['status'] = DV.l('Pending.') self.list.SetStringItem(i, ID_COL_VIDSTAT, DV.l('Pending.')) def onPrefs(self, event): self.reopenprefs = False prefs = DamnPrefEditor(self, -1, DV.l('DamnVid preferences'), main=self) prefs.ShowModal() prefs.Destroy() if self.reopenprefs: self.onPrefs(event) else: for i in range(len(self.videos)): if self.meta[self.videos[i]]['profile'] >= DV.prefs.profiles or not self.meta[self.videos[i]]['profilemodified']: # Yes, using icons as source identifiers, why not? 
Lol if self.meta[self.videos[i]].has_key('module'): self.meta[self.videos[i]]['profile'] = self.meta[self.videos[i]]['module'].getProfile() elif self.meta[self.videos[i]]['icon'] == DamnGetListIcon('damnvid'): self.meta[self.videos[i]]['profile'] = DV.prefs.get('defaultprofile') elif self.meta[self.videos[i]]['icon'] == DamnGetListIcon('generic'): self.meta[self.videos[i]]['profile'] = DV.prefs.get('defaultwebprofile') self.list.SetStringItem(i, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(self.meta[self.videos[i]]['profile'], 'name'))) try: del self.reopenprefs except: pass self.updateHistory() # In case history size changed self.onListSelect() def onOpenOutDir(self, event): if DV.os == 'nt': os.system('explorer.exe "' + DV.prefs.get('outdir') + '"') else: pass # Halp here? def onHalp(self, event): webbrowser.open(DV.url_halp, 2) def onReportBug(self, event): dlg = DamnReportBug(self, -1, main=self) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() def onCheckUpdates(self, event=None): updater = DamnVidUpdater(self, verbose=event is not None) updater.start() def onAboutDV(self, event): dlg = DamnAboutDamnVid(self, -1, main=self) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() def delVid(self, i): self.list.DeleteItem(i) for vid in range(len(self.thisvideo)): if self.thisvideo[vid] == self.videos[i]: self.thisvideo.pop(vid) self.thisbatch = self.thisbatch - 1 del self.meta[self.videos[i]] self.videos.pop(i) if self.converting > i: self.converting -= 1 def onDelete(self, event): Damnlog('onDelete event fired:', event) if len(self.list.getAllSelectedItems()): self.onDelSelection(event) else: self.onDelAll(event) def confirmDeletion(self): if DV.prefs.get('warnremove')!='True': return True dlg = wx.MessageDialog(self, DV.l('Are you sure? (This will not delete any files, it will just remove them from the list.)'), DV.l('Confirmation'), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION) dlg.SetIcon(DV.icon) return dlg.ShowModal() == wx.ID_YES def onDelSelection(self, event): items = self.list.getAllSelectedItems() if len(items): if self.converting in items: dlg = wx.MessageDialog(self, DV.l('Stop the video conversion before deleting the video being converted.'), DV.l('Cannot delete this video'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() elif self.confirmDeletion(): for i in reversed(items): # Sequence MUST be reversed, otherwise the first items get deleted first, which changes the indexes of the following items self.delVid(i) self.onListSelect() else: dlg = wx.MessageDialog(self, DV.l('You must select some videos from the list first!'), DV.l('Select some videos!'), wx.ICON_EXCLAMATION | wx.OK) dlg.SetIcon(DV.icon) dlg.ShowModal() dlg.Destroy() def onDelAll(self, event): if len(self.videos): if self.confirmDeletion(): if self.converting != -1: self.onStop(None) # Stop conversion if it's in progress self.list.DeleteAllItems() self.videos = [] self.thisvideo = [] self.thisbatch = 0 self.meta = {} else: dlg = wx.MessageDialog(self, DV.l('Add some videos in the list first.'), DV.l('No videos!'), wx.OK | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) dlg.Destroy() def onResize(self, event): self.Layout() def onClipboardTimer(self, event): self.clipboardtimer.Stop() try: if DV.gui_ok and DV.prefs.get('clipboard') == 'True' and not wx.TheClipboard.IsOpened(): if wx.TheClipboard.Open(): dataobject = wx.TextDataObject() wx.TheClipboard.GetData(dataobject) clip = dataobject.GetText() wx.TheClipboard.Close() clip = DamnUnicode(clip) if DV.oldclipboard != clip: DV.oldclipboard = 
clip Damnlog('Text scavenged from clipboard (in loop):', clip) if self.validURI(clip) == 'Video site' and clip not in self.clippedvideos: self.clippedvideos.append(clip) if self.addurl is not None: self.addurl.onAdd(val=clip) else: self.addVid([clip], DV.prefs.get('autoconvert') == 'True') except: Damnlog('Failed to open clipboard.') # The clipboard might not get opened properly, or the prefs object might not exist yet. Just silently pass, gonna catch up at next timer event. try: wx.TheClipboard.Close() # Try to close it, just in case it's left open. except: pass try: self.clipboardtimer.Start(1000) except: pass # Sometimes the timer can still live while DamnMainFrame is closed, and if EVT_TIMER is then raised, error! def onClose(self, event): Damnlog('Main window onClose event fired. Converting?', self.converting, '; Is already closing?', self.isclosing) if self.converting != -1: dlg = wx.MessageDialog(self, DV.l('DamnVid is currently converting a video! Closing DamnVid will cause it to abort the conversion.') + '\r\n' + DV.l('Continue?'), DV.l('Conversion in progress'), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_EXCLAMATION) dlg.SetIcon(DV.icon) if dlg.ShowModal() == wx.ID_YES: Damnlog('User forced shutdown!') self.shutdown() else: self.shutdown() def shutdown(self): Damnlog('Main window got shutdown() call') if self.historyDialog is not None: self.historyDialog.onClose() try: Damnlog('Attempting to get window position/size information.') position = self.GetPositionTuple() size = self.GetSize() screen = wx.Display().GetGeometry()[2:] Damnlog('Position is',position,'; size is',size,'; resolution is',screen) DV.prefs.sets('damnvid-mainwindow','lastx',position[0]) DV.prefs.sets('damnvid-mainwindow','lasty',position[1]) DV.prefs.sets('damnvid-mainwindow','lastw',size[0]) DV.prefs.sets('damnvid-mainwindow','lasth',size[1]) DV.prefs.sets('damnvid-mainwindow','lastresw',screen[0]) DV.prefs.sets('damnvid-mainwindow','lastresh',screen[1]) except: Damnlog('Error while trying to grab position/size information.') self.isclosing = True self.clipboardtimer.Stop() self.Destroy()
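Note on the 'remember' window policy implemented above: DamnMainFrame restores a saved position only when the saved screen resolution still matches the current display and the whole window would land on-screen. A minimal wx-free sketch of those checks (the function name restorable_geometry and its arguments are illustrative, not names from the source, which reads the six values from DV.prefs and applies them via SetSizeWH/MoveXY):

def restorable_geometry(saved, screen):
    # saved: (x, y, w, h, res_w, res_h) strings as stored in the prefs at
    # shutdown; screen: (res_w, res_h) of the current display.
    try:
        x, y, w, h, res_w, res_h = [int(v) for v in saved]
    except (TypeError, ValueError):
        return None  # unreadable values: give up and keep default placement
    if (res_w, res_h) != tuple(screen):
        return None  # resolution changed since the geometry was saved
    if x < 0 or y < 0 or x + w >= res_w or y + h >= res_h:
        return None  # the window would fall (partly) off-screen
    return (x, y, w, h)

# A 1280x800 window at (40, 40) is restored only on a still-1920x1080 display.
print restorable_geometry(('40', '40', '1280', '800', '1920', '1080'), (1920, 1080))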
gpl-3.0
daviddupont69/CouchPotatoServer
libs/subliminal/api.py
106
5646
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE,
    MATCHING_CONFIDENCE, create_list_tasks, consume_task, create_download_tasks,
    group_by_video, key_subtitles)
from .language import language_set, language_list, LANGUAGES
import logging


__all__ = ['list_subtitles', 'download_subtitles']
logger = logging.getLogger(__name__)


def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
    """List subtitles in given paths according to the criteria

    :param paths: path(s) to video file or folder
    :type paths: string or list
    :param languages: languages to search for, in preferred order
    :type languages: list of :class:`~subliminal.language.Language` or string
    :param list services: services to use for the search, in preferred order
    :param bool force: force searching for subtitles even if some are detected
    :param bool multi: search multiple languages for the same video
    :param string cache_dir: path to the cache directory to use
    :param int max_depth: maximum depth for scanning entries
    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
    :return: found subtitles
    :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]

    """
    services = services or SERVICES
    languages = language_set(languages) if languages is not None else language_set(LANGUAGES)
    if isinstance(paths, basestring):
        paths = [paths]
    if any([not isinstance(p, unicode) for p in paths]):
        logger.warning(u'Not all entries are unicode')
    results = []
    service_instances = {}
    tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
    for task in tasks:
        try:
            result = consume_task(task, service_instances)
            results.append((task.video, result))
        except:
            logger.error(u'Error consuming task %r' % task, exc_info=True)
    for service_instance in service_instances.itervalues():
        service_instance.terminate()
    return group_by_video(results)


def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None):
    """Download subtitles in given paths according to the criteria

    :param paths: path(s) to video file or folder
    :type paths: string or list
    :param languages: languages to search for, in preferred order
    :type languages: list of :class:`~subliminal.language.Language` or string
    :param list services: services to use for the search, in preferred order
    :param bool force: force searching for subtitles even if some are detected
    :param bool multi: search multiple languages for the same video
    :param string cache_dir: path to the cache directory to use
    :param int max_depth: maximum depth for scanning entries
    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
    :param order: preferred order for subtitles sorting
    :type order: list of :data:`~subliminal.core.LANGUAGE_INDEX`, :data:`~subliminal.core.SERVICE_INDEX`, :data:`~subliminal.core.SERVICE_CONFIDENCE`, :data:`~subliminal.core.MATCHING_CONFIDENCE`
    :return: downloaded subtitles
    :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]

    .. note::

        If you use ``multi=True``, :data:`~subliminal.core.LANGUAGE_INDEX` has to be the first item of the ``order`` list or you might get unexpected results.

    """
    services = services or SERVICES
    languages = language_list(languages) if languages is not None else language_list(LANGUAGES)
    if isinstance(paths, basestring):
        paths = [paths]
    order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
    subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
    for video, subtitles in subtitles_by_video.iteritems():
        subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
    results = []
    service_instances = {}
    tasks = create_download_tasks(subtitles_by_video, languages, multi)
    for task in tasks:
        try:
            result = consume_task(task, service_instances)
            results.append((task.video, result))
        except:
            logger.error(u'Error consuming task %r' % task, exc_info=True)
    for service_instance in service_instances.itervalues():
        service_instance.terminate()
    return group_by_video(results)
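A minimal usage sketch for the two entry points above, assuming the Python 2-era API documented in the docstrings; the video path, language codes and cache directory are illustrative placeholders:

# -*- coding: utf-8 -*-
from subliminal.api import list_subtitles, download_subtitles

# List English and French candidates for one video without downloading anything.
found = list_subtitles([u'/videos/Show.S01E01.mkv'], languages=['en', 'fr'],
                       multi=True, cache_dir=u'/tmp/subliminal')
for video, subtitles in found.iteritems():
    print video, len(subtitles)

# Download the best-ranked subtitles, keeping the default sort order
# [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE];
# with multi=True, LANGUAGE_INDEX must stay first (see the note above).
downloaded = download_subtitles([u'/videos/Show.S01E01.mkv'],
                                languages=['en', 'fr'], multi=True)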
gpl-3.0
mpetyx/palmdrop
venv/lib/python2.7/site-packages/html5lib/constants.py
963
87346
from __future__ import absolute_import, division, unicode_literals import string import gettext _ = gettext.gettext EOF = None E = { "null-character": _("Null character in input stream, replaced with U+FFFD."), "invalid-codepoint": _("Invalid codepoint in stream."), "incorrectly-placed-solidus": _("Solidus (/) incorrectly placed in tag."), "incorrect-cr-newline-entity": _("Incorrect CR newline entity, replaced with LF."), "illegal-windows-1252-entity": _("Entity used with illegal number (windows-1252 reference)."), "cant-convert-numeric-entity": _("Numeric entity couldn't be converted to character " "(codepoint U+%(charAsInt)08x)."), "illegal-codepoint-for-numeric-entity": _("Numeric entity represents an illegal codepoint: " "U+%(charAsInt)08x."), "numeric-entity-without-semicolon": _("Numeric entity didn't end with ';'."), "expected-numeric-entity-but-got-eof": _("Numeric entity expected. Got end of file instead."), "expected-numeric-entity": _("Numeric entity expected but none found."), "named-entity-without-semicolon": _("Named entity didn't end with ';'."), "expected-named-entity": _("Named entity expected. Got none."), "attributes-in-end-tag": _("End tag contains unexpected attributes."), 'self-closing-flag-on-end-tag': _("End tag contains unexpected self-closing flag."), "expected-tag-name-but-got-right-bracket": _("Expected tag name. Got '>' instead."), "expected-tag-name-but-got-question-mark": _("Expected tag name. Got '?' instead. (HTML doesn't " "support processing instructions.)"), "expected-tag-name": _("Expected tag name. Got something else instead"), "expected-closing-tag-but-got-right-bracket": _("Expected closing tag. Got '>' instead. Ignoring '</>'."), "expected-closing-tag-but-got-eof": _("Expected closing tag. Unexpected end of file."), "expected-closing-tag-but-got-char": _("Expected closing tag. Unexpected character '%(data)s' found."), "eof-in-tag-name": _("Unexpected end of file in the tag name."), "expected-attribute-name-but-got-eof": _("Unexpected end of file. Expected attribute name instead."), "eof-in-attribute-name": _("Unexpected end of file in attribute name."), "invalid-character-in-attribute-name": _("Invalid character in attribute name"), "duplicate-attribute": _("Dropped duplicate attribute on tag."), "expected-end-of-tag-name-but-got-eof": _("Unexpected end of file. Expected = or end of tag."), "expected-attribute-value-but-got-eof": _("Unexpected end of file. Expected attribute value."), "expected-attribute-value-but-got-right-bracket": _("Expected attribute value. Got '>' instead."), 'equals-in-unquoted-attribute-value': _("Unexpected = in unquoted attribute"), 'unexpected-character-in-unquoted-attribute-value': _("Unexpected character in unquoted attribute"), "invalid-character-after-attribute-name": _("Unexpected character after attribute name."), "unexpected-character-after-attribute-value": _("Unexpected character after attribute value."), "eof-in-attribute-value-double-quote": _("Unexpected end of file in attribute value (\")."), "eof-in-attribute-value-single-quote": _("Unexpected end of file in attribute value (')."), "eof-in-attribute-value-no-quotes": _("Unexpected end of file in attribute value."), "unexpected-EOF-after-solidus-in-tag": _("Unexpected end of file in tag. Expected >"), "unexpected-character-after-solidus-in-tag": _("Unexpected character after / in tag. Expected >"), "expected-dashes-or-doctype": _("Expected '--' or 'DOCTYPE'. Not found."), "unexpected-bang-after-double-dash-in-comment": _("Unexpected ! 
after -- in comment"), "unexpected-space-after-double-dash-in-comment": _("Unexpected space after -- in comment"), "incorrect-comment": _("Incorrect comment."), "eof-in-comment": _("Unexpected end of file in comment."), "eof-in-comment-end-dash": _("Unexpected end of file in comment (-)"), "unexpected-dash-after-double-dash-in-comment": _("Unexpected '-' after '--' found in comment."), "eof-in-comment-double-dash": _("Unexpected end of file in comment (--)."), "eof-in-comment-end-space-state": _("Unexpected end of file in comment."), "eof-in-comment-end-bang-state": _("Unexpected end of file in comment."), "unexpected-char-in-comment": _("Unexpected character in comment found."), "need-space-after-doctype": _("No space after literal string 'DOCTYPE'."), "expected-doctype-name-but-got-right-bracket": _("Unexpected > character. Expected DOCTYPE name."), "expected-doctype-name-but-got-eof": _("Unexpected end of file. Expected DOCTYPE name."), "eof-in-doctype-name": _("Unexpected end of file in DOCTYPE name."), "eof-in-doctype": _("Unexpected end of file in DOCTYPE."), "expected-space-or-right-bracket-in-doctype": _("Expected space or '>'. Got '%(data)s'"), "unexpected-end-of-doctype": _("Unexpected end of DOCTYPE."), "unexpected-char-in-doctype": _("Unexpected character in DOCTYPE."), "eof-in-innerhtml": _("XXX innerHTML EOF"), "unexpected-doctype": _("Unexpected DOCTYPE. Ignored."), "non-html-root": _("html needs to be the first start tag."), "expected-doctype-but-got-eof": _("Unexpected End of file. Expected DOCTYPE."), "unknown-doctype": _("Erroneous DOCTYPE."), "expected-doctype-but-got-chars": _("Unexpected non-space characters. Expected DOCTYPE."), "expected-doctype-but-got-start-tag": _("Unexpected start tag (%(name)s). Expected DOCTYPE."), "expected-doctype-but-got-end-tag": _("Unexpected end tag (%(name)s). Expected DOCTYPE."), "end-tag-after-implied-root": _("Unexpected end tag (%(name)s) after the (implied) root element."), "expected-named-closing-tag-but-got-eof": _("Unexpected end of file. Expected end tag (%(name)s)."), "two-heads-are-not-better-than-one": _("Unexpected start tag head in existing head. Ignored."), "unexpected-end-tag": _("Unexpected end tag (%(name)s). Ignored."), "unexpected-start-tag-out-of-my-head": _("Unexpected start tag (%(name)s) that can be in head. Moved."), "unexpected-start-tag": _("Unexpected start tag (%(name)s)."), "missing-end-tag": _("Missing end tag (%(name)s)."), "missing-end-tags": _("Missing end tags (%(name)s)."), "unexpected-start-tag-implies-end-tag": _("Unexpected start tag (%(startName)s) " "implies end tag (%(endName)s)."), "unexpected-start-tag-treated-as": _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."), "deprecated-tag": _("Unexpected start tag %(name)s. Don't use it!"), "unexpected-start-tag-ignored": _("Unexpected start tag %(name)s. Ignored."), "expected-one-end-tag-but-got-another": _("Unexpected end tag (%(gotName)s). " "Missing end tag (%(expectedName)s)."), "end-tag-too-early": _("End tag (%(name)s) seen too early. Expected other end tag."), "end-tag-too-early-named": _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), "end-tag-too-early-ignored": _("End tag (%(name)s) seen too early. 
Ignored."), "adoption-agency-1.1": _("End tag (%(name)s) violates step 1, " "paragraph 1 of the adoption agency algorithm."), "adoption-agency-1.2": _("End tag (%(name)s) violates step 1, " "paragraph 2 of the adoption agency algorithm."), "adoption-agency-1.3": _("End tag (%(name)s) violates step 1, " "paragraph 3 of the adoption agency algorithm."), "adoption-agency-4.4": _("End tag (%(name)s) violates step 4, " "paragraph 4 of the adoption agency algorithm."), "unexpected-end-tag-treated-as": _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."), "no-end-tag": _("This element (%(name)s) has no end tag."), "unexpected-implied-end-tag-in-table": _("Unexpected implied end tag (%(name)s) in the table phase."), "unexpected-implied-end-tag-in-table-body": _("Unexpected implied end tag (%(name)s) in the table body phase."), "unexpected-char-implies-table-voodoo": _("Unexpected non-space characters in " "table context caused voodoo mode."), "unexpected-hidden-input-in-table": _("Unexpected input with type hidden in table context."), "unexpected-form-in-table": _("Unexpected form in table context."), "unexpected-start-tag-implies-table-voodoo": _("Unexpected start tag (%(name)s) in " "table context caused voodoo mode."), "unexpected-end-tag-implies-table-voodoo": _("Unexpected end tag (%(name)s) in " "table context caused voodoo mode."), "unexpected-cell-in-table-body": _("Unexpected table cell start tag (%(name)s) " "in the table body phase."), "unexpected-cell-end-tag": _("Got table cell end tag (%(name)s) " "while required end tags are missing."), "unexpected-end-tag-in-table-body": _("Unexpected end tag (%(name)s) in the table body phase. Ignored."), "unexpected-implied-end-tag-in-table-row": _("Unexpected implied end tag (%(name)s) in the table row phase."), "unexpected-end-tag-in-table-row": _("Unexpected end tag (%(name)s) in the table row phase. Ignored."), "unexpected-select-in-select": _("Unexpected select start tag in the select phase " "treated as select end tag."), "unexpected-input-in-select": _("Unexpected input start tag in the select phase."), "unexpected-start-tag-in-select": _("Unexpected start tag token (%(name)s in the select phase. " "Ignored."), "unexpected-end-tag-in-select": _("Unexpected end tag (%(name)s) in the select phase. Ignored."), "unexpected-table-element-start-tag-in-select-in-table": _("Unexpected table element start tag (%(name)s) in the select in table phase."), "unexpected-table-element-end-tag-in-select-in-table": _("Unexpected table element end tag (%(name)s) in the select in table phase."), "unexpected-char-after-body": _("Unexpected non-space characters in the after body phase."), "unexpected-start-tag-after-body": _("Unexpected start tag token (%(name)s)" " in the after body phase."), "unexpected-end-tag-after-body": _("Unexpected end tag token (%(name)s)" " in the after body phase."), "unexpected-char-in-frameset": _("Unexpected characters in the frameset phase. Characters ignored."), "unexpected-start-tag-in-frameset": _("Unexpected start tag token (%(name)s)" " in the frameset phase. Ignored."), "unexpected-frameset-in-frameset-innerhtml": _("Unexpected end tag token (frameset) " "in the frameset phase (innerHTML)."), "unexpected-end-tag-in-frameset": _("Unexpected end tag token (%(name)s)" " in the frameset phase. Ignored."), "unexpected-char-after-frameset": _("Unexpected non-space characters in the " "after frameset phase. 
Ignored."), "unexpected-start-tag-after-frameset": _("Unexpected start tag (%(name)s)" " in the after frameset phase. Ignored."), "unexpected-end-tag-after-frameset": _("Unexpected end tag (%(name)s)" " in the after frameset phase. Ignored."), "unexpected-end-tag-after-body-innerhtml": _("Unexpected end tag after body(innerHtml)"), "expected-eof-but-got-char": _("Unexpected non-space characters. Expected end of file."), "expected-eof-but-got-start-tag": _("Unexpected start tag (%(name)s)" ". Expected end of file."), "expected-eof-but-got-end-tag": _("Unexpected end tag (%(name)s)" ". Expected end of file."), "eof-in-table": _("Unexpected end of file. Expected table content."), "eof-in-select": _("Unexpected end of file. Expected select content."), "eof-in-frameset": _("Unexpected end of file. Expected frameset content."), "eof-in-script-in-script": _("Unexpected end of file. Expected script content."), "eof-in-foreign-lands": _("Unexpected end of file. Expected foreign content"), "non-void-element-with-trailing-solidus": _("Trailing solidus not allowed on element %(name)s"), "unexpected-html-element-in-foreign-content": _("Element %(name)s not allowed in a non-html context"), "unexpected-end-tag-before-html": _("Unexpected end tag (%(name)s) before html."), "XXX-undefined-error": _("Undefined error (this sucks and should be fixed)"), } namespaces = { "html": "http://www.w3.org/1999/xhtml", "mathml": "http://www.w3.org/1998/Math/MathML", "svg": "http://www.w3.org/2000/svg", "xlink": "http://www.w3.org/1999/xlink", "xml": "http://www.w3.org/XML/1998/namespace", "xmlns": "http://www.w3.org/2000/xmlns/" } scopingElements = frozenset(( (namespaces["html"], "applet"), (namespaces["html"], "caption"), (namespaces["html"], "html"), (namespaces["html"], "marquee"), (namespaces["html"], "object"), (namespaces["html"], "table"), (namespaces["html"], "td"), (namespaces["html"], "th"), (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext"), (namespaces["mathml"], "annotation-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title"), )) formattingElements = frozenset(( (namespaces["html"], "a"), (namespaces["html"], "b"), (namespaces["html"], "big"), (namespaces["html"], "code"), (namespaces["html"], "em"), (namespaces["html"], "font"), (namespaces["html"], "i"), (namespaces["html"], "nobr"), (namespaces["html"], "s"), (namespaces["html"], "small"), (namespaces["html"], "strike"), (namespaces["html"], "strong"), (namespaces["html"], "tt"), (namespaces["html"], "u") )) specialElements = frozenset(( (namespaces["html"], "address"), (namespaces["html"], "applet"), (namespaces["html"], "area"), (namespaces["html"], "article"), (namespaces["html"], "aside"), (namespaces["html"], "base"), (namespaces["html"], "basefont"), (namespaces["html"], "bgsound"), (namespaces["html"], "blockquote"), (namespaces["html"], "body"), (namespaces["html"], "br"), (namespaces["html"], "button"), (namespaces["html"], "caption"), (namespaces["html"], "center"), (namespaces["html"], "col"), (namespaces["html"], "colgroup"), (namespaces["html"], "command"), (namespaces["html"], "dd"), (namespaces["html"], "details"), (namespaces["html"], "dir"), (namespaces["html"], "div"), (namespaces["html"], "dl"), (namespaces["html"], "dt"), (namespaces["html"], "embed"), (namespaces["html"], "fieldset"), (namespaces["html"], "figure"), (namespaces["html"], "footer"), (namespaces["html"], "form"), 
(namespaces["html"], "frame"), (namespaces["html"], "frameset"), (namespaces["html"], "h1"), (namespaces["html"], "h2"), (namespaces["html"], "h3"), (namespaces["html"], "h4"), (namespaces["html"], "h5"), (namespaces["html"], "h6"), (namespaces["html"], "head"), (namespaces["html"], "header"), (namespaces["html"], "hr"), (namespaces["html"], "html"), (namespaces["html"], "iframe"), # Note that image is commented out in the spec as "this isn't an # element that can end up on the stack, so it doesn't matter," (namespaces["html"], "image"), (namespaces["html"], "img"), (namespaces["html"], "input"), (namespaces["html"], "isindex"), (namespaces["html"], "li"), (namespaces["html"], "link"), (namespaces["html"], "listing"), (namespaces["html"], "marquee"), (namespaces["html"], "menu"), (namespaces["html"], "meta"), (namespaces["html"], "nav"), (namespaces["html"], "noembed"), (namespaces["html"], "noframes"), (namespaces["html"], "noscript"), (namespaces["html"], "object"), (namespaces["html"], "ol"), (namespaces["html"], "p"), (namespaces["html"], "param"), (namespaces["html"], "plaintext"), (namespaces["html"], "pre"), (namespaces["html"], "script"), (namespaces["html"], "section"), (namespaces["html"], "select"), (namespaces["html"], "style"), (namespaces["html"], "table"), (namespaces["html"], "tbody"), (namespaces["html"], "td"), (namespaces["html"], "textarea"), (namespaces["html"], "tfoot"), (namespaces["html"], "th"), (namespaces["html"], "thead"), (namespaces["html"], "title"), (namespaces["html"], "tr"), (namespaces["html"], "ul"), (namespaces["html"], "wbr"), (namespaces["html"], "xmp"), (namespaces["svg"], "foreignObject") )) htmlIntegrationPointElements = frozenset(( (namespaces["mathml"], "annotaion-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title") )) mathmlTextIntegrationPointElements = frozenset(( (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext") )) adjustForeignAttributes = { "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), "xlink:href": ("xlink", "href", namespaces["xlink"]), "xlink:role": ("xlink", "role", namespaces["xlink"]), "xlink:show": ("xlink", "show", namespaces["xlink"]), "xlink:title": ("xlink", "title", namespaces["xlink"]), "xlink:type": ("xlink", "type", namespaces["xlink"]), "xml:base": ("xml", "base", namespaces["xml"]), "xml:lang": ("xml", "lang", namespaces["xml"]), "xml:space": ("xml", "space", namespaces["xml"]), "xmlns": (None, "xmlns", namespaces["xmlns"]), "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) } unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in adjustForeignAttributes.items()]) spaceCharacters = frozenset(( "\t", "\n", "\u000C", " ", "\r" )) tableInsertModeElements = frozenset(( "table", "tbody", "tfoot", "thead", "tr" )) asciiLowercase = frozenset(string.ascii_lowercase) asciiUppercase = frozenset(string.ascii_uppercase) asciiLetters = frozenset(string.ascii_letters) digits = frozenset(string.digits) hexDigits = frozenset(string.hexdigits) asciiUpper2Lower = dict([(ord(c), ord(c.lower())) for c in string.ascii_uppercase]) # Heading elements need to be ordered headingElements = ( "h1", "h2", "h3", "h4", "h5", "h6" ) voidElements = frozenset(( "base", "command", "event-source", "link", "meta", "hr", "br", "img", "embed", "param", "area", "col", "input", "source", "track" )) 
cdataElements = frozenset(('title', 'textarea')) rcdataElements = frozenset(( 'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript' )) booleanAttributes = { "": frozenset(("irrelevant",)), "style": frozenset(("scoped",)), "img": frozenset(("ismap",)), "audio": frozenset(("autoplay", "controls")), "video": frozenset(("autoplay", "controls")), "script": frozenset(("defer", "async")), "details": frozenset(("open",)), "datagrid": frozenset(("multiple", "disabled")), "command": frozenset(("hidden", "disabled", "checked", "default")), "hr": frozenset(("noshade")), "menu": frozenset(("autosubmit",)), "fieldset": frozenset(("disabled", "readonly")), "option": frozenset(("disabled", "readonly", "selected")), "optgroup": frozenset(("disabled", "readonly")), "button": frozenset(("disabled", "autofocus")), "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")), "select": frozenset(("disabled", "readonly", "autofocus", "multiple")), "output": frozenset(("disabled", "readonly")), } # entitiesWindows1252 has to be _ordered_ and needs to have an index. It # therefore can't be a frozenset. entitiesWindows1252 = ( 8364, # 0x80 0x20AC EURO SIGN 65533, # 0x81 UNDEFINED 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS 8224, # 0x86 0x2020 DAGGER 8225, # 0x87 0x2021 DOUBLE DAGGER 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT 8240, # 0x89 0x2030 PER MILLE SIGN 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE 65533, # 0x8D UNDEFINED 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON 65533, # 0x8F UNDEFINED 65533, # 0x90 UNDEFINED 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK 8226, # 0x95 0x2022 BULLET 8211, # 0x96 0x2013 EN DASH 8212, # 0x97 0x2014 EM DASH 732, # 0x98 0x02DC SMALL TILDE 8482, # 0x99 0x2122 TRADE MARK SIGN 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE 65533, # 0x9D UNDEFINED 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS ) xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;')) entities = { "AElig": "\xc6", "AElig;": "\xc6", "AMP": "&", "AMP;": "&", "Aacute": "\xc1", "Aacute;": "\xc1", "Abreve;": "\u0102", "Acirc": "\xc2", "Acirc;": "\xc2", "Acy;": "\u0410", "Afr;": "\U0001d504", "Agrave": "\xc0", "Agrave;": "\xc0", "Alpha;": "\u0391", "Amacr;": "\u0100", "And;": "\u2a53", "Aogon;": "\u0104", "Aopf;": "\U0001d538", "ApplyFunction;": "\u2061", "Aring": "\xc5", "Aring;": "\xc5", "Ascr;": "\U0001d49c", "Assign;": "\u2254", "Atilde": "\xc3", "Atilde;": "\xc3", "Auml": "\xc4", "Auml;": "\xc4", "Backslash;": "\u2216", "Barv;": "\u2ae7", "Barwed;": "\u2306", "Bcy;": "\u0411", "Because;": "\u2235", "Bernoullis;": "\u212c", "Beta;": "\u0392", "Bfr;": "\U0001d505", "Bopf;": "\U0001d539", "Breve;": "\u02d8", "Bscr;": "\u212c", "Bumpeq;": "\u224e", "CHcy;": "\u0427", "COPY": "\xa9", "COPY;": "\xa9", "Cacute;": "\u0106", "Cap;": "\u22d2", "CapitalDifferentialD;": "\u2145", "Cayleys;": "\u212d", "Ccaron;": "\u010c", "Ccedil": "\xc7", "Ccedil;": "\xc7", "Ccirc;": "\u0108", "Cconint;": 
"\u2230", "Cdot;": "\u010a", "Cedilla;": "\xb8", "CenterDot;": "\xb7", "Cfr;": "\u212d", "Chi;": "\u03a7", "CircleDot;": "\u2299", "CircleMinus;": "\u2296", "CirclePlus;": "\u2295", "CircleTimes;": "\u2297", "ClockwiseContourIntegral;": "\u2232", "CloseCurlyDoubleQuote;": "\u201d", "CloseCurlyQuote;": "\u2019", "Colon;": "\u2237", "Colone;": "\u2a74", "Congruent;": "\u2261", "Conint;": "\u222f", "ContourIntegral;": "\u222e", "Copf;": "\u2102", "Coproduct;": "\u2210", "CounterClockwiseContourIntegral;": "\u2233", "Cross;": "\u2a2f", "Cscr;": "\U0001d49e", "Cup;": "\u22d3", "CupCap;": "\u224d", "DD;": "\u2145", "DDotrahd;": "\u2911", "DJcy;": "\u0402", "DScy;": "\u0405", "DZcy;": "\u040f", "Dagger;": "\u2021", "Darr;": "\u21a1", "Dashv;": "\u2ae4", "Dcaron;": "\u010e", "Dcy;": "\u0414", "Del;": "\u2207", "Delta;": "\u0394", "Dfr;": "\U0001d507", "DiacriticalAcute;": "\xb4", "DiacriticalDot;": "\u02d9", "DiacriticalDoubleAcute;": "\u02dd", "DiacriticalGrave;": "`", "DiacriticalTilde;": "\u02dc", "Diamond;": "\u22c4", "DifferentialD;": "\u2146", "Dopf;": "\U0001d53b", "Dot;": "\xa8", "DotDot;": "\u20dc", "DotEqual;": "\u2250", "DoubleContourIntegral;": "\u222f", "DoubleDot;": "\xa8", "DoubleDownArrow;": "\u21d3", "DoubleLeftArrow;": "\u21d0", "DoubleLeftRightArrow;": "\u21d4", "DoubleLeftTee;": "\u2ae4", "DoubleLongLeftArrow;": "\u27f8", "DoubleLongLeftRightArrow;": "\u27fa", "DoubleLongRightArrow;": "\u27f9", "DoubleRightArrow;": "\u21d2", "DoubleRightTee;": "\u22a8", "DoubleUpArrow;": "\u21d1", "DoubleUpDownArrow;": "\u21d5", "DoubleVerticalBar;": "\u2225", "DownArrow;": "\u2193", "DownArrowBar;": "\u2913", "DownArrowUpArrow;": "\u21f5", "DownBreve;": "\u0311", "DownLeftRightVector;": "\u2950", "DownLeftTeeVector;": "\u295e", "DownLeftVector;": "\u21bd", "DownLeftVectorBar;": "\u2956", "DownRightTeeVector;": "\u295f", "DownRightVector;": "\u21c1", "DownRightVectorBar;": "\u2957", "DownTee;": "\u22a4", "DownTeeArrow;": "\u21a7", "Downarrow;": "\u21d3", "Dscr;": "\U0001d49f", "Dstrok;": "\u0110", "ENG;": "\u014a", "ETH": "\xd0", "ETH;": "\xd0", "Eacute": "\xc9", "Eacute;": "\xc9", "Ecaron;": "\u011a", "Ecirc": "\xca", "Ecirc;": "\xca", "Ecy;": "\u042d", "Edot;": "\u0116", "Efr;": "\U0001d508", "Egrave": "\xc8", "Egrave;": "\xc8", "Element;": "\u2208", "Emacr;": "\u0112", "EmptySmallSquare;": "\u25fb", "EmptyVerySmallSquare;": "\u25ab", "Eogon;": "\u0118", "Eopf;": "\U0001d53c", "Epsilon;": "\u0395", "Equal;": "\u2a75", "EqualTilde;": "\u2242", "Equilibrium;": "\u21cc", "Escr;": "\u2130", "Esim;": "\u2a73", "Eta;": "\u0397", "Euml": "\xcb", "Euml;": "\xcb", "Exists;": "\u2203", "ExponentialE;": "\u2147", "Fcy;": "\u0424", "Ffr;": "\U0001d509", "FilledSmallSquare;": "\u25fc", "FilledVerySmallSquare;": "\u25aa", "Fopf;": "\U0001d53d", "ForAll;": "\u2200", "Fouriertrf;": "\u2131", "Fscr;": "\u2131", "GJcy;": "\u0403", "GT": ">", "GT;": ">", "Gamma;": "\u0393", "Gammad;": "\u03dc", "Gbreve;": "\u011e", "Gcedil;": "\u0122", "Gcirc;": "\u011c", "Gcy;": "\u0413", "Gdot;": "\u0120", "Gfr;": "\U0001d50a", "Gg;": "\u22d9", "Gopf;": "\U0001d53e", "GreaterEqual;": "\u2265", "GreaterEqualLess;": "\u22db", "GreaterFullEqual;": "\u2267", "GreaterGreater;": "\u2aa2", "GreaterLess;": "\u2277", "GreaterSlantEqual;": "\u2a7e", "GreaterTilde;": "\u2273", "Gscr;": "\U0001d4a2", "Gt;": "\u226b", "HARDcy;": "\u042a", "Hacek;": "\u02c7", "Hat;": "^", "Hcirc;": "\u0124", "Hfr;": "\u210c", "HilbertSpace;": "\u210b", "Hopf;": "\u210d", "HorizontalLine;": "\u2500", "Hscr;": "\u210b", "Hstrok;": "\u0126", "HumpDownHump;": 
"\u224e", "HumpEqual;": "\u224f", "IEcy;": "\u0415", "IJlig;": "\u0132", "IOcy;": "\u0401", "Iacute": "\xcd", "Iacute;": "\xcd", "Icirc": "\xce", "Icirc;": "\xce", "Icy;": "\u0418", "Idot;": "\u0130", "Ifr;": "\u2111", "Igrave": "\xcc", "Igrave;": "\xcc", "Im;": "\u2111", "Imacr;": "\u012a", "ImaginaryI;": "\u2148", "Implies;": "\u21d2", "Int;": "\u222c", "Integral;": "\u222b", "Intersection;": "\u22c2", "InvisibleComma;": "\u2063", "InvisibleTimes;": "\u2062", "Iogon;": "\u012e", "Iopf;": "\U0001d540", "Iota;": "\u0399", "Iscr;": "\u2110", "Itilde;": "\u0128", "Iukcy;": "\u0406", "Iuml": "\xcf", "Iuml;": "\xcf", "Jcirc;": "\u0134", "Jcy;": "\u0419", "Jfr;": "\U0001d50d", "Jopf;": "\U0001d541", "Jscr;": "\U0001d4a5", "Jsercy;": "\u0408", "Jukcy;": "\u0404", "KHcy;": "\u0425", "KJcy;": "\u040c", "Kappa;": "\u039a", "Kcedil;": "\u0136", "Kcy;": "\u041a", "Kfr;": "\U0001d50e", "Kopf;": "\U0001d542", "Kscr;": "\U0001d4a6", "LJcy;": "\u0409", "LT": "<", "LT;": "<", "Lacute;": "\u0139", "Lambda;": "\u039b", "Lang;": "\u27ea", "Laplacetrf;": "\u2112", "Larr;": "\u219e", "Lcaron;": "\u013d", "Lcedil;": "\u013b", "Lcy;": "\u041b", "LeftAngleBracket;": "\u27e8", "LeftArrow;": "\u2190", "LeftArrowBar;": "\u21e4", "LeftArrowRightArrow;": "\u21c6", "LeftCeiling;": "\u2308", "LeftDoubleBracket;": "\u27e6", "LeftDownTeeVector;": "\u2961", "LeftDownVector;": "\u21c3", "LeftDownVectorBar;": "\u2959", "LeftFloor;": "\u230a", "LeftRightArrow;": "\u2194", "LeftRightVector;": "\u294e", "LeftTee;": "\u22a3", "LeftTeeArrow;": "\u21a4", "LeftTeeVector;": "\u295a", "LeftTriangle;": "\u22b2", "LeftTriangleBar;": "\u29cf", "LeftTriangleEqual;": "\u22b4", "LeftUpDownVector;": "\u2951", "LeftUpTeeVector;": "\u2960", "LeftUpVector;": "\u21bf", "LeftUpVectorBar;": "\u2958", "LeftVector;": "\u21bc", "LeftVectorBar;": "\u2952", "Leftarrow;": "\u21d0", "Leftrightarrow;": "\u21d4", "LessEqualGreater;": "\u22da", "LessFullEqual;": "\u2266", "LessGreater;": "\u2276", "LessLess;": "\u2aa1", "LessSlantEqual;": "\u2a7d", "LessTilde;": "\u2272", "Lfr;": "\U0001d50f", "Ll;": "\u22d8", "Lleftarrow;": "\u21da", "Lmidot;": "\u013f", "LongLeftArrow;": "\u27f5", "LongLeftRightArrow;": "\u27f7", "LongRightArrow;": "\u27f6", "Longleftarrow;": "\u27f8", "Longleftrightarrow;": "\u27fa", "Longrightarrow;": "\u27f9", "Lopf;": "\U0001d543", "LowerLeftArrow;": "\u2199", "LowerRightArrow;": "\u2198", "Lscr;": "\u2112", "Lsh;": "\u21b0", "Lstrok;": "\u0141", "Lt;": "\u226a", "Map;": "\u2905", "Mcy;": "\u041c", "MediumSpace;": "\u205f", "Mellintrf;": "\u2133", "Mfr;": "\U0001d510", "MinusPlus;": "\u2213", "Mopf;": "\U0001d544", "Mscr;": "\u2133", "Mu;": "\u039c", "NJcy;": "\u040a", "Nacute;": "\u0143", "Ncaron;": "\u0147", "Ncedil;": "\u0145", "Ncy;": "\u041d", "NegativeMediumSpace;": "\u200b", "NegativeThickSpace;": "\u200b", "NegativeThinSpace;": "\u200b", "NegativeVeryThinSpace;": "\u200b", "NestedGreaterGreater;": "\u226b", "NestedLessLess;": "\u226a", "NewLine;": "\n", "Nfr;": "\U0001d511", "NoBreak;": "\u2060", "NonBreakingSpace;": "\xa0", "Nopf;": "\u2115", "Not;": "\u2aec", "NotCongruent;": "\u2262", "NotCupCap;": "\u226d", "NotDoubleVerticalBar;": "\u2226", "NotElement;": "\u2209", "NotEqual;": "\u2260", "NotEqualTilde;": "\u2242\u0338", "NotExists;": "\u2204", "NotGreater;": "\u226f", "NotGreaterEqual;": "\u2271", "NotGreaterFullEqual;": "\u2267\u0338", "NotGreaterGreater;": "\u226b\u0338", "NotGreaterLess;": "\u2279", "NotGreaterSlantEqual;": "\u2a7e\u0338", "NotGreaterTilde;": "\u2275", "NotHumpDownHump;": "\u224e\u0338", 
"NotHumpEqual;": "\u224f\u0338", "NotLeftTriangle;": "\u22ea", "NotLeftTriangleBar;": "\u29cf\u0338", "NotLeftTriangleEqual;": "\u22ec", "NotLess;": "\u226e", "NotLessEqual;": "\u2270", "NotLessGreater;": "\u2278", "NotLessLess;": "\u226a\u0338", "NotLessSlantEqual;": "\u2a7d\u0338", "NotLessTilde;": "\u2274", "NotNestedGreaterGreater;": "\u2aa2\u0338", "NotNestedLessLess;": "\u2aa1\u0338", "NotPrecedes;": "\u2280", "NotPrecedesEqual;": "\u2aaf\u0338", "NotPrecedesSlantEqual;": "\u22e0", "NotReverseElement;": "\u220c", "NotRightTriangle;": "\u22eb", "NotRightTriangleBar;": "\u29d0\u0338", "NotRightTriangleEqual;": "\u22ed", "NotSquareSubset;": "\u228f\u0338", "NotSquareSubsetEqual;": "\u22e2", "NotSquareSuperset;": "\u2290\u0338", "NotSquareSupersetEqual;": "\u22e3", "NotSubset;": "\u2282\u20d2", "NotSubsetEqual;": "\u2288", "NotSucceeds;": "\u2281", "NotSucceedsEqual;": "\u2ab0\u0338", "NotSucceedsSlantEqual;": "\u22e1", "NotSucceedsTilde;": "\u227f\u0338", "NotSuperset;": "\u2283\u20d2", "NotSupersetEqual;": "\u2289", "NotTilde;": "\u2241", "NotTildeEqual;": "\u2244", "NotTildeFullEqual;": "\u2247", "NotTildeTilde;": "\u2249", "NotVerticalBar;": "\u2224", "Nscr;": "\U0001d4a9", "Ntilde": "\xd1", "Ntilde;": "\xd1", "Nu;": "\u039d", "OElig;": "\u0152", "Oacute": "\xd3", "Oacute;": "\xd3", "Ocirc": "\xd4", "Ocirc;": "\xd4", "Ocy;": "\u041e", "Odblac;": "\u0150", "Ofr;": "\U0001d512", "Ograve": "\xd2", "Ograve;": "\xd2", "Omacr;": "\u014c", "Omega;": "\u03a9", "Omicron;": "\u039f", "Oopf;": "\U0001d546", "OpenCurlyDoubleQuote;": "\u201c", "OpenCurlyQuote;": "\u2018", "Or;": "\u2a54", "Oscr;": "\U0001d4aa", "Oslash": "\xd8", "Oslash;": "\xd8", "Otilde": "\xd5", "Otilde;": "\xd5", "Otimes;": "\u2a37", "Ouml": "\xd6", "Ouml;": "\xd6", "OverBar;": "\u203e", "OverBrace;": "\u23de", "OverBracket;": "\u23b4", "OverParenthesis;": "\u23dc", "PartialD;": "\u2202", "Pcy;": "\u041f", "Pfr;": "\U0001d513", "Phi;": "\u03a6", "Pi;": "\u03a0", "PlusMinus;": "\xb1", "Poincareplane;": "\u210c", "Popf;": "\u2119", "Pr;": "\u2abb", "Precedes;": "\u227a", "PrecedesEqual;": "\u2aaf", "PrecedesSlantEqual;": "\u227c", "PrecedesTilde;": "\u227e", "Prime;": "\u2033", "Product;": "\u220f", "Proportion;": "\u2237", "Proportional;": "\u221d", "Pscr;": "\U0001d4ab", "Psi;": "\u03a8", "QUOT": "\"", "QUOT;": "\"", "Qfr;": "\U0001d514", "Qopf;": "\u211a", "Qscr;": "\U0001d4ac", "RBarr;": "\u2910", "REG": "\xae", "REG;": "\xae", "Racute;": "\u0154", "Rang;": "\u27eb", "Rarr;": "\u21a0", "Rarrtl;": "\u2916", "Rcaron;": "\u0158", "Rcedil;": "\u0156", "Rcy;": "\u0420", "Re;": "\u211c", "ReverseElement;": "\u220b", "ReverseEquilibrium;": "\u21cb", "ReverseUpEquilibrium;": "\u296f", "Rfr;": "\u211c", "Rho;": "\u03a1", "RightAngleBracket;": "\u27e9", "RightArrow;": "\u2192", "RightArrowBar;": "\u21e5", "RightArrowLeftArrow;": "\u21c4", "RightCeiling;": "\u2309", "RightDoubleBracket;": "\u27e7", "RightDownTeeVector;": "\u295d", "RightDownVector;": "\u21c2", "RightDownVectorBar;": "\u2955", "RightFloor;": "\u230b", "RightTee;": "\u22a2", "RightTeeArrow;": "\u21a6", "RightTeeVector;": "\u295b", "RightTriangle;": "\u22b3", "RightTriangleBar;": "\u29d0", "RightTriangleEqual;": "\u22b5", "RightUpDownVector;": "\u294f", "RightUpTeeVector;": "\u295c", "RightUpVector;": "\u21be", "RightUpVectorBar;": "\u2954", "RightVector;": "\u21c0", "RightVectorBar;": "\u2953", "Rightarrow;": "\u21d2", "Ropf;": "\u211d", "RoundImplies;": "\u2970", "Rrightarrow;": "\u21db", "Rscr;": "\u211b", "Rsh;": "\u21b1", "RuleDelayed;": "\u29f4", "SHCHcy;": 
"\u0429", "SHcy;": "\u0428", "SOFTcy;": "\u042c", "Sacute;": "\u015a", "Sc;": "\u2abc", "Scaron;": "\u0160", "Scedil;": "\u015e", "Scirc;": "\u015c", "Scy;": "\u0421", "Sfr;": "\U0001d516", "ShortDownArrow;": "\u2193", "ShortLeftArrow;": "\u2190", "ShortRightArrow;": "\u2192", "ShortUpArrow;": "\u2191", "Sigma;": "\u03a3", "SmallCircle;": "\u2218", "Sopf;": "\U0001d54a", "Sqrt;": "\u221a", "Square;": "\u25a1", "SquareIntersection;": "\u2293", "SquareSubset;": "\u228f", "SquareSubsetEqual;": "\u2291", "SquareSuperset;": "\u2290", "SquareSupersetEqual;": "\u2292", "SquareUnion;": "\u2294", "Sscr;": "\U0001d4ae", "Star;": "\u22c6", "Sub;": "\u22d0", "Subset;": "\u22d0", "SubsetEqual;": "\u2286", "Succeeds;": "\u227b", "SucceedsEqual;": "\u2ab0", "SucceedsSlantEqual;": "\u227d", "SucceedsTilde;": "\u227f", "SuchThat;": "\u220b", "Sum;": "\u2211", "Sup;": "\u22d1", "Superset;": "\u2283", "SupersetEqual;": "\u2287", "Supset;": "\u22d1", "THORN": "\xde", "THORN;": "\xde", "TRADE;": "\u2122", "TSHcy;": "\u040b", "TScy;": "\u0426", "Tab;": "\t", "Tau;": "\u03a4", "Tcaron;": "\u0164", "Tcedil;": "\u0162", "Tcy;": "\u0422", "Tfr;": "\U0001d517", "Therefore;": "\u2234", "Theta;": "\u0398", "ThickSpace;": "\u205f\u200a", "ThinSpace;": "\u2009", "Tilde;": "\u223c", "TildeEqual;": "\u2243", "TildeFullEqual;": "\u2245", "TildeTilde;": "\u2248", "Topf;": "\U0001d54b", "TripleDot;": "\u20db", "Tscr;": "\U0001d4af", "Tstrok;": "\u0166", "Uacute": "\xda", "Uacute;": "\xda", "Uarr;": "\u219f", "Uarrocir;": "\u2949", "Ubrcy;": "\u040e", "Ubreve;": "\u016c", "Ucirc": "\xdb", "Ucirc;": "\xdb", "Ucy;": "\u0423", "Udblac;": "\u0170", "Ufr;": "\U0001d518", "Ugrave": "\xd9", "Ugrave;": "\xd9", "Umacr;": "\u016a", "UnderBar;": "_", "UnderBrace;": "\u23df", "UnderBracket;": "\u23b5", "UnderParenthesis;": "\u23dd", "Union;": "\u22c3", "UnionPlus;": "\u228e", "Uogon;": "\u0172", "Uopf;": "\U0001d54c", "UpArrow;": "\u2191", "UpArrowBar;": "\u2912", "UpArrowDownArrow;": "\u21c5", "UpDownArrow;": "\u2195", "UpEquilibrium;": "\u296e", "UpTee;": "\u22a5", "UpTeeArrow;": "\u21a5", "Uparrow;": "\u21d1", "Updownarrow;": "\u21d5", "UpperLeftArrow;": "\u2196", "UpperRightArrow;": "\u2197", "Upsi;": "\u03d2", "Upsilon;": "\u03a5", "Uring;": "\u016e", "Uscr;": "\U0001d4b0", "Utilde;": "\u0168", "Uuml": "\xdc", "Uuml;": "\xdc", "VDash;": "\u22ab", "Vbar;": "\u2aeb", "Vcy;": "\u0412", "Vdash;": "\u22a9", "Vdashl;": "\u2ae6", "Vee;": "\u22c1", "Verbar;": "\u2016", "Vert;": "\u2016", "VerticalBar;": "\u2223", "VerticalLine;": "|", "VerticalSeparator;": "\u2758", "VerticalTilde;": "\u2240", "VeryThinSpace;": "\u200a", "Vfr;": "\U0001d519", "Vopf;": "\U0001d54d", "Vscr;": "\U0001d4b1", "Vvdash;": "\u22aa", "Wcirc;": "\u0174", "Wedge;": "\u22c0", "Wfr;": "\U0001d51a", "Wopf;": "\U0001d54e", "Wscr;": "\U0001d4b2", "Xfr;": "\U0001d51b", "Xi;": "\u039e", "Xopf;": "\U0001d54f", "Xscr;": "\U0001d4b3", "YAcy;": "\u042f", "YIcy;": "\u0407", "YUcy;": "\u042e", "Yacute": "\xdd", "Yacute;": "\xdd", "Ycirc;": "\u0176", "Ycy;": "\u042b", "Yfr;": "\U0001d51c", "Yopf;": "\U0001d550", "Yscr;": "\U0001d4b4", "Yuml;": "\u0178", "ZHcy;": "\u0416", "Zacute;": "\u0179", "Zcaron;": "\u017d", "Zcy;": "\u0417", "Zdot;": "\u017b", "ZeroWidthSpace;": "\u200b", "Zeta;": "\u0396", "Zfr;": "\u2128", "Zopf;": "\u2124", "Zscr;": "\U0001d4b5", "aacute": "\xe1", "aacute;": "\xe1", "abreve;": "\u0103", "ac;": "\u223e", "acE;": "\u223e\u0333", "acd;": "\u223f", "acirc": "\xe2", "acirc;": "\xe2", "acute": "\xb4", "acute;": "\xb4", "acy;": "\u0430", "aelig": "\xe6", 
"aelig;": "\xe6", "af;": "\u2061", "afr;": "\U0001d51e", "agrave": "\xe0", "agrave;": "\xe0", "alefsym;": "\u2135", "aleph;": "\u2135", "alpha;": "\u03b1", "amacr;": "\u0101", "amalg;": "\u2a3f", "amp": "&", "amp;": "&", "and;": "\u2227", "andand;": "\u2a55", "andd;": "\u2a5c", "andslope;": "\u2a58", "andv;": "\u2a5a", "ang;": "\u2220", "ange;": "\u29a4", "angle;": "\u2220", "angmsd;": "\u2221", "angmsdaa;": "\u29a8", "angmsdab;": "\u29a9", "angmsdac;": "\u29aa", "angmsdad;": "\u29ab", "angmsdae;": "\u29ac", "angmsdaf;": "\u29ad", "angmsdag;": "\u29ae", "angmsdah;": "\u29af", "angrt;": "\u221f", "angrtvb;": "\u22be", "angrtvbd;": "\u299d", "angsph;": "\u2222", "angst;": "\xc5", "angzarr;": "\u237c", "aogon;": "\u0105", "aopf;": "\U0001d552", "ap;": "\u2248", "apE;": "\u2a70", "apacir;": "\u2a6f", "ape;": "\u224a", "apid;": "\u224b", "apos;": "'", "approx;": "\u2248", "approxeq;": "\u224a", "aring": "\xe5", "aring;": "\xe5", "ascr;": "\U0001d4b6", "ast;": "*", "asymp;": "\u2248", "asympeq;": "\u224d", "atilde": "\xe3", "atilde;": "\xe3", "auml": "\xe4", "auml;": "\xe4", "awconint;": "\u2233", "awint;": "\u2a11", "bNot;": "\u2aed", "backcong;": "\u224c", "backepsilon;": "\u03f6", "backprime;": "\u2035", "backsim;": "\u223d", "backsimeq;": "\u22cd", "barvee;": "\u22bd", "barwed;": "\u2305", "barwedge;": "\u2305", "bbrk;": "\u23b5", "bbrktbrk;": "\u23b6", "bcong;": "\u224c", "bcy;": "\u0431", "bdquo;": "\u201e", "becaus;": "\u2235", "because;": "\u2235", "bemptyv;": "\u29b0", "bepsi;": "\u03f6", "bernou;": "\u212c", "beta;": "\u03b2", "beth;": "\u2136", "between;": "\u226c", "bfr;": "\U0001d51f", "bigcap;": "\u22c2", "bigcirc;": "\u25ef", "bigcup;": "\u22c3", "bigodot;": "\u2a00", "bigoplus;": "\u2a01", "bigotimes;": "\u2a02", "bigsqcup;": "\u2a06", "bigstar;": "\u2605", "bigtriangledown;": "\u25bd", "bigtriangleup;": "\u25b3", "biguplus;": "\u2a04", "bigvee;": "\u22c1", "bigwedge;": "\u22c0", "bkarow;": "\u290d", "blacklozenge;": "\u29eb", "blacksquare;": "\u25aa", "blacktriangle;": "\u25b4", "blacktriangledown;": "\u25be", "blacktriangleleft;": "\u25c2", "blacktriangleright;": "\u25b8", "blank;": "\u2423", "blk12;": "\u2592", "blk14;": "\u2591", "blk34;": "\u2593", "block;": "\u2588", "bne;": "=\u20e5", "bnequiv;": "\u2261\u20e5", "bnot;": "\u2310", "bopf;": "\U0001d553", "bot;": "\u22a5", "bottom;": "\u22a5", "bowtie;": "\u22c8", "boxDL;": "\u2557", "boxDR;": "\u2554", "boxDl;": "\u2556", "boxDr;": "\u2553", "boxH;": "\u2550", "boxHD;": "\u2566", "boxHU;": "\u2569", "boxHd;": "\u2564", "boxHu;": "\u2567", "boxUL;": "\u255d", "boxUR;": "\u255a", "boxUl;": "\u255c", "boxUr;": "\u2559", "boxV;": "\u2551", "boxVH;": "\u256c", "boxVL;": "\u2563", "boxVR;": "\u2560", "boxVh;": "\u256b", "boxVl;": "\u2562", "boxVr;": "\u255f", "boxbox;": "\u29c9", "boxdL;": "\u2555", "boxdR;": "\u2552", "boxdl;": "\u2510", "boxdr;": "\u250c", "boxh;": "\u2500", "boxhD;": "\u2565", "boxhU;": "\u2568", "boxhd;": "\u252c", "boxhu;": "\u2534", "boxminus;": "\u229f", "boxplus;": "\u229e", "boxtimes;": "\u22a0", "boxuL;": "\u255b", "boxuR;": "\u2558", "boxul;": "\u2518", "boxur;": "\u2514", "boxv;": "\u2502", "boxvH;": "\u256a", "boxvL;": "\u2561", "boxvR;": "\u255e", "boxvh;": "\u253c", "boxvl;": "\u2524", "boxvr;": "\u251c", "bprime;": "\u2035", "breve;": "\u02d8", "brvbar": "\xa6", "brvbar;": "\xa6", "bscr;": "\U0001d4b7", "bsemi;": "\u204f", "bsim;": "\u223d", "bsime;": "\u22cd", "bsol;": "\\", "bsolb;": "\u29c5", "bsolhsub;": "\u27c8", "bull;": "\u2022", "bullet;": "\u2022", "bump;": "\u224e", "bumpE;": "\u2aae", 
"bumpe;": "\u224f", "bumpeq;": "\u224f", "cacute;": "\u0107", "cap;": "\u2229", "capand;": "\u2a44", "capbrcup;": "\u2a49", "capcap;": "\u2a4b", "capcup;": "\u2a47", "capdot;": "\u2a40", "caps;": "\u2229\ufe00", "caret;": "\u2041", "caron;": "\u02c7", "ccaps;": "\u2a4d", "ccaron;": "\u010d", "ccedil": "\xe7", "ccedil;": "\xe7", "ccirc;": "\u0109", "ccups;": "\u2a4c", "ccupssm;": "\u2a50", "cdot;": "\u010b", "cedil": "\xb8", "cedil;": "\xb8", "cemptyv;": "\u29b2", "cent": "\xa2", "cent;": "\xa2", "centerdot;": "\xb7", "cfr;": "\U0001d520", "chcy;": "\u0447", "check;": "\u2713", "checkmark;": "\u2713", "chi;": "\u03c7", "cir;": "\u25cb", "cirE;": "\u29c3", "circ;": "\u02c6", "circeq;": "\u2257", "circlearrowleft;": "\u21ba", "circlearrowright;": "\u21bb", "circledR;": "\xae", "circledS;": "\u24c8", "circledast;": "\u229b", "circledcirc;": "\u229a", "circleddash;": "\u229d", "cire;": "\u2257", "cirfnint;": "\u2a10", "cirmid;": "\u2aef", "cirscir;": "\u29c2", "clubs;": "\u2663", "clubsuit;": "\u2663", "colon;": ":", "colone;": "\u2254", "coloneq;": "\u2254", "comma;": ",", "commat;": "@", "comp;": "\u2201", "compfn;": "\u2218", "complement;": "\u2201", "complexes;": "\u2102", "cong;": "\u2245", "congdot;": "\u2a6d", "conint;": "\u222e", "copf;": "\U0001d554", "coprod;": "\u2210", "copy": "\xa9", "copy;": "\xa9", "copysr;": "\u2117", "crarr;": "\u21b5", "cross;": "\u2717", "cscr;": "\U0001d4b8", "csub;": "\u2acf", "csube;": "\u2ad1", "csup;": "\u2ad0", "csupe;": "\u2ad2", "ctdot;": "\u22ef", "cudarrl;": "\u2938", "cudarrr;": "\u2935", "cuepr;": "\u22de", "cuesc;": "\u22df", "cularr;": "\u21b6", "cularrp;": "\u293d", "cup;": "\u222a", "cupbrcap;": "\u2a48", "cupcap;": "\u2a46", "cupcup;": "\u2a4a", "cupdot;": "\u228d", "cupor;": "\u2a45", "cups;": "\u222a\ufe00", "curarr;": "\u21b7", "curarrm;": "\u293c", "curlyeqprec;": "\u22de", "curlyeqsucc;": "\u22df", "curlyvee;": "\u22ce", "curlywedge;": "\u22cf", "curren": "\xa4", "curren;": "\xa4", "curvearrowleft;": "\u21b6", "curvearrowright;": "\u21b7", "cuvee;": "\u22ce", "cuwed;": "\u22cf", "cwconint;": "\u2232", "cwint;": "\u2231", "cylcty;": "\u232d", "dArr;": "\u21d3", "dHar;": "\u2965", "dagger;": "\u2020", "daleth;": "\u2138", "darr;": "\u2193", "dash;": "\u2010", "dashv;": "\u22a3", "dbkarow;": "\u290f", "dblac;": "\u02dd", "dcaron;": "\u010f", "dcy;": "\u0434", "dd;": "\u2146", "ddagger;": "\u2021", "ddarr;": "\u21ca", "ddotseq;": "\u2a77", "deg": "\xb0", "deg;": "\xb0", "delta;": "\u03b4", "demptyv;": "\u29b1", "dfisht;": "\u297f", "dfr;": "\U0001d521", "dharl;": "\u21c3", "dharr;": "\u21c2", "diam;": "\u22c4", "diamond;": "\u22c4", "diamondsuit;": "\u2666", "diams;": "\u2666", "die;": "\xa8", "digamma;": "\u03dd", "disin;": "\u22f2", "div;": "\xf7", "divide": "\xf7", "divide;": "\xf7", "divideontimes;": "\u22c7", "divonx;": "\u22c7", "djcy;": "\u0452", "dlcorn;": "\u231e", "dlcrop;": "\u230d", "dollar;": "$", "dopf;": "\U0001d555", "dot;": "\u02d9", "doteq;": "\u2250", "doteqdot;": "\u2251", "dotminus;": "\u2238", "dotplus;": "\u2214", "dotsquare;": "\u22a1", "doublebarwedge;": "\u2306", "downarrow;": "\u2193", "downdownarrows;": "\u21ca", "downharpoonleft;": "\u21c3", "downharpoonright;": "\u21c2", "drbkarow;": "\u2910", "drcorn;": "\u231f", "drcrop;": "\u230c", "dscr;": "\U0001d4b9", "dscy;": "\u0455", "dsol;": "\u29f6", "dstrok;": "\u0111", "dtdot;": "\u22f1", "dtri;": "\u25bf", "dtrif;": "\u25be", "duarr;": "\u21f5", "duhar;": "\u296f", "dwangle;": "\u29a6", "dzcy;": "\u045f", "dzigrarr;": "\u27ff", "eDDot;": "\u2a77", "eDot;": 
"\u2251", "eacute": "\xe9", "eacute;": "\xe9", "easter;": "\u2a6e", "ecaron;": "\u011b", "ecir;": "\u2256", "ecirc": "\xea", "ecirc;": "\xea", "ecolon;": "\u2255", "ecy;": "\u044d", "edot;": "\u0117", "ee;": "\u2147", "efDot;": "\u2252", "efr;": "\U0001d522", "eg;": "\u2a9a", "egrave": "\xe8", "egrave;": "\xe8", "egs;": "\u2a96", "egsdot;": "\u2a98", "el;": "\u2a99", "elinters;": "\u23e7", "ell;": "\u2113", "els;": "\u2a95", "elsdot;": "\u2a97", "emacr;": "\u0113", "empty;": "\u2205", "emptyset;": "\u2205", "emptyv;": "\u2205", "emsp13;": "\u2004", "emsp14;": "\u2005", "emsp;": "\u2003", "eng;": "\u014b", "ensp;": "\u2002", "eogon;": "\u0119", "eopf;": "\U0001d556", "epar;": "\u22d5", "eparsl;": "\u29e3", "eplus;": "\u2a71", "epsi;": "\u03b5", "epsilon;": "\u03b5", "epsiv;": "\u03f5", "eqcirc;": "\u2256", "eqcolon;": "\u2255", "eqsim;": "\u2242", "eqslantgtr;": "\u2a96", "eqslantless;": "\u2a95", "equals;": "=", "equest;": "\u225f", "equiv;": "\u2261", "equivDD;": "\u2a78", "eqvparsl;": "\u29e5", "erDot;": "\u2253", "erarr;": "\u2971", "escr;": "\u212f", "esdot;": "\u2250", "esim;": "\u2242", "eta;": "\u03b7", "eth": "\xf0", "eth;": "\xf0", "euml": "\xeb", "euml;": "\xeb", "euro;": "\u20ac", "excl;": "!", "exist;": "\u2203", "expectation;": "\u2130", "exponentiale;": "\u2147", "fallingdotseq;": "\u2252", "fcy;": "\u0444", "female;": "\u2640", "ffilig;": "\ufb03", "fflig;": "\ufb00", "ffllig;": "\ufb04", "ffr;": "\U0001d523", "filig;": "\ufb01", "fjlig;": "fj", "flat;": "\u266d", "fllig;": "\ufb02", "fltns;": "\u25b1", "fnof;": "\u0192", "fopf;": "\U0001d557", "forall;": "\u2200", "fork;": "\u22d4", "forkv;": "\u2ad9", "fpartint;": "\u2a0d", "frac12": "\xbd", "frac12;": "\xbd", "frac13;": "\u2153", "frac14": "\xbc", "frac14;": "\xbc", "frac15;": "\u2155", "frac16;": "\u2159", "frac18;": "\u215b", "frac23;": "\u2154", "frac25;": "\u2156", "frac34": "\xbe", "frac34;": "\xbe", "frac35;": "\u2157", "frac38;": "\u215c", "frac45;": "\u2158", "frac56;": "\u215a", "frac58;": "\u215d", "frac78;": "\u215e", "frasl;": "\u2044", "frown;": "\u2322", "fscr;": "\U0001d4bb", "gE;": "\u2267", "gEl;": "\u2a8c", "gacute;": "\u01f5", "gamma;": "\u03b3", "gammad;": "\u03dd", "gap;": "\u2a86", "gbreve;": "\u011f", "gcirc;": "\u011d", "gcy;": "\u0433", "gdot;": "\u0121", "ge;": "\u2265", "gel;": "\u22db", "geq;": "\u2265", "geqq;": "\u2267", "geqslant;": "\u2a7e", "ges;": "\u2a7e", "gescc;": "\u2aa9", "gesdot;": "\u2a80", "gesdoto;": "\u2a82", "gesdotol;": "\u2a84", "gesl;": "\u22db\ufe00", "gesles;": "\u2a94", "gfr;": "\U0001d524", "gg;": "\u226b", "ggg;": "\u22d9", "gimel;": "\u2137", "gjcy;": "\u0453", "gl;": "\u2277", "glE;": "\u2a92", "gla;": "\u2aa5", "glj;": "\u2aa4", "gnE;": "\u2269", "gnap;": "\u2a8a", "gnapprox;": "\u2a8a", "gne;": "\u2a88", "gneq;": "\u2a88", "gneqq;": "\u2269", "gnsim;": "\u22e7", "gopf;": "\U0001d558", "grave;": "`", "gscr;": "\u210a", "gsim;": "\u2273", "gsime;": "\u2a8e", "gsiml;": "\u2a90", "gt": ">", "gt;": ">", "gtcc;": "\u2aa7", "gtcir;": "\u2a7a", "gtdot;": "\u22d7", "gtlPar;": "\u2995", "gtquest;": "\u2a7c", "gtrapprox;": "\u2a86", "gtrarr;": "\u2978", "gtrdot;": "\u22d7", "gtreqless;": "\u22db", "gtreqqless;": "\u2a8c", "gtrless;": "\u2277", "gtrsim;": "\u2273", "gvertneqq;": "\u2269\ufe00", "gvnE;": "\u2269\ufe00", "hArr;": "\u21d4", "hairsp;": "\u200a", "half;": "\xbd", "hamilt;": "\u210b", "hardcy;": "\u044a", "harr;": "\u2194", "harrcir;": "\u2948", "harrw;": "\u21ad", "hbar;": "\u210f", "hcirc;": "\u0125", "hearts;": "\u2665", "heartsuit;": "\u2665", "hellip;": 
"\u2026", "hercon;": "\u22b9", "hfr;": "\U0001d525", "hksearow;": "\u2925", "hkswarow;": "\u2926", "hoarr;": "\u21ff", "homtht;": "\u223b", "hookleftarrow;": "\u21a9", "hookrightarrow;": "\u21aa", "hopf;": "\U0001d559", "horbar;": "\u2015", "hscr;": "\U0001d4bd", "hslash;": "\u210f", "hstrok;": "\u0127", "hybull;": "\u2043", "hyphen;": "\u2010", "iacute": "\xed", "iacute;": "\xed", "ic;": "\u2063", "icirc": "\xee", "icirc;": "\xee", "icy;": "\u0438", "iecy;": "\u0435", "iexcl": "\xa1", "iexcl;": "\xa1", "iff;": "\u21d4", "ifr;": "\U0001d526", "igrave": "\xec", "igrave;": "\xec", "ii;": "\u2148", "iiiint;": "\u2a0c", "iiint;": "\u222d", "iinfin;": "\u29dc", "iiota;": "\u2129", "ijlig;": "\u0133", "imacr;": "\u012b", "image;": "\u2111", "imagline;": "\u2110", "imagpart;": "\u2111", "imath;": "\u0131", "imof;": "\u22b7", "imped;": "\u01b5", "in;": "\u2208", "incare;": "\u2105", "infin;": "\u221e", "infintie;": "\u29dd", "inodot;": "\u0131", "int;": "\u222b", "intcal;": "\u22ba", "integers;": "\u2124", "intercal;": "\u22ba", "intlarhk;": "\u2a17", "intprod;": "\u2a3c", "iocy;": "\u0451", "iogon;": "\u012f", "iopf;": "\U0001d55a", "iota;": "\u03b9", "iprod;": "\u2a3c", "iquest": "\xbf", "iquest;": "\xbf", "iscr;": "\U0001d4be", "isin;": "\u2208", "isinE;": "\u22f9", "isindot;": "\u22f5", "isins;": "\u22f4", "isinsv;": "\u22f3", "isinv;": "\u2208", "it;": "\u2062", "itilde;": "\u0129", "iukcy;": "\u0456", "iuml": "\xef", "iuml;": "\xef", "jcirc;": "\u0135", "jcy;": "\u0439", "jfr;": "\U0001d527", "jmath;": "\u0237", "jopf;": "\U0001d55b", "jscr;": "\U0001d4bf", "jsercy;": "\u0458", "jukcy;": "\u0454", "kappa;": "\u03ba", "kappav;": "\u03f0", "kcedil;": "\u0137", "kcy;": "\u043a", "kfr;": "\U0001d528", "kgreen;": "\u0138", "khcy;": "\u0445", "kjcy;": "\u045c", "kopf;": "\U0001d55c", "kscr;": "\U0001d4c0", "lAarr;": "\u21da", "lArr;": "\u21d0", "lAtail;": "\u291b", "lBarr;": "\u290e", "lE;": "\u2266", "lEg;": "\u2a8b", "lHar;": "\u2962", "lacute;": "\u013a", "laemptyv;": "\u29b4", "lagran;": "\u2112", "lambda;": "\u03bb", "lang;": "\u27e8", "langd;": "\u2991", "langle;": "\u27e8", "lap;": "\u2a85", "laquo": "\xab", "laquo;": "\xab", "larr;": "\u2190", "larrb;": "\u21e4", "larrbfs;": "\u291f", "larrfs;": "\u291d", "larrhk;": "\u21a9", "larrlp;": "\u21ab", "larrpl;": "\u2939", "larrsim;": "\u2973", "larrtl;": "\u21a2", "lat;": "\u2aab", "latail;": "\u2919", "late;": "\u2aad", "lates;": "\u2aad\ufe00", "lbarr;": "\u290c", "lbbrk;": "\u2772", "lbrace;": "{", "lbrack;": "[", "lbrke;": "\u298b", "lbrksld;": "\u298f", "lbrkslu;": "\u298d", "lcaron;": "\u013e", "lcedil;": "\u013c", "lceil;": "\u2308", "lcub;": "{", "lcy;": "\u043b", "ldca;": "\u2936", "ldquo;": "\u201c", "ldquor;": "\u201e", "ldrdhar;": "\u2967", "ldrushar;": "\u294b", "ldsh;": "\u21b2", "le;": "\u2264", "leftarrow;": "\u2190", "leftarrowtail;": "\u21a2", "leftharpoondown;": "\u21bd", "leftharpoonup;": "\u21bc", "leftleftarrows;": "\u21c7", "leftrightarrow;": "\u2194", "leftrightarrows;": "\u21c6", "leftrightharpoons;": "\u21cb", "leftrightsquigarrow;": "\u21ad", "leftthreetimes;": "\u22cb", "leg;": "\u22da", "leq;": "\u2264", "leqq;": "\u2266", "leqslant;": "\u2a7d", "les;": "\u2a7d", "lescc;": "\u2aa8", "lesdot;": "\u2a7f", "lesdoto;": "\u2a81", "lesdotor;": "\u2a83", "lesg;": "\u22da\ufe00", "lesges;": "\u2a93", "lessapprox;": "\u2a85", "lessdot;": "\u22d6", "lesseqgtr;": "\u22da", "lesseqqgtr;": "\u2a8b", "lessgtr;": "\u2276", "lesssim;": "\u2272", "lfisht;": "\u297c", "lfloor;": "\u230a", "lfr;": "\U0001d529", "lg;": "\u2276", 
"lgE;": "\u2a91", "lhard;": "\u21bd", "lharu;": "\u21bc", "lharul;": "\u296a", "lhblk;": "\u2584", "ljcy;": "\u0459", "ll;": "\u226a", "llarr;": "\u21c7", "llcorner;": "\u231e", "llhard;": "\u296b", "lltri;": "\u25fa", "lmidot;": "\u0140", "lmoust;": "\u23b0", "lmoustache;": "\u23b0", "lnE;": "\u2268", "lnap;": "\u2a89", "lnapprox;": "\u2a89", "lne;": "\u2a87", "lneq;": "\u2a87", "lneqq;": "\u2268", "lnsim;": "\u22e6", "loang;": "\u27ec", "loarr;": "\u21fd", "lobrk;": "\u27e6", "longleftarrow;": "\u27f5", "longleftrightarrow;": "\u27f7", "longmapsto;": "\u27fc", "longrightarrow;": "\u27f6", "looparrowleft;": "\u21ab", "looparrowright;": "\u21ac", "lopar;": "\u2985", "lopf;": "\U0001d55d", "loplus;": "\u2a2d", "lotimes;": "\u2a34", "lowast;": "\u2217", "lowbar;": "_", "loz;": "\u25ca", "lozenge;": "\u25ca", "lozf;": "\u29eb", "lpar;": "(", "lparlt;": "\u2993", "lrarr;": "\u21c6", "lrcorner;": "\u231f", "lrhar;": "\u21cb", "lrhard;": "\u296d", "lrm;": "\u200e", "lrtri;": "\u22bf", "lsaquo;": "\u2039", "lscr;": "\U0001d4c1", "lsh;": "\u21b0", "lsim;": "\u2272", "lsime;": "\u2a8d", "lsimg;": "\u2a8f", "lsqb;": "[", "lsquo;": "\u2018", "lsquor;": "\u201a", "lstrok;": "\u0142", "lt": "<", "lt;": "<", "ltcc;": "\u2aa6", "ltcir;": "\u2a79", "ltdot;": "\u22d6", "lthree;": "\u22cb", "ltimes;": "\u22c9", "ltlarr;": "\u2976", "ltquest;": "\u2a7b", "ltrPar;": "\u2996", "ltri;": "\u25c3", "ltrie;": "\u22b4", "ltrif;": "\u25c2", "lurdshar;": "\u294a", "luruhar;": "\u2966", "lvertneqq;": "\u2268\ufe00", "lvnE;": "\u2268\ufe00", "mDDot;": "\u223a", "macr": "\xaf", "macr;": "\xaf", "male;": "\u2642", "malt;": "\u2720", "maltese;": "\u2720", "map;": "\u21a6", "mapsto;": "\u21a6", "mapstodown;": "\u21a7", "mapstoleft;": "\u21a4", "mapstoup;": "\u21a5", "marker;": "\u25ae", "mcomma;": "\u2a29", "mcy;": "\u043c", "mdash;": "\u2014", "measuredangle;": "\u2221", "mfr;": "\U0001d52a", "mho;": "\u2127", "micro": "\xb5", "micro;": "\xb5", "mid;": "\u2223", "midast;": "*", "midcir;": "\u2af0", "middot": "\xb7", "middot;": "\xb7", "minus;": "\u2212", "minusb;": "\u229f", "minusd;": "\u2238", "minusdu;": "\u2a2a", "mlcp;": "\u2adb", "mldr;": "\u2026", "mnplus;": "\u2213", "models;": "\u22a7", "mopf;": "\U0001d55e", "mp;": "\u2213", "mscr;": "\U0001d4c2", "mstpos;": "\u223e", "mu;": "\u03bc", "multimap;": "\u22b8", "mumap;": "\u22b8", "nGg;": "\u22d9\u0338", "nGt;": "\u226b\u20d2", "nGtv;": "\u226b\u0338", "nLeftarrow;": "\u21cd", "nLeftrightarrow;": "\u21ce", "nLl;": "\u22d8\u0338", "nLt;": "\u226a\u20d2", "nLtv;": "\u226a\u0338", "nRightarrow;": "\u21cf", "nVDash;": "\u22af", "nVdash;": "\u22ae", "nabla;": "\u2207", "nacute;": "\u0144", "nang;": "\u2220\u20d2", "nap;": "\u2249", "napE;": "\u2a70\u0338", "napid;": "\u224b\u0338", "napos;": "\u0149", "napprox;": "\u2249", "natur;": "\u266e", "natural;": "\u266e", "naturals;": "\u2115", "nbsp": "\xa0", "nbsp;": "\xa0", "nbump;": "\u224e\u0338", "nbumpe;": "\u224f\u0338", "ncap;": "\u2a43", "ncaron;": "\u0148", "ncedil;": "\u0146", "ncong;": "\u2247", "ncongdot;": "\u2a6d\u0338", "ncup;": "\u2a42", "ncy;": "\u043d", "ndash;": "\u2013", "ne;": "\u2260", "neArr;": "\u21d7", "nearhk;": "\u2924", "nearr;": "\u2197", "nearrow;": "\u2197", "nedot;": "\u2250\u0338", "nequiv;": "\u2262", "nesear;": "\u2928", "nesim;": "\u2242\u0338", "nexist;": "\u2204", "nexists;": "\u2204", "nfr;": "\U0001d52b", "ngE;": "\u2267\u0338", "nge;": "\u2271", "ngeq;": "\u2271", "ngeqq;": "\u2267\u0338", "ngeqslant;": "\u2a7e\u0338", "nges;": "\u2a7e\u0338", "ngsim;": "\u2275", "ngt;": "\u226f", 
"ngtr;": "\u226f", "nhArr;": "\u21ce", "nharr;": "\u21ae", "nhpar;": "\u2af2", "ni;": "\u220b", "nis;": "\u22fc", "nisd;": "\u22fa", "niv;": "\u220b", "njcy;": "\u045a", "nlArr;": "\u21cd", "nlE;": "\u2266\u0338", "nlarr;": "\u219a", "nldr;": "\u2025", "nle;": "\u2270", "nleftarrow;": "\u219a", "nleftrightarrow;": "\u21ae", "nleq;": "\u2270", "nleqq;": "\u2266\u0338", "nleqslant;": "\u2a7d\u0338", "nles;": "\u2a7d\u0338", "nless;": "\u226e", "nlsim;": "\u2274", "nlt;": "\u226e", "nltri;": "\u22ea", "nltrie;": "\u22ec", "nmid;": "\u2224", "nopf;": "\U0001d55f", "not": "\xac", "not;": "\xac", "notin;": "\u2209", "notinE;": "\u22f9\u0338", "notindot;": "\u22f5\u0338", "notinva;": "\u2209", "notinvb;": "\u22f7", "notinvc;": "\u22f6", "notni;": "\u220c", "notniva;": "\u220c", "notnivb;": "\u22fe", "notnivc;": "\u22fd", "npar;": "\u2226", "nparallel;": "\u2226", "nparsl;": "\u2afd\u20e5", "npart;": "\u2202\u0338", "npolint;": "\u2a14", "npr;": "\u2280", "nprcue;": "\u22e0", "npre;": "\u2aaf\u0338", "nprec;": "\u2280", "npreceq;": "\u2aaf\u0338", "nrArr;": "\u21cf", "nrarr;": "\u219b", "nrarrc;": "\u2933\u0338", "nrarrw;": "\u219d\u0338", "nrightarrow;": "\u219b", "nrtri;": "\u22eb", "nrtrie;": "\u22ed", "nsc;": "\u2281", "nsccue;": "\u22e1", "nsce;": "\u2ab0\u0338", "nscr;": "\U0001d4c3", "nshortmid;": "\u2224", "nshortparallel;": "\u2226", "nsim;": "\u2241", "nsime;": "\u2244", "nsimeq;": "\u2244", "nsmid;": "\u2224", "nspar;": "\u2226", "nsqsube;": "\u22e2", "nsqsupe;": "\u22e3", "nsub;": "\u2284", "nsubE;": "\u2ac5\u0338", "nsube;": "\u2288", "nsubset;": "\u2282\u20d2", "nsubseteq;": "\u2288", "nsubseteqq;": "\u2ac5\u0338", "nsucc;": "\u2281", "nsucceq;": "\u2ab0\u0338", "nsup;": "\u2285", "nsupE;": "\u2ac6\u0338", "nsupe;": "\u2289", "nsupset;": "\u2283\u20d2", "nsupseteq;": "\u2289", "nsupseteqq;": "\u2ac6\u0338", "ntgl;": "\u2279", "ntilde": "\xf1", "ntilde;": "\xf1", "ntlg;": "\u2278", "ntriangleleft;": "\u22ea", "ntrianglelefteq;": "\u22ec", "ntriangleright;": "\u22eb", "ntrianglerighteq;": "\u22ed", "nu;": "\u03bd", "num;": "#", "numero;": "\u2116", "numsp;": "\u2007", "nvDash;": "\u22ad", "nvHarr;": "\u2904", "nvap;": "\u224d\u20d2", "nvdash;": "\u22ac", "nvge;": "\u2265\u20d2", "nvgt;": ">\u20d2", "nvinfin;": "\u29de", "nvlArr;": "\u2902", "nvle;": "\u2264\u20d2", "nvlt;": "<\u20d2", "nvltrie;": "\u22b4\u20d2", "nvrArr;": "\u2903", "nvrtrie;": "\u22b5\u20d2", "nvsim;": "\u223c\u20d2", "nwArr;": "\u21d6", "nwarhk;": "\u2923", "nwarr;": "\u2196", "nwarrow;": "\u2196", "nwnear;": "\u2927", "oS;": "\u24c8", "oacute": "\xf3", "oacute;": "\xf3", "oast;": "\u229b", "ocir;": "\u229a", "ocirc": "\xf4", "ocirc;": "\xf4", "ocy;": "\u043e", "odash;": "\u229d", "odblac;": "\u0151", "odiv;": "\u2a38", "odot;": "\u2299", "odsold;": "\u29bc", "oelig;": "\u0153", "ofcir;": "\u29bf", "ofr;": "\U0001d52c", "ogon;": "\u02db", "ograve": "\xf2", "ograve;": "\xf2", "ogt;": "\u29c1", "ohbar;": "\u29b5", "ohm;": "\u03a9", "oint;": "\u222e", "olarr;": "\u21ba", "olcir;": "\u29be", "olcross;": "\u29bb", "oline;": "\u203e", "olt;": "\u29c0", "omacr;": "\u014d", "omega;": "\u03c9", "omicron;": "\u03bf", "omid;": "\u29b6", "ominus;": "\u2296", "oopf;": "\U0001d560", "opar;": "\u29b7", "operp;": "\u29b9", "oplus;": "\u2295", "or;": "\u2228", "orarr;": "\u21bb", "ord;": "\u2a5d", "order;": "\u2134", "orderof;": "\u2134", "ordf": "\xaa", "ordf;": "\xaa", "ordm": "\xba", "ordm;": "\xba", "origof;": "\u22b6", "oror;": "\u2a56", "orslope;": "\u2a57", "orv;": "\u2a5b", "oscr;": "\u2134", "oslash": "\xf8", "oslash;": 
"\xf8", "osol;": "\u2298", "otilde": "\xf5", "otilde;": "\xf5", "otimes;": "\u2297", "otimesas;": "\u2a36", "ouml": "\xf6", "ouml;": "\xf6", "ovbar;": "\u233d", "par;": "\u2225", "para": "\xb6", "para;": "\xb6", "parallel;": "\u2225", "parsim;": "\u2af3", "parsl;": "\u2afd", "part;": "\u2202", "pcy;": "\u043f", "percnt;": "%", "period;": ".", "permil;": "\u2030", "perp;": "\u22a5", "pertenk;": "\u2031", "pfr;": "\U0001d52d", "phi;": "\u03c6", "phiv;": "\u03d5", "phmmat;": "\u2133", "phone;": "\u260e", "pi;": "\u03c0", "pitchfork;": "\u22d4", "piv;": "\u03d6", "planck;": "\u210f", "planckh;": "\u210e", "plankv;": "\u210f", "plus;": "+", "plusacir;": "\u2a23", "plusb;": "\u229e", "pluscir;": "\u2a22", "plusdo;": "\u2214", "plusdu;": "\u2a25", "pluse;": "\u2a72", "plusmn": "\xb1", "plusmn;": "\xb1", "plussim;": "\u2a26", "plustwo;": "\u2a27", "pm;": "\xb1", "pointint;": "\u2a15", "popf;": "\U0001d561", "pound": "\xa3", "pound;": "\xa3", "pr;": "\u227a", "prE;": "\u2ab3", "prap;": "\u2ab7", "prcue;": "\u227c", "pre;": "\u2aaf", "prec;": "\u227a", "precapprox;": "\u2ab7", "preccurlyeq;": "\u227c", "preceq;": "\u2aaf", "precnapprox;": "\u2ab9", "precneqq;": "\u2ab5", "precnsim;": "\u22e8", "precsim;": "\u227e", "prime;": "\u2032", "primes;": "\u2119", "prnE;": "\u2ab5", "prnap;": "\u2ab9", "prnsim;": "\u22e8", "prod;": "\u220f", "profalar;": "\u232e", "profline;": "\u2312", "profsurf;": "\u2313", "prop;": "\u221d", "propto;": "\u221d", "prsim;": "\u227e", "prurel;": "\u22b0", "pscr;": "\U0001d4c5", "psi;": "\u03c8", "puncsp;": "\u2008", "qfr;": "\U0001d52e", "qint;": "\u2a0c", "qopf;": "\U0001d562", "qprime;": "\u2057", "qscr;": "\U0001d4c6", "quaternions;": "\u210d", "quatint;": "\u2a16", "quest;": "?", "questeq;": "\u225f", "quot": "\"", "quot;": "\"", "rAarr;": "\u21db", "rArr;": "\u21d2", "rAtail;": "\u291c", "rBarr;": "\u290f", "rHar;": "\u2964", "race;": "\u223d\u0331", "racute;": "\u0155", "radic;": "\u221a", "raemptyv;": "\u29b3", "rang;": "\u27e9", "rangd;": "\u2992", "range;": "\u29a5", "rangle;": "\u27e9", "raquo": "\xbb", "raquo;": "\xbb", "rarr;": "\u2192", "rarrap;": "\u2975", "rarrb;": "\u21e5", "rarrbfs;": "\u2920", "rarrc;": "\u2933", "rarrfs;": "\u291e", "rarrhk;": "\u21aa", "rarrlp;": "\u21ac", "rarrpl;": "\u2945", "rarrsim;": "\u2974", "rarrtl;": "\u21a3", "rarrw;": "\u219d", "ratail;": "\u291a", "ratio;": "\u2236", "rationals;": "\u211a", "rbarr;": "\u290d", "rbbrk;": "\u2773", "rbrace;": "}", "rbrack;": "]", "rbrke;": "\u298c", "rbrksld;": "\u298e", "rbrkslu;": "\u2990", "rcaron;": "\u0159", "rcedil;": "\u0157", "rceil;": "\u2309", "rcub;": "}", "rcy;": "\u0440", "rdca;": "\u2937", "rdldhar;": "\u2969", "rdquo;": "\u201d", "rdquor;": "\u201d", "rdsh;": "\u21b3", "real;": "\u211c", "realine;": "\u211b", "realpart;": "\u211c", "reals;": "\u211d", "rect;": "\u25ad", "reg": "\xae", "reg;": "\xae", "rfisht;": "\u297d", "rfloor;": "\u230b", "rfr;": "\U0001d52f", "rhard;": "\u21c1", "rharu;": "\u21c0", "rharul;": "\u296c", "rho;": "\u03c1", "rhov;": "\u03f1", "rightarrow;": "\u2192", "rightarrowtail;": "\u21a3", "rightharpoondown;": "\u21c1", "rightharpoonup;": "\u21c0", "rightleftarrows;": "\u21c4", "rightleftharpoons;": "\u21cc", "rightrightarrows;": "\u21c9", "rightsquigarrow;": "\u219d", "rightthreetimes;": "\u22cc", "ring;": "\u02da", "risingdotseq;": "\u2253", "rlarr;": "\u21c4", "rlhar;": "\u21cc", "rlm;": "\u200f", "rmoust;": "\u23b1", "rmoustache;": "\u23b1", "rnmid;": "\u2aee", "roang;": "\u27ed", "roarr;": "\u21fe", "robrk;": "\u27e7", "ropar;": "\u2986", "ropf;": 
"\U0001d563", "roplus;": "\u2a2e", "rotimes;": "\u2a35", "rpar;": ")", "rpargt;": "\u2994", "rppolint;": "\u2a12", "rrarr;": "\u21c9", "rsaquo;": "\u203a", "rscr;": "\U0001d4c7", "rsh;": "\u21b1", "rsqb;": "]", "rsquo;": "\u2019", "rsquor;": "\u2019", "rthree;": "\u22cc", "rtimes;": "\u22ca", "rtri;": "\u25b9", "rtrie;": "\u22b5", "rtrif;": "\u25b8", "rtriltri;": "\u29ce", "ruluhar;": "\u2968", "rx;": "\u211e", "sacute;": "\u015b", "sbquo;": "\u201a", "sc;": "\u227b", "scE;": "\u2ab4", "scap;": "\u2ab8", "scaron;": "\u0161", "sccue;": "\u227d", "sce;": "\u2ab0", "scedil;": "\u015f", "scirc;": "\u015d", "scnE;": "\u2ab6", "scnap;": "\u2aba", "scnsim;": "\u22e9", "scpolint;": "\u2a13", "scsim;": "\u227f", "scy;": "\u0441", "sdot;": "\u22c5", "sdotb;": "\u22a1", "sdote;": "\u2a66", "seArr;": "\u21d8", "searhk;": "\u2925", "searr;": "\u2198", "searrow;": "\u2198", "sect": "\xa7", "sect;": "\xa7", "semi;": ";", "seswar;": "\u2929", "setminus;": "\u2216", "setmn;": "\u2216", "sext;": "\u2736", "sfr;": "\U0001d530", "sfrown;": "\u2322", "sharp;": "\u266f", "shchcy;": "\u0449", "shcy;": "\u0448", "shortmid;": "\u2223", "shortparallel;": "\u2225", "shy": "\xad", "shy;": "\xad", "sigma;": "\u03c3", "sigmaf;": "\u03c2", "sigmav;": "\u03c2", "sim;": "\u223c", "simdot;": "\u2a6a", "sime;": "\u2243", "simeq;": "\u2243", "simg;": "\u2a9e", "simgE;": "\u2aa0", "siml;": "\u2a9d", "simlE;": "\u2a9f", "simne;": "\u2246", "simplus;": "\u2a24", "simrarr;": "\u2972", "slarr;": "\u2190", "smallsetminus;": "\u2216", "smashp;": "\u2a33", "smeparsl;": "\u29e4", "smid;": "\u2223", "smile;": "\u2323", "smt;": "\u2aaa", "smte;": "\u2aac", "smtes;": "\u2aac\ufe00", "softcy;": "\u044c", "sol;": "/", "solb;": "\u29c4", "solbar;": "\u233f", "sopf;": "\U0001d564", "spades;": "\u2660", "spadesuit;": "\u2660", "spar;": "\u2225", "sqcap;": "\u2293", "sqcaps;": "\u2293\ufe00", "sqcup;": "\u2294", "sqcups;": "\u2294\ufe00", "sqsub;": "\u228f", "sqsube;": "\u2291", "sqsubset;": "\u228f", "sqsubseteq;": "\u2291", "sqsup;": "\u2290", "sqsupe;": "\u2292", "sqsupset;": "\u2290", "sqsupseteq;": "\u2292", "squ;": "\u25a1", "square;": "\u25a1", "squarf;": "\u25aa", "squf;": "\u25aa", "srarr;": "\u2192", "sscr;": "\U0001d4c8", "ssetmn;": "\u2216", "ssmile;": "\u2323", "sstarf;": "\u22c6", "star;": "\u2606", "starf;": "\u2605", "straightepsilon;": "\u03f5", "straightphi;": "\u03d5", "strns;": "\xaf", "sub;": "\u2282", "subE;": "\u2ac5", "subdot;": "\u2abd", "sube;": "\u2286", "subedot;": "\u2ac3", "submult;": "\u2ac1", "subnE;": "\u2acb", "subne;": "\u228a", "subplus;": "\u2abf", "subrarr;": "\u2979", "subset;": "\u2282", "subseteq;": "\u2286", "subseteqq;": "\u2ac5", "subsetneq;": "\u228a", "subsetneqq;": "\u2acb", "subsim;": "\u2ac7", "subsub;": "\u2ad5", "subsup;": "\u2ad3", "succ;": "\u227b", "succapprox;": "\u2ab8", "succcurlyeq;": "\u227d", "succeq;": "\u2ab0", "succnapprox;": "\u2aba", "succneqq;": "\u2ab6", "succnsim;": "\u22e9", "succsim;": "\u227f", "sum;": "\u2211", "sung;": "\u266a", "sup1": "\xb9", "sup1;": "\xb9", "sup2": "\xb2", "sup2;": "\xb2", "sup3": "\xb3", "sup3;": "\xb3", "sup;": "\u2283", "supE;": "\u2ac6", "supdot;": "\u2abe", "supdsub;": "\u2ad8", "supe;": "\u2287", "supedot;": "\u2ac4", "suphsol;": "\u27c9", "suphsub;": "\u2ad7", "suplarr;": "\u297b", "supmult;": "\u2ac2", "supnE;": "\u2acc", "supne;": "\u228b", "supplus;": "\u2ac0", "supset;": "\u2283", "supseteq;": "\u2287", "supseteqq;": "\u2ac6", "supsetneq;": "\u228b", "supsetneqq;": "\u2acc", "supsim;": "\u2ac8", "supsub;": "\u2ad4", "supsup;": "\u2ad6", 
"swArr;": "\u21d9", "swarhk;": "\u2926", "swarr;": "\u2199", "swarrow;": "\u2199", "swnwar;": "\u292a", "szlig": "\xdf", "szlig;": "\xdf", "target;": "\u2316", "tau;": "\u03c4", "tbrk;": "\u23b4", "tcaron;": "\u0165", "tcedil;": "\u0163", "tcy;": "\u0442", "tdot;": "\u20db", "telrec;": "\u2315", "tfr;": "\U0001d531", "there4;": "\u2234", "therefore;": "\u2234", "theta;": "\u03b8", "thetasym;": "\u03d1", "thetav;": "\u03d1", "thickapprox;": "\u2248", "thicksim;": "\u223c", "thinsp;": "\u2009", "thkap;": "\u2248", "thksim;": "\u223c", "thorn": "\xfe", "thorn;": "\xfe", "tilde;": "\u02dc", "times": "\xd7", "times;": "\xd7", "timesb;": "\u22a0", "timesbar;": "\u2a31", "timesd;": "\u2a30", "tint;": "\u222d", "toea;": "\u2928", "top;": "\u22a4", "topbot;": "\u2336", "topcir;": "\u2af1", "topf;": "\U0001d565", "topfork;": "\u2ada", "tosa;": "\u2929", "tprime;": "\u2034", "trade;": "\u2122", "triangle;": "\u25b5", "triangledown;": "\u25bf", "triangleleft;": "\u25c3", "trianglelefteq;": "\u22b4", "triangleq;": "\u225c", "triangleright;": "\u25b9", "trianglerighteq;": "\u22b5", "tridot;": "\u25ec", "trie;": "\u225c", "triminus;": "\u2a3a", "triplus;": "\u2a39", "trisb;": "\u29cd", "tritime;": "\u2a3b", "trpezium;": "\u23e2", "tscr;": "\U0001d4c9", "tscy;": "\u0446", "tshcy;": "\u045b", "tstrok;": "\u0167", "twixt;": "\u226c", "twoheadleftarrow;": "\u219e", "twoheadrightarrow;": "\u21a0", "uArr;": "\u21d1", "uHar;": "\u2963", "uacute": "\xfa", "uacute;": "\xfa", "uarr;": "\u2191", "ubrcy;": "\u045e", "ubreve;": "\u016d", "ucirc": "\xfb", "ucirc;": "\xfb", "ucy;": "\u0443", "udarr;": "\u21c5", "udblac;": "\u0171", "udhar;": "\u296e", "ufisht;": "\u297e", "ufr;": "\U0001d532", "ugrave": "\xf9", "ugrave;": "\xf9", "uharl;": "\u21bf", "uharr;": "\u21be", "uhblk;": "\u2580", "ulcorn;": "\u231c", "ulcorner;": "\u231c", "ulcrop;": "\u230f", "ultri;": "\u25f8", "umacr;": "\u016b", "uml": "\xa8", "uml;": "\xa8", "uogon;": "\u0173", "uopf;": "\U0001d566", "uparrow;": "\u2191", "updownarrow;": "\u2195", "upharpoonleft;": "\u21bf", "upharpoonright;": "\u21be", "uplus;": "\u228e", "upsi;": "\u03c5", "upsih;": "\u03d2", "upsilon;": "\u03c5", "upuparrows;": "\u21c8", "urcorn;": "\u231d", "urcorner;": "\u231d", "urcrop;": "\u230e", "uring;": "\u016f", "urtri;": "\u25f9", "uscr;": "\U0001d4ca", "utdot;": "\u22f0", "utilde;": "\u0169", "utri;": "\u25b5", "utrif;": "\u25b4", "uuarr;": "\u21c8", "uuml": "\xfc", "uuml;": "\xfc", "uwangle;": "\u29a7", "vArr;": "\u21d5", "vBar;": "\u2ae8", "vBarv;": "\u2ae9", "vDash;": "\u22a8", "vangrt;": "\u299c", "varepsilon;": "\u03f5", "varkappa;": "\u03f0", "varnothing;": "\u2205", "varphi;": "\u03d5", "varpi;": "\u03d6", "varpropto;": "\u221d", "varr;": "\u2195", "varrho;": "\u03f1", "varsigma;": "\u03c2", "varsubsetneq;": "\u228a\ufe00", "varsubsetneqq;": "\u2acb\ufe00", "varsupsetneq;": "\u228b\ufe00", "varsupsetneqq;": "\u2acc\ufe00", "vartheta;": "\u03d1", "vartriangleleft;": "\u22b2", "vartriangleright;": "\u22b3", "vcy;": "\u0432", "vdash;": "\u22a2", "vee;": "\u2228", "veebar;": "\u22bb", "veeeq;": "\u225a", "vellip;": "\u22ee", "verbar;": "|", "vert;": "|", "vfr;": "\U0001d533", "vltri;": "\u22b2", "vnsub;": "\u2282\u20d2", "vnsup;": "\u2283\u20d2", "vopf;": "\U0001d567", "vprop;": "\u221d", "vrtri;": "\u22b3", "vscr;": "\U0001d4cb", "vsubnE;": "\u2acb\ufe00", "vsubne;": "\u228a\ufe00", "vsupnE;": "\u2acc\ufe00", "vsupne;": "\u228b\ufe00", "vzigzag;": "\u299a", "wcirc;": "\u0175", "wedbar;": "\u2a5f", "wedge;": "\u2227", "wedgeq;": "\u2259", "weierp;": "\u2118", "wfr;": 
"\U0001d534", "wopf;": "\U0001d568", "wp;": "\u2118", "wr;": "\u2240", "wreath;": "\u2240", "wscr;": "\U0001d4cc", "xcap;": "\u22c2", "xcirc;": "\u25ef", "xcup;": "\u22c3", "xdtri;": "\u25bd", "xfr;": "\U0001d535", "xhArr;": "\u27fa", "xharr;": "\u27f7", "xi;": "\u03be", "xlArr;": "\u27f8", "xlarr;": "\u27f5", "xmap;": "\u27fc", "xnis;": "\u22fb", "xodot;": "\u2a00", "xopf;": "\U0001d569", "xoplus;": "\u2a01", "xotime;": "\u2a02", "xrArr;": "\u27f9", "xrarr;": "\u27f6", "xscr;": "\U0001d4cd", "xsqcup;": "\u2a06", "xuplus;": "\u2a04", "xutri;": "\u25b3", "xvee;": "\u22c1", "xwedge;": "\u22c0", "yacute": "\xfd", "yacute;": "\xfd", "yacy;": "\u044f", "ycirc;": "\u0177", "ycy;": "\u044b", "yen": "\xa5", "yen;": "\xa5", "yfr;": "\U0001d536", "yicy;": "\u0457", "yopf;": "\U0001d56a", "yscr;": "\U0001d4ce", "yucy;": "\u044e", "yuml": "\xff", "yuml;": "\xff", "zacute;": "\u017a", "zcaron;": "\u017e", "zcy;": "\u0437", "zdot;": "\u017c", "zeetrf;": "\u2128", "zeta;": "\u03b6", "zfr;": "\U0001d537", "zhcy;": "\u0436", "zigrarr;": "\u21dd", "zopf;": "\U0001d56b", "zscr;": "\U0001d4cf", "zwj;": "\u200d", "zwnj;": "\u200c", } replacementCharacters = { 0x0: "\uFFFD", 0x0d: "\u000D", 0x80: "\u20AC", 0x81: "\u0081", 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", 0x85: "\u2026", 0x86: "\u2020", 0x87: "\u2021", 0x88: "\u02C6", 0x89: "\u2030", 0x8A: "\u0160", 0x8B: "\u2039", 0x8C: "\u0152", 0x8D: "\u008D", 0x8E: "\u017D", 0x8F: "\u008F", 0x90: "\u0090", 0x91: "\u2018", 0x92: "\u2019", 0x93: "\u201C", 0x94: "\u201D", 0x95: "\u2022", 0x96: "\u2013", 0x97: "\u2014", 0x98: "\u02DC", 0x99: "\u2122", 0x9A: "\u0161", 0x9B: "\u203A", 0x9C: "\u0153", 0x9D: "\u009D", 0x9E: "\u017E", 0x9F: "\u0178", } encodings = { '437': 'cp437', '850': 'cp850', '852': 'cp852', '855': 'cp855', '857': 'cp857', '860': 'cp860', '861': 'cp861', '862': 'cp862', '863': 'cp863', '865': 'cp865', '866': 'cp866', '869': 'cp869', 'ansix341968': 'ascii', 'ansix341986': 'ascii', 'arabic': 'iso8859-6', 'ascii': 'ascii', 'asmo708': 'iso8859-6', 'big5': 'big5', 'big5hkscs': 'big5hkscs', 'chinese': 'gbk', 'cp037': 'cp037', 'cp1026': 'cp1026', 'cp154': 'ptcp154', 'cp367': 'ascii', 'cp424': 'cp424', 'cp437': 'cp437', 'cp500': 'cp500', 'cp775': 'cp775', 'cp819': 'windows-1252', 'cp850': 'cp850', 'cp852': 'cp852', 'cp855': 'cp855', 'cp857': 'cp857', 'cp860': 'cp860', 'cp861': 'cp861', 'cp862': 'cp862', 'cp863': 'cp863', 'cp864': 'cp864', 'cp865': 'cp865', 'cp866': 'cp866', 'cp869': 'cp869', 'cp936': 'gbk', 'cpgr': 'cp869', 'cpis': 'cp861', 'csascii': 'ascii', 'csbig5': 'big5', 'cseuckr': 'cp949', 'cseucpkdfmtjapanese': 'euc_jp', 'csgb2312': 'gbk', 'cshproman8': 'hp-roman8', 'csibm037': 'cp037', 'csibm1026': 'cp1026', 'csibm424': 'cp424', 'csibm500': 'cp500', 'csibm855': 'cp855', 'csibm857': 'cp857', 'csibm860': 'cp860', 'csibm861': 'cp861', 'csibm863': 'cp863', 'csibm864': 'cp864', 'csibm865': 'cp865', 'csibm866': 'cp866', 'csibm869': 'cp869', 'csiso2022jp': 'iso2022_jp', 'csiso2022jp2': 'iso2022_jp_2', 'csiso2022kr': 'iso2022_kr', 'csiso58gb231280': 'gbk', 'csisolatin1': 'windows-1252', 'csisolatin2': 'iso8859-2', 'csisolatin3': 'iso8859-3', 'csisolatin4': 'iso8859-4', 'csisolatin5': 'windows-1254', 'csisolatin6': 'iso8859-10', 'csisolatinarabic': 'iso8859-6', 'csisolatincyrillic': 'iso8859-5', 'csisolatingreek': 'iso8859-7', 'csisolatinhebrew': 'iso8859-8', 'cskoi8r': 'koi8-r', 'csksc56011987': 'cp949', 'cspc775baltic': 'cp775', 'cspc850multilingual': 'cp850', 'cspc862latinhebrew': 'cp862', 'cspc8codepage437': 'cp437', 'cspcp852': 
'cp852', 'csptcp154': 'ptcp154', 'csshiftjis': 'shift_jis', 'csunicode11utf7': 'utf-7', 'cyrillic': 'iso8859-5', 'cyrillicasian': 'ptcp154', 'ebcdiccpbe': 'cp500', 'ebcdiccpca': 'cp037', 'ebcdiccpch': 'cp500', 'ebcdiccphe': 'cp424', 'ebcdiccpnl': 'cp037', 'ebcdiccpus': 'cp037', 'ebcdiccpwt': 'cp037', 'ecma114': 'iso8859-6', 'ecma118': 'iso8859-7', 'elot928': 'iso8859-7', 'eucjp': 'euc_jp', 'euckr': 'cp949', 'extendedunixcodepackedformatforjapanese': 'euc_jp', 'gb18030': 'gb18030', 'gb2312': 'gbk', 'gb231280': 'gbk', 'gbk': 'gbk', 'greek': 'iso8859-7', 'greek8': 'iso8859-7', 'hebrew': 'iso8859-8', 'hproman8': 'hp-roman8', 'hzgb2312': 'hz', 'ibm037': 'cp037', 'ibm1026': 'cp1026', 'ibm367': 'ascii', 'ibm424': 'cp424', 'ibm437': 'cp437', 'ibm500': 'cp500', 'ibm775': 'cp775', 'ibm819': 'windows-1252', 'ibm850': 'cp850', 'ibm852': 'cp852', 'ibm855': 'cp855', 'ibm857': 'cp857', 'ibm860': 'cp860', 'ibm861': 'cp861', 'ibm862': 'cp862', 'ibm863': 'cp863', 'ibm864': 'cp864', 'ibm865': 'cp865', 'ibm866': 'cp866', 'ibm869': 'cp869', 'iso2022jp': 'iso2022_jp', 'iso2022jp2': 'iso2022_jp_2', 'iso2022kr': 'iso2022_kr', 'iso646irv1991': 'ascii', 'iso646us': 'ascii', 'iso88591': 'windows-1252', 'iso885910': 'iso8859-10', 'iso8859101992': 'iso8859-10', 'iso885911987': 'windows-1252', 'iso885913': 'iso8859-13', 'iso885914': 'iso8859-14', 'iso8859141998': 'iso8859-14', 'iso885915': 'iso8859-15', 'iso885916': 'iso8859-16', 'iso8859162001': 'iso8859-16', 'iso88592': 'iso8859-2', 'iso885921987': 'iso8859-2', 'iso88593': 'iso8859-3', 'iso885931988': 'iso8859-3', 'iso88594': 'iso8859-4', 'iso885941988': 'iso8859-4', 'iso88595': 'iso8859-5', 'iso885951988': 'iso8859-5', 'iso88596': 'iso8859-6', 'iso885961987': 'iso8859-6', 'iso88597': 'iso8859-7', 'iso885971987': 'iso8859-7', 'iso88598': 'iso8859-8', 'iso885981988': 'iso8859-8', 'iso88599': 'windows-1254', 'iso885991989': 'windows-1254', 'isoceltic': 'iso8859-14', 'isoir100': 'windows-1252', 'isoir101': 'iso8859-2', 'isoir109': 'iso8859-3', 'isoir110': 'iso8859-4', 'isoir126': 'iso8859-7', 'isoir127': 'iso8859-6', 'isoir138': 'iso8859-8', 'isoir144': 'iso8859-5', 'isoir148': 'windows-1254', 'isoir149': 'cp949', 'isoir157': 'iso8859-10', 'isoir199': 'iso8859-14', 'isoir226': 'iso8859-16', 'isoir58': 'gbk', 'isoir6': 'ascii', 'koi8r': 'koi8-r', 'koi8u': 'koi8-u', 'korean': 'cp949', 'ksc5601': 'cp949', 'ksc56011987': 'cp949', 'ksc56011989': 'cp949', 'l1': 'windows-1252', 'l10': 'iso8859-16', 'l2': 'iso8859-2', 'l3': 'iso8859-3', 'l4': 'iso8859-4', 'l5': 'windows-1254', 'l6': 'iso8859-10', 'l8': 'iso8859-14', 'latin1': 'windows-1252', 'latin10': 'iso8859-16', 'latin2': 'iso8859-2', 'latin3': 'iso8859-3', 'latin4': 'iso8859-4', 'latin5': 'windows-1254', 'latin6': 'iso8859-10', 'latin8': 'iso8859-14', 'latin9': 'iso8859-15', 'ms936': 'gbk', 'mskanji': 'shift_jis', 'pt154': 'ptcp154', 'ptcp154': 'ptcp154', 'r8': 'hp-roman8', 'roman8': 'hp-roman8', 'shiftjis': 'shift_jis', 'tis620': 'cp874', 'unicode11utf7': 'utf-7', 'us': 'ascii', 'usascii': 'ascii', 'utf16': 'utf-16', 'utf16be': 'utf-16-be', 'utf16le': 'utf-16-le', 'utf8': 'utf-8', 'windows1250': 'cp1250', 'windows1251': 'cp1251', 'windows1252': 'cp1252', 'windows1253': 'cp1253', 'windows1254': 'cp1254', 'windows1255': 'cp1255', 'windows1256': 'cp1256', 'windows1257': 'cp1257', 'windows1258': 'cp1258', 'windows936': 'gbk', 'x-x-big5': 'big5'} tokenTypes = { "Doctype": 0, "Characters": 1, "SpaceCharacters": 2, "StartTag": 3, "EndTag": 4, "EmptyTag": 5, "Comment": 6, "ParseError": 7 } tagTokenTypes = 
frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]))

prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"


class DataLossWarning(UserWarning):
    pass


class ReparseException(Exception):
    pass
apache-2.0
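
A quick sketch of how the tokenTypes/tagTokenTypes tables above are typically consumed; the token dict shape shown is an assumption for illustration, not taken from the file itself.

# Usage sketch (assumed token shape): tagTokenTypes being a frozenset makes
# the per-token membership test O(1).
token = {"type": tokenTypes["StartTag"], "name": "div"}
if token["type"] in tagTokenTypes:
    print("tag token:", token["name"])
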
DVM-BITS-Pilani/BITS-BOSM-2015
bosm2015/events/migrations/0001_initial.py
2
1132
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import ckeditor.fields


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='EventNew',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
                ('content', ckeditor.fields.RichTextField()),
                ('description', models.CharField(max_length=140, blank=True)),
                ('icon', models.ImageField(upload_to=b'icons', blank=True)),
                ('date', models.CharField(default=b'TBA', max_length=100)),
                ('time', models.CharField(default=b'TBA', max_length=100)),
                ('venue', models.CharField(default=b'TBA', max_length=100)),
                ('endtime', models.CharField(default=b'TBA', max_length=100)),
            ],
            options={
                'verbose_name_plural': 'events',
            },
        ),
    ]
gpl-2.0
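
The migration above encodes a full model definition; here is the model class that would plausibly generate it, reconstructed from the field list as an illustrative assumption (it is not part of the repository).

# Reconstructed sketch (assumption), inferred from the migration's fields.
from django.db import models
import ckeditor.fields


class EventNew(models.Model):
    name = models.CharField(unique=True, max_length=100)
    content = ckeditor.fields.RichTextField()
    description = models.CharField(max_length=140, blank=True)
    icon = models.ImageField(upload_to=b'icons', blank=True)
    date = models.CharField(default=b'TBA', max_length=100)
    time = models.CharField(default=b'TBA', max_length=100)
    venue = models.CharField(default=b'TBA', max_length=100)
    endtime = models.CharField(default=b'TBA', max_length=100)

    class Meta:
        verbose_name_plural = 'events'
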
ibinti/intellij-community
python/helpers/pycharm/django_manage_commands_provider/_xml.py
78
6083
# coding=utf-8 """ This module exports information about manage commands and options from django to PyCharm. Information is provided in XML (to prevent encoding troubles and simplify deserialization on java side). Right after xml declaration, before root tag it contains following comment: <!--jb pycharm data start--> Use it to make sure you found correct XML It does not have schema (yet!) but here is XML format it uses. <commandInfo-array> -- root <commandInfo args="args description" help="human readable text" name="command name"> -- info about command <option help="option help" numberOfArgs="number of values (nargs)" type="option_type (see below)"> -- one entry for each option <longNames>--each-for-one-long-opt-name</longNames> <shortNames>-each-for-one-short-name</shortNames> <choices>--each-for-one-available-value</choices> </option> </commandInfo> </commandInfo-array> "option_type" is only set if "numberOfArgs" > 0, and it can be: "int" (means integer), "choices" (means opt can have one of the values, provided in choices) or "str" that means "string" (option may have any value) Classes like DjangoCommandsInfo is used on Java side. TODO: Since Django 1.8 we can fetch much more info from argparse like positional argument names, nargs etc. Use it! """ from xml.dom import minidom from xml.dom.minidom import Element from _jb_utils import VersionAgnosticUtils __author__ = 'Ilya.Kazakevich' class XmlDumper(object): """" Creates an API to generate XML provided in this package. How to use: * dumper.start_command(..) * dumper.add_command_option(..) # optional * dumper.close_command() * print(dumper.xml) """ __command_info_tag = "commandInfo" # Name of main tag def __init__(self): self.__document = minidom.Document() self.__root = self.__document.createElement("{0}-array".format(XmlDumper.__command_info_tag)) self.__document.appendChild(self.__document.createComment("jb pycharm data start")) self.__document.appendChild(self.__root) self.__command_element = None def __create_text_array(self, parent, tag_name, values): """ Creates array of text elements and adds them to parent :type parent Element :type tag_name str :type values list of str :param parent destination to add new elements :param tag_name name tag to create to hold text :param values list of values to add """ for value in values: tag = self.__document.createElement(tag_name) text = self.__document.createTextNode(str(value)) tag.appendChild(text) parent.appendChild(tag) def start_command(self, command_name, command_help_text): """ Starts manage command :param command_name: command name :param command_help_text: command help """ assert not bool(self.__command_element), "Already in command" self.__command_element = self.__document.createElement(XmlDumper.__command_info_tag) self.__command_element.setAttribute("name", command_name) self.__command_element.setAttribute("help", command_help_text) self.__root.appendChild(self.__command_element) def set_arguments(self, command_args_text): """ Adds "arguments help" to command. TODO: Use real list of arguments instead of this text when people migrate to argparse (Dj. 
1.8) :param command_args_text: command text for args :type command_args_text str """ assert bool(self.__command_element), "Not in a a command" self.__command_element.setAttribute("args", VersionAgnosticUtils().to_unicode(command_args_text)) def add_command_option(self, long_opt_names, short_opt_names, help_text, argument_info): """ Adds command option :param argument_info: None if option does not accept any arguments or tuple of (num_of_args, type_info) \ where num_of_args is int > 0 and type_info is str, representing type (only "int" and "string" are supported) \ or list of available types in case of choices :param long_opt_names: list of long opt names :param short_opt_names: list of short opt names :param help_text: help text :type long_opt_names iterable of str :type short_opt_names iterable of str :type help_text str :type argument_info tuple or None """ assert isinstance(self.__command_element, Element), "Add option in command only" option = self.__document.createElement("option") opt_type_to_report = None num_of_args = 0 if argument_info: (num_of_args, type_info) = argument_info if isinstance(type_info, list): self.__create_text_array(option, "choices", type_info) opt_type_to_report = "choices" else: opt_type_to_report = "int" if str(type_info) == "int" else "str" if long_opt_names: self.__create_text_array(option, "longNames", long_opt_names) if short_opt_names: self.__create_text_array(option, "shortNames", short_opt_names) if opt_type_to_report: option.setAttribute("type", opt_type_to_report) option.setAttribute("help", help_text) if num_of_args: option.setAttribute("numberOfArgs", str(num_of_args)) self.__command_element.appendChild(option) def close_command(self): """ Closes currently opened command """ assert bool(self.__command_element), "No command to close" self.__command_element = None pass @property def xml(self): """ :return: current commands as XML as described in package :rtype str """ document = self.__document.toxml(encoding="utf-8") return VersionAgnosticUtils().to_unicode(document.decode("utf-8") if isinstance(document, bytes) else document)
apache-2.0
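
A minimal usage sketch for XmlDumper, following the "How to use" steps in its own docstring; the command name and option values are made up for illustration.

# Illustrative only: command and option values are hypothetical.
dumper = XmlDumper()
dumper.start_command("runserver", "Starts a lightweight Web server.")
dumper.add_command_option(
    long_opt_names=["--addrport"],
    short_opt_names=[],
    help_text="port number or ipaddr:port",
    argument_info=(1, "str"),  # one string-typed argument
)
dumper.close_command()
print(dumper.xml)  # XML prefixed with the <!--jb pycharm data start--> marker
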
redhat-openstack/django
django/contrib/formtools/tests/wizard/wizardtests/forms.py
313
2203
import os
import tempfile

from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context

from django.contrib.formtools.wizard.views import WizardView

temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)


class Page1(forms.Form):
    name = forms.CharField(max_length=100)
    user = forms.ModelChoiceField(queryset=User.objects.all())
    thirsty = forms.NullBooleanField()


class Page2(forms.Form):
    address1 = forms.CharField(max_length=100)
    address2 = forms.CharField(max_length=100)
    file1 = forms.FileField()


class Page3(forms.Form):
    random_crap = forms.CharField(max_length=100)

Page4 = formset_factory(Page3, extra=2)


class ContactWizard(WizardView):
    file_storage = temp_storage

    def done(self, form_list, **kwargs):
        c = Context({
            'form_list': [x.cleaned_data for x in form_list],
            'all_cleaned_data': self.get_all_cleaned_data(),
        })

        for form in self.form_list.keys():
            c[form] = self.get_cleaned_data_for_step(form)

        c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
        return HttpResponse(Template('').render(c))

    def get_context_data(self, form, **kwargs):
        context = super(ContactWizard, self).get_context_data(form, **kwargs)
        if self.storage.current_step == 'form2':
            context.update({'another_var': True})
        return context


class UserForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('username', 'email')

UserFormSet = modelformset_factory(User, form=UserForm)


class SessionContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'


class CookieContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
bsd-3-clause
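
For context, a wizard like ContactWizard is typically exposed through a URLconf; a hedged sketch follows (the URL pattern and the choice of the session-backed subclass are illustrative, not taken from the test suite).

# Hedged sketch: wiring the wizard into a URLconf.
from django.conf.urls import url

urlpatterns = [
    url(r'^contact/$', SessionContactWizard.as_view([Page1, Page2, Page3, Page4])),
]
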
zaeleus/rust
src/grammar/testparser.py
80
2564
#!/usr/bin/env python
#
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# ignore-tidy-linelength

import sys
import os
import subprocess
import argparse

# usage: testparser.py [-h] [-p PARSER [PARSER ...]] -s SOURCE_DIR

# Parsers should read from stdin and return exit status 0 for a
# successful parse, and nonzero for an unsuccessful parse

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--parser', nargs='+')
parser.add_argument('-s', '--source-dir', nargs=1, required=True)
args = parser.parse_args(sys.argv[1:])

total = 0
ok = {}
bad = {}
for parser in args.parser:
    ok[parser] = 0
    bad[parser] = []
devnull = open(os.devnull, 'w')
print("\n")

for base, dirs, files in os.walk(args.source_dir[0]):
    for f in filter(lambda p: p.endswith('.rs'), files):
        p = os.path.join(base, f)
        parse_fail = 'parse-fail' in p
        if sys.version_info.major == 3:
            lines = open(p, encoding='utf-8').readlines()
        else:
            lines = open(p).readlines()
        if any('ignore-test' in line or 'ignore-lexer-test' in line for line in lines):
            continue
        total += 1
        for parser in args.parser:
            if subprocess.call(parser,
                               stdin=open(p),
                               stderr=subprocess.STDOUT,
                               stdout=devnull) == 0:
                if parse_fail:
                    bad[parser].append(p)
                else:
                    ok[parser] += 1
            else:
                if parse_fail:
                    ok[parser] += 1
                else:
                    bad[parser].append(p)
        parser_stats = ', '.join(['{}: {}'.format(parser, ok[parser]) for parser in args.parser])
        sys.stdout.write("\033[K\r total: {}, {}, scanned {}"
                         .format(total, parser_stats, os.path.relpath(p)))

devnull.close()

print("\n")

for parser in args.parser:
    filename = os.path.basename(parser) + '.bad'
    print("writing {} files that did not yield the correct result with {} to {}".format(len(bad[parser]), parser, filename))
    with open(filename, "w") as f:
        for p in bad[parser]:
            f.write(p)
            f.write("\n")
apache-2.0
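
An illustrative invocation matching the usage comment embedded in the script above; the parser paths are hypothetical.

# ./testparser.py -p build/parser-a build/parser-b -s src/test/
#
# Each parser reads one .rs file on stdin and exits 0 on a successful
# parse; files whose path contains 'parse-fail' are expected to fail.
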
Yipit/pyeqs
tests/functional/test_connection.py
1
1657
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from sure import scenario

from pyeqs import QuerySet
from tests.helpers import prepare_data, cleanup_data, add_document


@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_string(context):
    """
    Connect with host string
    """
    # When I create a queryset
    t = QuerySet("localhost", index="foo")

    # And there are records
    add_document("foo", {"bar": "baz"})

    # And I do a search
    results = t[0:1]

    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})


@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_dict(context):
    """
    Connect with host dict
    """
    # When I create a queryset
    connection_info = {"host": "localhost", "port": 9200}
    t = QuerySet(connection_info, index="foo")

    # And there are records
    add_document("foo", {"bar": "baz"})

    # And I do a search
    results = t[0:1]

    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})


@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_list(context):
    """
    Connect with host list
    """
    # When I create a queryset
    connection_info = [{"host": "localhost", "port": 9200}]
    t = QuerySet(connection_info, index="foo")

    # And there are records
    add_document("foo", {"bar": "baz"})

    # And I do a search
    results = t[0:1]

    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})
mit
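
A short sketch reusing only the API exercised by the tests above (the index name and slice bounds are arbitrary); slicing the QuerySet is what executes the search.

# Assumption-light sketch: only calls demonstrated in the tests are used.
qs = QuerySet("localhost", index="foo")
hits = qs[0:10]                      # executes the search, returns hits
sources = [hit['_source'] for hit in hits]
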
jawilson/home-assistant
homeassistant/components/camera/bloomsky.py
28
2160
""" Support for a camera of a BloomSky weather station. For more details about this component, please refer to the documentation at https://home-assistant.io/components/camera.bloomsky/ """ import logging import requests from homeassistant.components.camera import Camera from homeassistant.loader import get_component DEPENDENCIES = ['bloomsky'] # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Setup access to BloomSky cameras.""" bloomsky = get_component('bloomsky') for device in bloomsky.BLOOMSKY.devices.values(): add_devices([BloomSkyCamera(bloomsky.BLOOMSKY, device)]) class BloomSkyCamera(Camera): """Representation of the images published from the BloomSky's camera.""" def __init__(self, bs, device): """Setup for access to the BloomSky camera images.""" super(BloomSkyCamera, self).__init__() self._name = device['DeviceName'] self._id = device['DeviceID'] self._bloomsky = bs self._url = "" self._last_url = "" # _last_image will store images as they are downloaded so that the # frequent updates in home-assistant don't keep poking the server # to download the same image over and over. self._last_image = "" self._logger = logging.getLogger(__name__) def camera_image(self): """Update the camera's image if it has changed.""" try: self._url = self._bloomsky.devices[self._id]['Data']['ImageURL'] self._bloomsky.refresh_devices() # If the URL hasn't changed then the image hasn't changed. if self._url != self._last_url: response = requests.get(self._url, timeout=10) self._last_url = self._url self._last_image = response.content except requests.exceptions.RequestException as error: self._logger.error("Error getting bloomsky image: %s", error) return None return self._last_image @property def name(self): """Return the name of this BloomSky device.""" return self._name
mit
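The caching pattern in camera_image — skip the download whenever the image URL is unchanged — is useful outside Home Assistant too. A minimal standalone sketch of the same idea; the class name and interface are hypothetical:

```python
import requests


class UrlChangeCache(object):
    """Re-fetch a resource only when its URL changes."""

    def __init__(self):
        self._last_url = None
        self._last_body = None

    def fetch(self, url, timeout=10):
        # Same assumption as BloomSkyCamera: a new image gets a new URL,
        # so an unchanged URL means the cached bytes are still current.
        if url != self._last_url:
            response = requests.get(url, timeout=timeout)
            self._last_url = url
            self._last_body = response.content
        return self._last_body
```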
jsoref/django
tests/postgres_tests/test_aggregates.py
307
11910
from django.contrib.postgres.aggregates import (
    ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, RegrAvgX,
    RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX, RegrSXY,
    RegrSYY, StatAggregate, StringAgg,
)
from django.db.models.expressions import F, Value
from django.test.utils import Approximate

from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel


class TestGeneralAggregate(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)

    def test_array_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})

    def test_array_agg_integerfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})

    def test_array_agg_booleanfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': [True, False, False, True]})

    def test_array_agg_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': []})

    def test_bit_and_general(self):
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})

    def test_bit_and_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 1})

    def test_bit_and_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})

    def test_bit_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': None})

    def test_bit_or_general(self):
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})

    def test_bit_or_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})

    def test_bit_or_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 0})

    def test_bit_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': None})

    def test_bool_and_general(self):
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': False})

    def test_bool_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': None})

    def test_bool_or_general(self):
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': True})

    def test_bool_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': None})

    def test_string_agg_requires_delimiter(self):
        with self.assertRaises(TypeError):
            AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))

    def test_string_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})

    def test_string_agg_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': ''})


class TestStatisticsAggregate(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        StatTestModel.objects.create(
            int1=1,
            int2=3,
            related_field=AggregateTestModel.objects.create(integer_field=0),
        )
        StatTestModel.objects.create(
            int1=2,
            int2=2,
            related_field=AggregateTestModel.objects.create(integer_field=1),
        )
        StatTestModel.objects.create(
            int1=3,
            int2=1,
            related_field=AggregateTestModel.objects.create(integer_field=2),
        )

    # Tests for base class (StatAggregate)

    def test_missing_arguments_raises_exception(self):
        with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
            StatAggregate(x=None, y=None)

    def test_correct_source_expressions(self):
        func = StatAggregate(x='test', y=13)
        self.assertIsInstance(func.source_expressions[0], Value)
        self.assertIsInstance(func.source_expressions[1], F)

    def test_alias_is_required(self):
        class SomeFunc(StatAggregate):
            function = 'TEST'
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))

    # Test aggregates

    def test_corr_general(self):
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': -1.0})

    def test_corr_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': None})

    def test_covar_pop_general(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})

    def test_covar_pop_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': None})

    def test_covar_pop_sample(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': -1.0})

    def test_covar_pop_sample_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': None})

    def test_regr_avgx_general(self):
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': 2.0})

    def test_regr_avgx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': None})

    def test_regr_avgy_general(self):
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': 2.0})

    def test_regr_avgy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': None})

    def test_regr_count_general(self):
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 3})

    def test_regr_count_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 0})

    def test_regr_intercept_general(self):
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': 4})

    def test_regr_intercept_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': None})

    def test_regr_r2_general(self):
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': 1})

    def test_regr_r2_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': None})

    def test_regr_slope_general(self):
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': -1})

    def test_regr_slope_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': None})

    def test_regr_sxx_general(self):
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': 2.0})

    def test_regr_sxx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': None})

    def test_regr_sxy_general(self):
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': -2.0})

    def test_regr_sxy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': None})

    def test_regr_syy_general(self):
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': 2.0})

    def test_regr_syy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': None})

    def test_regr_avgx_with_related_obj_and_number_as_argument(self):
        """
        This is a more complex test to check if JOIN on field and number as
        argument works as expected.
        """
        values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
        self.assertEqual(values, {'complex_regravgx': 1.0})
bsd-3-clause
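Every aggregate above follows the same call shape: pass the function to .aggregate() and read the result out of the returned dict, with a documented empty-table fallback (None for most functions, [] for ArrayAgg, '' for StringAgg, 0 for RegrCount). A hedged usage sketch against a hypothetical Book model:

```python
from django.contrib.postgres.aggregates import ArrayAgg, StringAgg

from myapp.models import Book  # hypothetical app and model

values = Book.objects.aggregate(
    titles=ArrayAgg('title'),
    joined=StringAgg('title', delimiter='; '),  # delimiter is mandatory
)
# Non-empty table: {'titles': [...], 'joined': 'A; B; ...'}
# Empty table:     {'titles': [],    'joined': ''}
```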
xbot/alfred-pushbullet
lib/requests/packages/chardet/jisfreq.py
3131
47315
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Sampling from about 20M text materials include literature and computer technology # # Japanese frequency table, applied to both S-JIS and EUC-JP # They are sorted in order. # 128 --> 0.77094 # 256 --> 0.85710 # 512 --> 0.92635 # 1024 --> 0.97130 # 2048 --> 0.99431 # # Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58 # Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191 # # Typical Distribution Ratio, 25% of IDR JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 # Char to FreqOrder table , JIS_TABLE_SIZE = 4368 JISCharToFreqOrder = ( 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64 2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80 5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96 1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112 5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128 5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144 5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160 5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176 5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192 5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208 1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224 1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240 1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256 2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272 3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288 3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336 1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 
55,1079, 312, # 352 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368 5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464 5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480 5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496 5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512 4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528 5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544 5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560 5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576 5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592 5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608 5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624 5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640 5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656 5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672 3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688 5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704 5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720 5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736 5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752 5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768 5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784 5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800 5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816 5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832 5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848 5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864 5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880 5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896 5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912 5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928 5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944 5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960 5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976 5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992 5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008 
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024 5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040 5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056 5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072 5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088 5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104 5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120 5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136 5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152 5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168 5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184 5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200 5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216 5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232 5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248 5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264 5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280 5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296 6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312 6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328 6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344 6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360 6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376 6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392 6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408 6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424 4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472 1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488 1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520 3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536 3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568 3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584 3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616 2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648 3936,1516,1274, 
555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664 1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696 1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728 2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744 2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760 2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776 2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792 1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808 1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824 1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840 1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856 2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872 1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888 2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904 1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920 1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936 1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952 1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968 1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984 1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032 1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048 2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064 2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080 2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096 3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112 3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144 3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160 1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192 2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208 1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240 3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256 4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272 2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288 1570,2245, 722,1747,2217, 
905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304 2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320 1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368 1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384 2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400 2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416 2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432 3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448 1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464 2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528 1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544 2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576 1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592 1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624 1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640 1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656 1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688 2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720 2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736 3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752 2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768 1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784 6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800 1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816 2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832 1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880 3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896 3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912 1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928 1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, 
# 2944 1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960 1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008 2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040 3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056 2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088 1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104 2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136 1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168 4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184 2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200 1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232 1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248 2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280 6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296 1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312 1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328 2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344 3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376 3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392 1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424 1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456 3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488 2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520 4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536 2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552 1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568 1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584 1582, 783, 212, 
553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616 1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632 3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648 1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664 3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728 2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744 1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776 1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808 1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872 1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888 1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904 2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920 4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952 1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984 1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000 3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016 1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032 2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048 2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064 1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080 1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096 2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128 2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144 1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160 1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176 1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192 1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208 3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224 2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 
616,1432,1595,1018, 172,4360, # 4240 2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272 3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288 3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304 1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320 2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 #Everything below is of no interest for detection purpose 2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384 6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400 6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416 6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432 6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448 4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464 4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480 3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496 3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512 4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528 3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544 6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560 4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576 6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592 6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608 6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624 6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640 6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656 6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672 3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688 3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704 6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720 2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736 4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752 4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768 4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784 6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800 3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816 4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832 4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848 6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864 
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880 6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896 3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912 2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928 4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944 2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960 6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976 4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992 6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008 6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024 6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040 4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056 6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072 2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088 6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104 4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120 6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136 4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152 4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168 6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184 6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200 6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216 3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232 1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248 3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264 3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280 4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296 6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312 3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328 6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344 3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360 3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376 2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392 6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408 6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424 3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440 6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456 3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472 6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488 6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504 
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520 4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536 6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552 4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568 3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584 3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600 6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616 6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632 4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648 6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664 6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680 6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696 6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712 6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728 6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744 4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760 4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776 3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792 6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808 4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824 2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840 6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856 6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872 4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888 2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904 4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920 2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936 4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952 4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968 4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984 6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000 3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016 6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032 3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048 6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064 2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080 3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096 7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112 2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128 3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144 
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160 3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176 3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192 7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208 7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224 7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240 7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256 7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272 4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288 3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304 3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320 4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336 3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352 3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368 7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384 4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400 7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416 7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432 7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448 7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464 7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480 4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496 4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512 7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528 3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544 4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560 7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576 7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592 4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608 3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624 3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640 7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656 4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672 4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688 4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704 4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720 4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736 4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752 7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768 7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784 
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800 7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816 7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832 2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848 3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864 7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880 7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896 3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912 4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928 3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944 3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960 2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976 7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992 7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008 4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024 3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040 3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056 7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072 7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088 7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104 4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120 7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136 2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152 3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168 4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184 7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200 4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216 4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232 7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248 7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264 5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280 7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296 7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312 7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328 7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344 7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360 5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376 5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392 7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408 3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424 
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440 7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456 3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472 7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488 7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504 1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520 3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536 4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552 2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568 3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584 2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600 5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616 4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632 4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648 5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664 7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680 7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696 7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712 7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728 3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744 7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760 3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776 7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792 4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808 7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824 7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840 7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856 7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872 7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888 7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904 7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920 7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936 7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952 7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968 7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984 7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000 8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016 8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032 8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048 8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064 
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080 8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096 8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112 8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128 8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144 8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160 8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176 8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192 8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208 8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224 8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240 8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256 8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272 # flake8: noqa
mit
SmartPeople/zulip
zerver/webhooks/stash/view.py
13
1862
# Webhooks for external integrations.
from __future__ import absolute_import

from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _

from zerver.models import get_client
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view
from zerver.models import UserProfile

import ujson

from typing import Any, Dict, Text

@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
def api_stash_webhook(request, user_profile, payload=REQ(argument_type='body'),
                      stream=REQ(default='commits')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
    # We don't get who did the push, or we'd try to report that.
    try:
        repo_name = payload["repository"]["name"]
        project_name = payload["repository"]["project"]["name"]
        branch_name = payload["refChanges"][0]["refId"].split("/")[-1]
        commit_entries = payload["changesets"]["values"]
        commits = [(entry["toCommit"]["displayId"],
                    entry["toCommit"]["message"].split("\n")[0])
                   for entry in commit_entries]
        head_ref = commit_entries[-1]["toCommit"]["displayId"]
    except KeyError as e:
        return json_error(_("Missing key %s in JSON") % (str(e),))

    subject = "%s/%s: %s" % (project_name, repo_name, branch_name)

    content = "`%s` was pushed to **%s** in **%s/%s** with:\n\n" % (
        head_ref, branch_name, project_name, repo_name)
    content += "\n".join("* `%s`: %s" % (
        commit[0], commit[1]) for commit in commits)

    check_send_message(user_profile, get_client("ZulipStashWebhook"), "stream",
                       [stream], subject, content)
    return json_success()
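For illustration, a minimal sketch of the Stash push payload this handler expects; the key names come straight from the lookups above, while the concrete values are invented.

# Hypothetical payload shaped to match the keys api_stash_webhook() reads;
# all values are placeholders.
payload = {
    "repository": {"name": "sandbox", "project": {"name": "zulip"}},
    "refChanges": [{"refId": "refs/heads/master"}],
    "changesets": {
        "values": [
            {"toCommit": {"displayId": "016e0aa",
                          "message": "Fix typo\n\nLonger description..."}},
        ]
    },
}
# For this input the handler posts to the "commits" stream (the REQ default)
# with subject "zulip/sandbox: master" and one bullet per commit summary.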
apache-2.0
ltsimps/metis_ros
vendor/googletest/googletest/scripts/common.py
1180
2919
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Shared utilities for writing scripts for Google Test/Mock."""

__author__ = '[email protected] (Zhanyong Wan)'

import os
import re


# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to.  For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')


def GetCommandOutput(command):
  """Runs the shell command and returns its stdout as a list of lines."""

  f = os.popen(command, 'r')
  lines = [line.strip() for line in f.readlines()]
  f.close()
  return lines


def GetSvnInfo():
  """Returns the project name and the current SVN workspace's root path."""

  for line in GetCommandOutput('svn info .'):
    m = _SVN_INFO_URL_RE.match(line)
    if m:
      project = m.group(1)  # googletest or googlemock
      rel_path = m.group(2)
      root = os.path.realpath(rel_path.count('/') * '../')
      return project, root

  return None, None


def GetSvnTrunk():
  """Returns the current SVN workspace's trunk root path."""

  _, root = GetSvnInfo()
  return root + '/trunk' if root else None


def IsInGTestSvn():
  project, _ = GetSvnInfo()
  return project == 'googletest'


def IsInGMockSvn():
  project, _ = GetSvnInfo()
  return project == 'googlemock'
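As a quick sanity check of the regex above, the module's own example line parses like this (runnable as-is):

import re

_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')

# The sample 'svn info .' line quoted in the module's comment.
m = _SVN_INFO_URL_RE.match('URL: https://googletest.googlecode.com/svn/trunk/test')
print(m.group(1))  # 'googletest' -- the project name
print(m.group(2))  # '/trunk/test' -- the path under the SVN root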
mit
EarToEarOak/RTLSDR-Scanner
nsis/test_urls.py
3
1992
#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import os
import re

from pip._vendor import requests


def __load_nsis():
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    files = [f for f in files if os.path.splitext(f)[1] == '.nsi']
    return files[0]


def __find_urls(nsis):
    rePath = re.compile('StrCpy \$UriPath \"(http[s]{0,1}.*?)\".*?StrCpy \$UriFile \"(.*?)\"',
                        re.DOTALL | re.MULTILINE)
    reInetc = re.compile('inetc::get \"(http[s]{0,1}.+?)\"', re.MULTILINE)

    f = open(nsis, 'r')
    data = f.read()

    matchPath = rePath.findall(data)
    urlsPath = [f + '/' + p for f, p in matchPath]
    urlsInetc = reInetc.findall(data)
    urlsPath.extend(urlsInetc)

    return urlsPath


def __test_urls(urls):
    ok = True
    for url in urls:
        request = requests.head(url)
        ok &= request.ok
        print '{} - {} - {}'.format(request.ok, url, request.status_code)

    if ok:
        print 'Passed'
    else:
        print 'Failed'


if __name__ == '__main__':
    print 'Testing installer URLs\n'
    nsis = __load_nsis()
    urls = __find_urls(nsis)
    __test_urls(urls)
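To show what __find_urls() extracts, here is a sketch run against an invented .nsi fragment (the regexes are copied from the script; the NSIS lines are made up):

import re

rePath = re.compile('StrCpy \$UriPath \"(http[s]{0,1}.*?)\".*?StrCpy \$UriFile \"(.*?)\"',
                    re.DOTALL | re.MULTILINE)
reInetc = re.compile('inetc::get \"(http[s]{0,1}.+?)\"', re.MULTILINE)

# Invented installer fragment for illustration only.
data = ('StrCpy $UriPath "http://example.com/downloads"\n'
        'StrCpy $UriFile "setup.exe"\n'
        'inetc::get "https://example.com/extra.zip" "$TEMP\\extra.zip"\n')

urls = [p + '/' + f for p, f in rePath.findall(data)]
urls.extend(reInetc.findall(data))
print(urls)
# ['http://example.com/downloads/setup.exe', 'https://example.com/extra.zip']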
gpl-3.0
projectexpert/pmis
analytic_resource_plan/wizard/resource_plan_line_make_purchase.py
1
6559
# Copyright 2019 LUXIM, Slovenia (Matjaž Mozetič)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).

from odoo import api, fields, models
from odoo.tools.translate import _
from odoo.exceptions import ValidationError


class ResourcePlanLineMakePurchase(models.TransientModel):
    _name = "resource.plan.line.make.purchase"
    _description = "Analytic resource plan line make purchase"

    @api.multi
    def default_partner(self):
        context = self.env.context
        case_id = context and context.get('active_ids', []) or []
        case_id = case_id and case_id[0] or False
        crm_id = self.env['crm.lead'].browse(case_id)
        return crm_id and crm_id.partner_id.id or ''

    partner_id = fields.Many2one(
        comodel_name='res.partner',
        string='Customer',
        default=default_partner
    )
    update_quotation = fields.Boolean(
        string='Update existing quotation'
    )

    @api.multi
    def make_order(self):
        context = self.env.context
        case_id = context and context.get('active_ids', []) or []
        case_id = case_id and case_id[0] or False
        crm_id = self.env['crm.lead'].browse(case_id)

        if self.update_quotation and crm_id and crm_id.order_ids:
            for order in crm_id.order_ids:
                if order.order_line:
                    order.order_line.unlink()

        if crm_id and crm_id.account_id:
            partner = crm_id.partner_id
            purchase_order = self.env['purchase.order']
            # TODO: check vendor pricelist for purchases field name
            pricelist = partner.property_product_pricelist.id
            partner_address = partner.address_get(
                [
                    'default',
                    'invoice',
                    'delivery',
                    'contact'
                ]
            )
            purchase_order_values = {
                'partner_id': partner.id,
                'opportunity_id': crm_id.id,
                'partner_invoice_id': partner_address['invoice'],
                'partner_shipping_id': partner_address['delivery'],
                'date_order': fields.datetime.now(),
            }
            for resource in crm_id.account_id.resource_ids:
                purchase_order_values.update({
                    'client_order_ref': (
                        resource.account_id.name),
                    'origin': resource.account_id.code,
                    'account_id': resource.account_id.id
                })
                if resource:
                    purchase_order_values.update({
                        'pricelist_id': pricelist
                    })
                # if resource and crm_id.account_id.pricelist_id:
                #     purchase_order_values.update({
                #         'pricelist_id': resource.pricelist_id.id
                #     })
                # else:
                #     purchase_order_values.update({
                #         'pricelist_id': pricelist
                #     })

            order_id = purchase_order.create(purchase_order_values)
            order_lines = self.prepare_purchase_order_line(case_id, order_id.id)
            self.create_purchase_order_line(order_lines)
            return {
                'domain': str([('id', 'in', [order_id.id])]),
                'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'purchase.order',
                'view_id': False,
                'type': 'ir.actions.act_window',
                'name': _('Quotation'),
                'res_id': order_id.id
            }

        if crm_id and crm_id.order_ids:
            return {
                'domain': str([('id', 'in', crm_id.order_ids.ids)]),
                'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'purchase.order',
                'view_id': False,
                'type': 'ir.actions.act_window',
                'name': _('Quotation'),
                'res_ids': crm_id.order_ids.ids
            }

    def prepare_purchase_order_line(self, case_id, order_id):
        lines = []
        case = self.env['crm.lead'].browse(case_id)
        order_id = self.env['purchase.order'].browse(order_id)
        linked_resources = (
            case.account_id and case.account_id.resource_ids or []
        )
        if not linked_resources:
            raise ValidationError(
                _("There is no available resource to "
                  "make purchase order!")
            )
        for resource in linked_resources:
            if resource.state in 'draft':
                continue
            for resource_line in resource:
                vals = {
                    'order_id': order_id and order_id.id,
                    'product_id': resource_line.product_id.id,
                    'name': resource_line.name,
                    'product_qty': resource_line.unit_amount,
                    'product_uom': resource_line.product_uom_id.id,
                    'price_unit': resource_line.price_unit,
                    'date_planned': resource_line.date,
                    'account_analytic_id': resource_line.account_id.id,
                    'resource_id': self
                }
                lines.append(vals)
        return lines

    def create_purchase_order_line(self, order_lines):
        purchaseorder_line_obj = self.env['purchase.order.line']
        for line in order_lines:
            purchaseorder_line_obj.create(line)


# noinspection PyAttributeOutsideInit
class CrmLead(models.Model):
    _inherit = "crm.lead"

    # project_id = fields.Many2one(
    #     comodel_name='project.project',
    #     string='Project',
    #     ondelete='set null',
    # )
    planned_cost_total = fields.Float(
        compute='_compute_planned_cost_total',
        string='Total planned cost'
    )
    account_id = fields.Many2one(
        comodel_name='account.analytic.account',
        string='Project Account',
        ondelete='set null',
    )

    @api.multi
    def _compute_planned_cost_total(self):
        self.ensure_one()
        self.planned_cost_total = sum(
            [resource.price_total
             for resource in self.account_id and self.account_id.resource_ids
             if resource.state not in 'draft'])

    @api.multi
    @api.onchange('account_id')
    def account_id_change(self):
        self.ensure_one()
        if self.account_id:
            self.partner_id = self.account_id.partner_id.id
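For orientation, this is the shape of one entry in the list returned by prepare_purchase_order_line(); the values below are placeholders, not real records.

# Placeholder values illustrating one purchase.order.line vals dict.
vals = {
    'order_id': 42,                    # id of the purchase.order just created
    'product_id': 7,
    'name': 'Senior developer, 3 days',
    'product_qty': 3.0,                # taken from resource_line.unit_amount
    'product_uom': 1,
    'price_unit': 480.0,
    'date_planned': '2019-06-01',
    'account_analytic_id': 11,         # analytic account of the resource line
    'resource_id': None,               # note: the wizard actually stores `self` here
}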
agpl-3.0
robbiet480/home-assistant
homeassistant/components/zestimate/sensor.py
19
4377
"""Support for zestimate data from zillow.com.""" from datetime import timedelta import logging import requests import voluptuous as vol import xmltodict from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) _RESOURCE = "http://www.zillow.com/webservice/GetZestimate.htm" ATTRIBUTION = "Data provided by Zillow.com" CONF_ZPID = "zpid" DEFAULT_NAME = "Zestimate" NAME = "zestimate" ZESTIMATE = f"{DEFAULT_NAME}:{NAME}" ICON = "mdi:home-variant" ATTR_AMOUNT = "amount" ATTR_CHANGE = "amount_change_30_days" ATTR_CURRENCY = "amount_currency" ATTR_LAST_UPDATED = "amount_last_updated" ATTR_VAL_HI = "valuation_range_high" ATTR_VAL_LOW = "valuation_range_low" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_ZPID): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) SCAN_INTERVAL = timedelta(minutes=30) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Zestimate sensor.""" name = config.get(CONF_NAME) properties = config[CONF_ZPID] sensors = [] for zpid in properties: params = {"zws-id": config[CONF_API_KEY]} params["zpid"] = zpid sensors.append(ZestimateDataSensor(name, params)) add_entities(sensors, True) class ZestimateDataSensor(Entity): """Implementation of a Zestimate sensor.""" def __init__(self, name, params): """Initialize the sensor.""" self._name = name self.params = params self.data = None self.address = None self._state = None @property def unique_id(self): """Return the ZPID.""" return self.params["zpid"] @property def name(self): """Return the name of the sensor.""" return f"{self._name} {self.address}" @property def state(self): """Return the state of the sensor.""" try: return round(float(self._state), 1) except ValueError: return None @property def device_state_attributes(self): """Return the state attributes.""" attributes = {} if self.data is not None: attributes = self.data attributes["address"] = self.address attributes[ATTR_ATTRIBUTION] = ATTRIBUTION return attributes @property def icon(self): """Icon to use in the frontend, if any.""" return ICON def update(self): """Get the latest data and update the states.""" try: response = requests.get(_RESOURCE, params=self.params, timeout=5) data = response.content.decode("utf-8") data_dict = xmltodict.parse(data).get(ZESTIMATE) error_code = int(data_dict["message"]["code"]) if error_code != 0: _LOGGER.error("The API returned: %s", data_dict["message"]["text"]) return except requests.exceptions.ConnectionError: _LOGGER.error("Unable to retrieve data from %s", _RESOURCE) return data = data_dict["response"][NAME] details = {} if "amount" in data and data["amount"] is not None: details[ATTR_AMOUNT] = data["amount"]["#text"] details[ATTR_CURRENCY] = data["amount"]["@currency"] if "last-updated" in data and data["last-updated"] is not None: details[ATTR_LAST_UPDATED] = data["last-updated"] if "valueChange" in data and data["valueChange"] is not None: details[ATTR_CHANGE] = int(data["valueChange"]["#text"]) if "valuationRange" in data and data["valuationRange"] is not None: details[ATTR_VAL_HI] = int(data["valuationRange"]["high"]["#text"]) details[ATTR_VAL_LOW] = int(data["valuationRange"]["low"]["#text"]) self.address = data_dict["response"]["address"]["street"] self.data = details if self.data is 
not None: self._state = self.data[ATTR_AMOUNT] else: self._state = None _LOGGER.error("Unable to parase Zestimate data from response")
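A configuration that satisfies PLATFORM_SCHEMA above, written as the dict the platform receives; in a Home Assistant YAML file this would sit under sensor: with platform: zestimate, and the key and ZPID values here are placeholders.

# Placeholder config matching PLATFORM_SCHEMA: api_key and zpid are required,
# zpid is coerced to a list (one sensor is created per ZPID), name is optional.
config = {
    "api_key": "X1-ZWzexample",   # not a real Zillow API key
    "zpid": ["48749425"],
    "name": "Zestimate",          # defaults to DEFAULT_NAME when omitted
}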
apache-2.0
batisteo/pasportaservo
blog/admin.py
3
1434
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _

from .models import Post


@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    list_display = ['title', 'content', 'pub_date', 'published']
    fields = (
        ('title', 'slug', 'author'),
        'content',
        'body',
        'description',
        'pub_date',
    )
    prepopulated_fields = {'slug': ('title',)}
    readonly_fields = ('body', 'description',)
    date_hierarchy = 'created'

    def published(self, obj):
        return obj.published
    published.short_description = _("Published")
    published.admin_order_field = 'pub_date'
    published.boolean = True

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'author':
            kwargs['queryset'] = get_user_model().objects.filter(username=request.user.username)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)

    def get_readonly_fields(self, request, obj=None):
        if obj is not None:
            return self.readonly_fields + ('author',)
        return self.readonly_fields

    def add_view(self, request, form_url="", extra_context=None):
        data = request.GET.copy()
        data['author'] = request.user
        request.GET = data
        return super().add_view(request, form_url="", extra_context=extra_context)
agpl-3.0
ridfrustum/lettuce
tests/integration/lib/Django-1.3/tests/regressiontests/modeladmin/tests.py
49
41260
from datetime import date from django import forms from django.conf import settings from django.contrib.admin.options import ModelAdmin, TabularInline, \ HORIZONTAL, VERTICAL from django.contrib.admin.sites import AdminSite from django.contrib.admin.validation import validate from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect from django.core.exceptions import ImproperlyConfigured from django.forms.models import BaseModelFormSet from django.forms.widgets import Select from django.test import TestCase from django.utils import unittest from models import Band, Concert, ValidationTestModel, \ ValidationTestInlineModel # None of the following tests really depend on the content of the request, # so we'll just pass in None. request = None class ModelAdminTests(TestCase): def setUp(self): self.band = Band.objects.create( name='The Doors', bio='', sign_date=date(1965, 1, 1), ) self.site = AdminSite() # form/fields/fieldsets interaction ############################## def test_default_fields(self): ma = ModelAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'bio', 'sign_date']) def test_default_fieldsets(self): # fieldsets_add and fieldsets_change should return a special data structure that # is used in the templates. They should generate the "right thing" whether we # have specified a custom form, the fields argument, or nothing at all. # # Here's the default case. There are no custom form_add/form_change methods, # no fields argument, and no fieldsets argument. ma = ModelAdmin(Band, self.site) self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})]) def test_field_arguments(self): # If we specify the fields argument, fieldsets_add and fielsets_change should # just stick the fields into a formsets structure and return it. class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual( ma.get_fieldsets(request), [(None, {'fields': ['name']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})]) def test_field_arguments_restricted_on_form(self): # If we specify fields or fieldsets, it should exclude fields on the Form class # to the fields specified. This may cause errors to be raised in the db layer if # required model fields arent in fields/fieldsets, but that's preferable to # ghost errors where you have a field in your Form class that isn't being # displayed because you forgot to add it to fields/fieldsets # Using `fields`. class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) self.assertEqual(ma.get_form(request, self.band).base_fields.keys(), ['name']) # Using `fieldsets`. class BandAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name']})] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) self.assertEqual(ma.get_form(request, self.band).base_fields.keys(), ['name']) # Using `exclude`. class BandAdmin(ModelAdmin): exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'sign_date']) # You can also pass a tuple to `exclude`. class BandAdmin(ModelAdmin): exclude = ('bio',) ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'sign_date']) # Using `fields` and `exclude`. 
class BandAdmin(ModelAdmin): fields = ['name', 'bio'] exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) def test_custom_form_validation(self): # If we specify a form, it should use it allowing custom validation to work # properly. This won't, however, break any of the admin widgets or media. class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class Meta: model = Band class BandAdmin(ModelAdmin): form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'bio', 'sign_date', 'delete']) self.assertEqual( type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget) def test_queryset_override(self): # If we need to override the queryset of a ModelChoiceField in our custom form # make sure that RelatedFieldWidgetWrapper doesn't mess that up. band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1)) band2.save() class ConcertAdmin(ModelAdmin): pass ma = ConcertAdmin(Concert, self.site) form = ma.get_form(request)() self.assertEqual(str(form["main_band"]), '<select name="main_band" id="id_main_band">\n' '<option value="" selected="selected">---------</option>\n' '<option value="%d">The Beatles</option>\n' '<option value="%d">The Doors</option>\n' '</select>' % (band2.id, self.band.id)) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert def __init__(self, *args, **kwargs): super(AdminConcertForm, self).__init__(*args, **kwargs) self.fields["main_band"].queryset = Band.objects.filter(name='The Doors') class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) form = ma.get_form(request)() self.assertEqual(str(form["main_band"]), '<select name="main_band" id="id_main_band">\n' '<option value="" selected="selected">---------</option>\n' '<option value="%d">The Doors</option>\n' '</select>' % self.band.id) # radio_fields behavior ########################################### def test_default_foreign_key_widget(self): # First, without any radio_fields specified, the widgets for ForeignKey # and fields with choices specified ought to be a basic Select widget. # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so # they need to be handled properly when type checking. For Select fields, all of # the choices lists have a first entry of dashes. cma = ModelAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['main_band'].widget.choices), [(u'', u'---------'), (self.band.id, u'The Doors')]) self.assertEqual( type(cmafa.base_fields['opening_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [(u'', u'---------'), (self.band.id, u'The Doors')]) self.assertEqual(type(cmafa.base_fields['day'].widget), Select) self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [('', '---------'), (1, 'Fri'), (2, 'Sat')]) self.assertEqual(type(cmafa.base_fields['transport'].widget), Select) self.assertEqual( list(cmafa.base_fields['transport'].widget.choices), [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]) def test_foreign_key_as_radio_field(self): # Now specify all the fields as radio_fields. Widgets should now be # RadioSelect, and the choices list should have a first entry of 'None' if # blank=True for the model field. 
Finally, the widget should have the # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL. class ConcertAdmin(ModelAdmin): radio_fields = { 'main_band': HORIZONTAL, 'opening_band': VERTICAL, 'day': VERTICAL, 'transport': HORIZONTAL, } cma = ConcertAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices), [(self.band.id, u'The Doors')]) self.assertEqual( type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'}) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [(u'', u'None'), (self.band.id, u'The Doors')]) self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'}) self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')]) self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual(list(cmafa.base_fields['transport'].widget.choices), [('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ('transport',) class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['main_band', 'opening_band', 'day']) class AdminConcertForm(forms.ModelForm): extra = forms.CharField() class Meta: model = Concert fields = ['extra', 'transport'] class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['extra', 'transport']) class ConcertInline(TabularInline): form = AdminConcertForm model = Concert fk_name = 'main_band' can_delete = True class BandAdmin(ModelAdmin): inlines = [ ConcertInline ] ma = BandAdmin(Band, self.site) self.assertEqual( list(ma.get_formsets(request))[0]().forms[0].fields.keys(), ['extra', 'transport', 'id', 'DELETE', 'main_band']) class ValidationTests(unittest.TestCase): def test_validation_only_runs_in_debug(self): # Ensure validation only runs when DEBUG = True try: settings.DEBUG = True class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = 10 site = AdminSite() self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.", site.register, ValidationTestModel, ValidationTestModelAdmin, ) finally: settings.DEBUG = False site = AdminSite() site.register(ValidationTestModel, ValidationTestModelAdmin) def test_raw_id_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = ('non_existent_field',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class 
ValidationTestModelAdmin(ModelAdmin): raw_id_fields = ('name',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = ('users',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_fieldsets_validation(self): class ValidationTestModelAdmin(ModelAdmin): fieldsets = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = ({},) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = ((),) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", ()),) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {}),) self.assertRaisesRegexp( ImproperlyConfigured, "'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("non_existent_field",)}),) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("name",)}),) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("name",)}),) fields = ["name",] self.assertRaisesRegexp( ImproperlyConfigured, "Both fieldsets and fields are specified in ValidationTestModelAdmin.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name', 'name']})] self.assertRaisesRegexp( ImproperlyConfigured, "There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fields = ["name", "name"] self.assertRaisesRegexp( ImproperlyConfigured, "There are duplicate field\(s\) in ValidationTestModelAdmin.fields", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_form_validation(self): class FakeForm(object): pass class ValidationTestModelAdmin(ModelAdmin): form = FakeForm self.assertRaisesRegexp( ImproperlyConfigured, "ValidationTestModelAdmin.form does not inherit from BaseModelForm.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_fieldsets_with_custom_form_validation(self): class BandAdmin(ModelAdmin): fieldsets = ( ('Band', { 'fields': ('non_existent_field',) }), ) self.assertRaisesRegexp( ImproperlyConfigured, "'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 
'non_existent_field' that is missing from the form.", validate, BandAdmin, Band, ) class BandAdmin(ModelAdmin): fieldsets = ( ('Band', { 'fields': ('name',) }), ) validate(BandAdmin, Band) class AdminBandForm(forms.ModelForm): class Meta: model = Band class BandAdmin(ModelAdmin): form = AdminBandForm fieldsets = ( ('Band', { 'fields': ('non_existent_field',) }), ) self.assertRaisesRegexp( ImproperlyConfigured, "'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.", validate, BandAdmin, Band, ) class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class Meta: model = Band class BandAdmin(ModelAdmin): form = AdminBandForm fieldsets = ( ('Band', { 'fields': ('name', 'bio', 'sign_date', 'delete') }), ) validate(BandAdmin, Band) def test_filter_vertical_validation(self): class ValidationTestModelAdmin(ModelAdmin): filter_vertical = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("non_existent_field",) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("name",) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("users",) validate(ValidationTestModelAdmin, ValidationTestModel) def test_filter_horizontal_validation(self): class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("non_existent_field",) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("name",) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("users",) validate(ValidationTestModelAdmin, ValidationTestModel) def test_radio_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): radio_fields = () self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"non_existent_field": None} self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"name": None} self.assertRaisesRegexp( ImproperlyConfigured, 
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"state": None} self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"state": VERTICAL} validate(ValidationTestModelAdmin, ValidationTestModel) def test_prepopulated_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = () self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"non_existent_field": None} self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"slug": ("non_existent_field",)} self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"users": ("name",)} self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. 
This isn't allowed.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"slug": ("name",)} validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_display_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_display = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display = ('non_existent_field',) self.assertRaisesRegexp( ImproperlyConfigured, "ValidationTestModelAdmin.list_display\[0\], 'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display = ('users',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def a_callable(obj): pass class ValidationTestModelAdmin(ModelAdmin): def a_method(self, obj): pass list_display = ('name', 'decade_published_in', 'a_method', a_callable) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_display_links_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_display_links = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display_links = ('non_existent_field',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display_links = ('name',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def a_callable(obj): pass class ValidationTestModelAdmin(ModelAdmin): def a_method(self, obj): pass list_display = ('name', 'decade_published_in', 'a_method', a_callable) list_display_links = ('name', 'decade_published_in', 'a_method', a_callable) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_filter_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_filter = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_filter' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_filter = ('non_existent_field',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_filter = ('is_active',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_per_page_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_per_page = 'hello' self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_per_page' should be a integer.", validate, ValidationTestModelAdmin, 
ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_per_page = 100 validate(ValidationTestModelAdmin, ValidationTestModel) def test_search_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): search_fields = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.search_fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_date_hierarchy_validation(self): class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'non_existent_field' self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'name' self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'pub_date' validate(ValidationTestModelAdmin, ValidationTestModel) def test_ordering_validation(self): class ValidationTestModelAdmin(ModelAdmin): ordering = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('non_existent_field',) self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('?', 'name') self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' 
or the other fields.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('?',) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): ordering = ('band__name',) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): ordering = ('name',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_select_related_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_select_related = 1 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.list_select_related' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_select_related = False validate(ValidationTestModelAdmin, ValidationTestModel) def test_save_as_validation(self): class ValidationTestModelAdmin(ModelAdmin): save_as = 1 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.save_as' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): save_as = True validate(ValidationTestModelAdmin, ValidationTestModel) def test_save_on_top_validation(self): class ValidationTestModelAdmin(ModelAdmin): save_on_top = 1 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.save_on_top' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): save_on_top = True validate(ValidationTestModelAdmin, ValidationTestModel) def test_inlines_validation(self): class ValidationTestModelAdmin(ModelAdmin): inlines = 10 self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(object): pass class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): pass class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class SomethingBad(object): pass class ValidationTestInline(TabularInline): model = SomethingBad class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_fields_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fields = 10 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fields = ("non_existent_field",) 
class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_fk_name_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = "non_existent_field" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'ValidationTestInlineModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = "parent" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_extra_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = "hello" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.extra' should be a integer.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = 2 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_max_num_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = "hello" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.max_num' should be an integer or None \(default\).", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = 2 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_formset_validation(self): class FakeFormSet(object): pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = FakeFormSet class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesRegexp( ImproperlyConfigured, "'ValidationTestInline.formset' does not inherit from BaseModelFormSet.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class RealModelFormSet(BaseModelFormSet): pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = RealModelFormSet class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel)
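Outside the test harness, the validate() pattern these tests exercise looks like this; a sketch assuming a configured Django 1.3-era project and some model SomeModel (a hypothetical name):

from django.contrib.admin.options import ModelAdmin
from django.contrib.admin.validation import validate


class BadAdmin(ModelAdmin):
    raw_id_fields = 10   # not a list/tuple, so validate() rejects this


class GoodAdmin(ModelAdmin):
    raw_id_fields = ()

# validate(BadAdmin, SomeModel)   # raises ImproperlyConfigured
# validate(GoodAdmin, SomeModel)  # returns silently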
gpl-3.0
liberatetheweb/blockedinrussia.org
code.py
1
9146
#!/usr/bin/env python # -*- coding: UTF-8 -*- import web, bcrypt, redis, json, requests, dns.resolver, re, M2Crypto, cgi from urlparse import urlsplit from zbase62 import zbase62 web.config.debug = False r_server = redis.Redis('localhost') urls = ( '/', 'index', '/check', 'check', '/logout', 'logout', '/register', 'register' ) app = web.application(urls, globals()) session = web.session.Session(app, web.session.DiskStore('sessions')) render = web.template.render('templates/', base='layout',) proxies = { "http": "http://localhost:8118", "https": "http://localhost:8118", } checkstr = [ u'Запрашиваемая страница заблокирована на основании <br> Постановления Правительства от 26.10.2012 №1101 <br>', u'<h1><strong>Уважаемые пользователи!<br/><br/>Мы приносим свои извинения, но&nbsp;доступ к&nbsp;запрашиваемому ресурсу ограничен.</strong></h1>', u'<p>Уважаемый абонент,<br /> Вы хотите перейти по адресу, который внесён в единый реестр запрещённых сайтов или доступ к нему <span id="more">заблокирован</span> судебным решением.</p>', u'<div style="font-size: 13px; margin-top: 25px; font-weight: normal;">Адрес сайта Единого реестра доменных имен, указателей, страниц сайтов в сети "Интернет" и сетевых адресов, позволяющих идентифицировать сайты</br> в сети "Интернет", содержащие информацию, распространение которой в Российской Федерации запрещено: <a href="http://zapret-info.gov.ru/" target="_blank">zapret-info.gov.ru</a></br>Адрес Реестра нарушителей авторских прав: <a href="http://nap.rkn.gov.ru/reestr/" target="_blank">http://nap.rkn.gov.ru/reestr/</a>', u'<img src="http://212.1.224.79/stop.jpg" width="672" height="180" alt="Внимание!" />', u'<p>Access control configuration prevents your request from being allowed at this time. Please contact your service provider if you feel this is incorrect.</p>', u'<p>Сайт, который вы хотите посетить, внесен в <u><a href="http://eais.rkn.gov.ru" style="color:#FF6600">единый', u'либо в <a href="http://eais.rkn.gov.ru/" target="_blank">едином реестре</a>', u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен в соответствии с требованиями законодательства. Дополнительную информацию можно получить на сайте <a href="http://www.zapret-info.gov.ru./">www.zapret-info.gov.ru</a>.</p>', u'материалов</a>, доступ к нему закрыт на основании решения суда РФ.', u'<p>В соответствии с требованиями законодательства доступ к запрашиваемому Интернет-ресурсу <br>закрыт.</p>', u'<p><b>В соответствии с требованиями законодательства Российской Федерации доступ к запрашиваемому Интернет-ресурсу', u'<h5>Ссылка заблокирована по решению суда</h5><br><br><a href=\'http://ttk.ru/\'>ЗАО "Компания ТрансТелеКом"</a>' u'Причину блокировки можно посмотреть в <a href="http://eais.rkn.gov.ru/">Едином Реестре</a>', u':80/f/accept/\' not found...', u'<a href="http://zapret-info.gov.ru"/><b>Постановление Правительства Российской Федерации от 26 октября 2012 г. 
N 1101</b>', u'<p>Your cache administrator is <a href="mailto:webmaster?subject=CacheErrorInfo%20-%20ERR_CONNECT_FAIL&amp;body=CacheHost%3A%20atel76.ru%0D%0AErrPage%3A%20ERR_CONNECT_FAIL%0D%0AErr%3A%20(110)%20Connection%20timed%20out%0D%0A', u'<h5><a href=\'http://eais.rkn.gov.ru/\'>Ссылка заблокирована <br>в соответствии с законодательством РФ</a></h5><br><br><a href=\'http://ttk.ru/\'>ЗАО "Компания ТрансТелеКом"</a>', u'<div class="section">\n Доступ к запрашиваемому вами интернет-ресурсу ограничен в \n соответствии с требованиями Законодательства Российской Федерации.', u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен по требованию правоохранительных органов в соответствии с законодательством и/или на основании решения суда.</p>', u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен в соответствии с требованиями законодательства. Дополнительную информацию можно получить на сайте <a href="/PDATransfer.axd?next_url=http%3a%2f%2fwww.zapret-info.gov.ru.%2f">www.zapret-info.gov.ru</a>.</p>' ] class index: def GET(self): if session.get('login', False): return render.index() else: return render.login('') def POST(self): data = web.input() if not all(x in data for x in ('username', 'password')): raise web.seeother('/') if 'login' not in session: session.login = 0 hashed = r_server.get('user:' + data.username) if hashed is None: session.login = 0 return render.login('No such user') if bcrypt.hashpw(data.password.encode('utf-8'), hashed) == hashed: session.login = 1 return render.index() else: session.login = 0 return render.login('Wrong password') class logout: def GET(self): session.login = 0 raise web.seeother('/') class check: def GET(self): if not session.get('login', False): raise web.seeother('/') data = web.input() if not 'url' in data: return 'Wrong parameters' results = {} results['results'] = [] try: url = urlsplit(data.url) except: return 'Not a valid URL' s = requests.Session() s.proxies=proxies results['ip'] = [] try: answers = dns.resolver.query(url.hostname, 'A') except: return 'Can\'t get A records of hostname' results['registry'] = [] regnumbers = [] for rdata in answers: results['ip'].append(rdata.address) regcheck = r_server.smembers('registry:' + rdata.address) if regcheck: regnumbers = regnumbers + list(regcheck) regcheck = r_server.smembers('registry:' + url.hostname) if regcheck: regnumbers = regnumbers + list(regcheck) for value in list(set(regnumbers)): info = r_server.hgetall('registry:' + value) results['registry'].append({ 'ip': json.loads(info['ip']), 'url': json.loads(info['url']), 'authority': info['authority'], 'base': info['base'], 'date': info['date'] }) for value in r_server.keys('as:*'): fingerprint = r_server.srandmember(value).replace('relay:','') s.headers.update({'host': url.hostname}) as_id = value.replace('as:','') try: r = s.get(url.scheme + '://' + url.hostname + '.' + fingerprint + '.exit' + url.path + '?' 
+ url.query, verify=False) print as_id status = r.status_code if status == 200: if any(x in r.text for x in checkstr): blocked = 'yes' else: blocked = 'no' r.text else: blocked = 'maybe' except: status = 'fail' blocked = 'dunno' info = r_server.hgetall('relay:' + fingerprint) results['results'].append({ 'blocked': blocked, 'status': status, 'fingerprint': fingerprint, 'as_name': info['as_name'], 'lat': info['latitude'], 'lon': info['longitude'] }) return json.dumps(results) class register: def GET(self): data = web.input() if not 'email' in data: return 'Wrong parameters' if not re.match(r'[^@]+@[^@]+\.[^@]+', data.email): return 'This is not email' if r_server.sismember('nonregistred',data.email): password = zbase62.b2a(M2Crypto.m2.rand_bytes(20)) hashed = bcrypt.hashpw(password, bcrypt.gensalt()) r_server.set('user:' + data.email,hashed) r_server.srem('nonregistred',data.email) return render.register(password,cgi.escape(data.email)) else: return 'No such email' if __name__ == "__main__": app.run()
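The per-relay check above relies on Tor's (since deprecated) .exit notation: appending a relay fingerprint plus ".exit" to the hostname asks Tor to route the request through that specific exit. A standalone sketch of the URL construction, with placeholder values:

# Placeholder scheme/host/path/fingerprint; mirrors the string concatenation
# used in check.GET() above.
scheme, hostname, path, query = 'http', 'example.com', '/page', 'id=1'
fingerprint = 'A' * 40  # placeholder; the script draws these from its 'as:*' redis sets
url = scheme + '://' + hostname + '.' + fingerprint + '.exit' + path + '?' + query
print(url)  # http://example.com.AAA...AAA.exit/page?id=1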
cc0-1.0
heeraj123/oh-mainline
vendor/packages/whoosh/src/whoosh/compat.py
17
1735
import sys

if sys.version_info[0] < 3:
    PY3 = False

    def b(s):
        return s

    import cStringIO as StringIO
    StringIO = BytesIO = StringIO.StringIO
    callable = callable
    integer_types = (int, long)
    iteritems = lambda o: o.iteritems()
    itervalues = lambda o: o.itervalues()
    iterkeys = lambda o: o.iterkeys()
    from itertools import izip
    long_type = long
    next = lambda o: o.next()
    import cPickle as pickle
    from cPickle import dumps, loads, dump, load
    string_type = basestring
    text_type = unicode
    unichr = unichr
    from urllib import urlretrieve

    def u(s):
        return unicode(s, "unicode_escape")

    def with_metaclass(meta, base=object):
        class _WhooshBase(base):
            __metaclass__ = meta
        return _WhooshBase

    xrange = xrange
    zip_ = zip
else:
    PY3 = True
    import collections

    def b(s):
        return s.encode("latin-1")

    import io
    BytesIO = io.BytesIO
    callable = lambda o: isinstance(o, collections.Callable)
    exec_ = eval("exec")
    integer_types = (int,)
    iteritems = lambda o: o.items()
    itervalues = lambda o: o.values()
    iterkeys = lambda o: iter(o.keys())
    izip = zip
    long_type = int
    next = next
    import pickle
    from pickle import dumps, loads, dump, load
    StringIO = io.StringIO
    string_type = str
    text_type = str
    unichr = chr
    from urllib.request import urlretrieve

    def u(s):
        return s

    def with_metaclass(meta, base=object):
        ns = dict(base=base, meta=meta)
        exec_("""class _WhooshBase(base, metaclass=meta):
    pass""", ns)
        return ns["_WhooshBase"]

    xrange = range
    zip_ = lambda *args: list(zip(*args))
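A small sketch of how downstream code stays 2/3-agnostic by going through these shims instead of version-specific builtins (assumes whoosh is importable):

from whoosh.compat import b, u, iteritems, text_type, with_metaclass

assert isinstance(u("hello"), text_type)   # text on both Pythons
raw = b("hello")                           # str on 2, bytes on 3

for key, value in iteritems({"a": 1}):     # .iteritems() on 2, .items() on 3
    print(key)

class Meta(type):
    pass

# with_metaclass() papers over the py2 __metaclass__ / py3 metaclass= split.
class Record(with_metaclass(Meta)):
    pass

assert type(Record) is Meta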
agpl-3.0
fighterCui/L4ReFiascoOC
l4/pkg/python/contrib/Lib/multiprocessing/pool.py
52
17699
# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt # __all__ = ['Pool'] # # Imports # import threading import Queue import itertools import collections import time from multiprocessing import Process, cpu_count, TimeoutError from multiprocessing.util import Finalize, debug # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return map(*args) # # Code run by worker processes # def worker(inqueue, outqueue, initializer=None, initargs=()): put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) while 1: try: task = get() except (EOFError, IOError): debug('worker got EOFError or IOError -- exiting') break if task is None: debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception, e: result = (False, e) put((job, i, result)) # # Class representing a process pool # class Pool(object): ''' Class which supports an async version of the `apply()` builtin ''' Process = Process def __init__(self, processes=None, initializer=None, initargs=()): self._setup_queues() self._taskqueue = Queue.Queue() self._cache = {} self._state = RUN if processes is None: try: processes = cpu_count() except NotImplementedError: processes = 1 self._pool = [] for i in range(processes): w = self.Process( target=worker, args=(self._inqueue, self._outqueue, initializer, initargs) ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) def _setup_queues(self): from .queues import SimpleQueue self._inqueue = SimpleQueue() self._outqueue = SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def apply(self, func, args=(), kwds={}): ''' Equivalent of `apply()` builtin ''' assert self._state == RUN return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Equivalent of `map()` builtin ''' assert self._state == RUN return self.map_async(func, iterable, chunksize).get() def imap(self, func, iterable, chunksize=1): ''' Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()` ''' assert self._state == RUN if chunksize == 1: result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def 
imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary ''' assert self._state == RUN if chunksize == 1: result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None): ''' Asynchronous equivalent of `apply()` builtin ''' assert self._state == RUN result = ApplyResult(self._cache, callback) self._taskqueue.put(([(result._job, None, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None): ''' Asynchronous equivalent of `map()` builtin ''' assert self._state == RUN if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): i = -1 for i, task in enumerate(taskseq): if thread._state: debug('task handler found thread._state != RUN') break try: put(task) except IOError: debug('could not put task on queue') break else: if set_length: debug('doing set_length()') set_length(i+1) continue break else: debug('task handler got sentinel') try: # tell result handler to finish when cache is empty debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work debug('task handler sending sentinel to workers') for p in pool: put(None) except IOError: debug('task handler got IOError when sending sentinels') debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if thread._state: assert thread._state == TERMINATE debug('result handler found thread._state=TERMINATE') break if task is None: debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass while cache and thread._state != TERMINATE: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if task is None: debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass if hasattr(outqueue, '_reader'): debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (IOError, EOFError): pass debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): debug('closing pool') if self._state == RUN: self._state = CLOSE self._taskqueue.put(None) def terminate(self): debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, task_handler, result_handler, cache): # this is guaranteed to only be called once debug('finalizing pool') task_handler._state = TERMINATE taskqueue.put(None) # sentinel debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 result_handler._state = TERMINATE outqueue.put(None) # sentinel if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: p.terminate() debug('joining task handler') task_handler.join(1e100) debug('joining result handler') result_handler.join(1e100) if pool and hasattr(pool[0], 'terminate'): debug('joining pool workers') for p in pool: p.join() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, cache, callback): self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() self._cache = cache self._ready = False self._callback = callback cache[self._job] = self def ready(self): return self._ready def successful(self): assert self._ready return self._success def wait(self, timeout=None): self._cond.acquire() try: if not self._ready: self._cond.wait(timeout) finally: self._cond.release() def get(self, timeout=None): self.wait(timeout) if not self._ready: raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) self._cond.acquire() try: self._ready = True self._cond.notify() finally: self._cond.release() del self._cache[self._job] # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback): ApplyResult.__init__(self, cache, callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._ready = True else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._cond.acquire() try: self._ready = True self._cond.notify() 
finally: self._cond.release() else: self._success = False self._value = result del self._cache[self._job] self._cond.acquire() try: self._ready = True self._cond.notify() finally: self._cond.release() # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, cache): self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() self._cache = cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): self._cond.acquire() try: try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration raise TimeoutError finally: self._cond.release() success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): self._cond.acquire() try: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] finally: self._cond.release() def _set_length(self, length): self._cond.acquire() try: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] finally: self._cond.release() # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): self._cond.acquire() try: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] finally: self._cond.release() # # # class ThreadPool(Pool): from .dummy import Process def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = Queue.Queue() self._outqueue = Queue.Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # put sentinels at head of inqueue to make workers finish inqueue.not_empty.acquire() try: inqueue.queue.clear() inqueue.queue.extend([None] * size) inqueue.not_empty.notify_all() finally: inqueue.not_empty.release()
gpl-2.0
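The record above contains a Python 2-era copy of multiprocessing/pool.py. As a minimal, hedged usage sketch of the public API it defines (apply_async, map, imap, close/join); the worker count and timeout below are illustrative values, not taken from the source:

# Minimal usage sketch for the Pool API above (Python 2 syntax to match the
# source). Functions submitted to the pool must be picklable, so `square`
# is defined at module level rather than as a lambda.
from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)            # illustrative; defaults to cpu_count()
    promise = pool.apply_async(square, (7,))
    print promise.get(timeout=5)        # 49, or TimeoutError on timeout
    print pool.map(square, range(10))   # [0, 1, 4, ..., 81], order preserved
    for value in pool.imap(square, range(5)):
        print value                     # lazy, in-order iteration
    pool.close()                        # sends the sentinel to the task handler
    pool.join()                         # waits for workers to drain and exit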
JensTimmerman/pyopenssl
OpenSSL/test/util.py
4
16826
# Copyright (C) Jean-Paul Calderone # Copyright (C) Twisted Matrix Laboratories. # See LICENSE for details. """ Helpers for the OpenSSL test suite, largely copied from U{Twisted<http://twistedmatrix.com/>}. """ import shutil import traceback import os, os.path from tempfile import mktemp from unittest import TestCase import sys from OpenSSL._util import exception_from_error_queue from OpenSSL.crypto import Error try: import memdbg except Exception: class _memdbg(object): heap = None memdbg = _memdbg() from OpenSSL._util import ffi, lib, byte_string as b class TestCase(TestCase): """ :py:class:`TestCase` adds useful testing functionality beyond what is available from the standard library :py:class:`unittest.TestCase`. """ def run(self, result): run = super(TestCase, self).run if memdbg.heap is None: return run(result) # Run the test as usual before = set(memdbg.heap) run(result) # Clean up some long-lived allocations so they won't be reported as # memory leaks. lib.CRYPTO_cleanup_all_ex_data() lib.ERR_remove_thread_state(ffi.NULL) after = set(memdbg.heap) if not after - before: # No leaks, fast succeed return if result.wasSuccessful(): # If it passed, run it again with memory debugging before = set(memdbg.heap) run(result) # Clean up some long-lived allocations so they won't be reported as # memory leaks. lib.CRYPTO_cleanup_all_ex_data() lib.ERR_remove_thread_state(ffi.NULL) after = set(memdbg.heap) self._reportLeaks(after - before, result) def _reportLeaks(self, leaks, result): def format_leak(p): stacks = memdbg.heap[p] # Eventually look at multiple stacks for the realloc() case. For # now just look at the original allocation location. (size, python_stack, c_stack) = stacks[0] stack = traceback.format_list(python_stack)[:-1] # c_stack looks something like this (interesting parts indicated # with inserted arrows not part of the data): # # /home/exarkun/Projects/pyOpenSSL/branches/use-opentls/__pycache__/_cffi__x89095113xb9185b9b.so(+0x12cf) [0x7fe2e20582cf] # /home/exarkun/Projects/cpython/2.7/python(PyCFunction_Call+0x8b) [0x56265a] # /home/exarkun/Projects/cpython/2.7/python() [0x4d5f52] # /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e] # /home/exarkun/Projects/cpython/2.7/python() [0x4d6419] # /home/exarkun/Projects/cpython/2.7/python() [0x4d6129] # /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e] # /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalCodeEx+0x1043) [0x4d3726] # /home/exarkun/Projects/cpython/2.7/python() [0x55fd51] # /home/exarkun/Projects/cpython/2.7/python(PyObject_Call+0x7e) [0x420ee6] # /home/exarkun/Projects/cpython/2.7/python(PyEval_CallObjectWithKeywords+0x158) [0x4d56ec] # /home/exarkun/.local/lib/python2.7/site-packages/cffi-0.5-py2.7-linux-x86_64.egg/_cffi_backend.so(+0xe96e) [0x7fe2e38be96e] # /usr/lib/x86_64-linux-gnu/libffi.so.6(ffi_closure_unix64_inner+0x1b9) [0x7fe2e36ad819] # /usr/lib/x86_64-linux-gnu/libffi.so.6(ffi_closure_unix64+0x46) [0x7fe2e36adb7c] # /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(CRYPTO_malloc+0x64) [0x7fe2e1cef784] <------ end interesting # /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(lh_insert+0x16b) [0x7fe2e1d6a24b] . # /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(+0x61c18) [0x7fe2e1cf0c18] . # /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(+0x625ec) [0x7fe2e1cf15ec] . # /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(DSA_new_method+0xe6) [0x7fe2e1d524d6] . 
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(DSA_generate_parameters+0x3a) [0x7fe2e1d5364a] <------ begin interesting # /home/exarkun/Projects/opentls/trunk/tls/c/__pycache__/_cffi__x305d4698xb539baaa.so(+0x1f397) [0x7fe2df84d397] # /home/exarkun/Projects/cpython/2.7/python(PyCFunction_Call+0x8b) [0x56265a] # /home/exarkun/Projects/cpython/2.7/python() [0x4d5f52] # /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e] # /home/exarkun/Projects/cpython/2.7/python() [0x4d6419] # ... # # Notice the stack is upside down compared to a Python traceback. # Identify the start and end of interesting bits and stuff it into the stack we report. saved = list(c_stack) # Figure the first interesting frame will be after the cffi-compiled module while c_stack and '/__pycache__/_cffi__' not in c_stack[-1]: c_stack.pop() # Figure the last interesting frame will always be CRYPTO_malloc, # since that's where we hooked in to things. while c_stack and 'CRYPTO_malloc' not in c_stack[0] and 'CRYPTO_realloc' not in c_stack[0]: c_stack.pop(0) if c_stack: c_stack.reverse() else: c_stack = saved[::-1] stack.extend([frame + "\n" for frame in c_stack]) stack.insert(0, "Leaked (%s) at:\n") return "".join(stack) if leaks: unique_leaks = {} for p in leaks: size = memdbg.heap[p][-1][0] new_leak = format_leak(p) if new_leak not in unique_leaks: unique_leaks[new_leak] = [(size, p)] else: unique_leaks[new_leak].append((size, p)) memdbg.free(p) for (stack, allocs) in unique_leaks.iteritems(): allocs_accum = [] for (size, pointer) in allocs: addr = int(ffi.cast('uintptr_t', pointer)) allocs_accum.append("%d@0x%x" % (size, addr)) allocs_report = ", ".join(sorted(allocs_accum)) result.addError( self, (None, Exception(stack % (allocs_report,)), None)) def tearDown(self): """ Clean up any files or directories created using :py:meth:`TestCase.mktemp`. Subclasses must invoke this method if they override it or the cleanup will not occur. """ if False and self._temporaryFiles is not None: for temp in self._temporaryFiles: if os.path.isdir(temp): shutil.rmtree(temp) elif os.path.exists(temp): os.unlink(temp) try: exception_from_error_queue(Error) except Error: e = sys.exc_info()[1] if e.args != ([],): self.fail("Left over errors in OpenSSL error queue: " + repr(e)) def assertIsInstance(self, instance, classOrTuple, message=None): """ Fail if C{instance} is not an instance of the given class or of one of the given classes. @param instance: the object to test the type (first argument of the C{isinstance} call). @type instance: any. @param classOrTuple: the class or classes to test against (second argument of the C{isinstance} call). @type classOrTuple: class, type, or tuple. @param message: Custom text to include in the exception text if the assertion fails. """ if not isinstance(instance, classOrTuple): if message is None: suffix = "" else: suffix = ": " + message self.fail("%r is not an instance of %s%s" % ( instance, classOrTuple, suffix)) def failUnlessIn(self, containee, container, msg=None): """ Fail the test if :py:data:`containee` is not found in :py:data:`container`. 
:param containee: the value that should be in :py:class:`container` :param container: a sequence type, or in the case of a mapping type, will follow semantics of 'if key in dict.keys()' :param msg: if msg is None, then the failure message will be '%r not in %r' % (first, second) """ if containee not in container: raise self.failureException(msg or "%r not in %r" % (containee, container)) return containee assertIn = failUnlessIn def assertNotIn(self, containee, container, msg=None): """ Fail the test if C{containee} is found in C{container}. @param containee: the value that should not be in C{container} @param container: a sequence type, or in the case of a mapping type, will follow semantics of 'if key in dict.keys()' @param msg: if msg is None, then the failure message will be '%r in %r' % (first, second) """ if containee in container: raise self.failureException(msg or "%r in %r" % (containee, container)) return containee failIfIn = assertNotIn def failUnlessIdentical(self, first, second, msg=None): """ Fail the test if :py:data:`first` is not :py:data:`second`. This is an object-identity-equality test, not an object equality (i.e. :py:func:`__eq__`) test. :param msg: if msg is None, then the failure message will be '%r is not %r' % (first, second) """ if first is not second: raise self.failureException(msg or '%r is not %r' % (first, second)) return first assertIdentical = failUnlessIdentical def failIfIdentical(self, first, second, msg=None): """ Fail the test if :py:data:`first` is :py:data:`second`. This is an object-identity-equality test, not an object equality (i.e. :py:func:`__eq__`) test. :param msg: if msg is None, then the failure message will be '%r is %r' % (first, second) """ if first is second: raise self.failureException(msg or '%r is %r' % (first, second)) return first assertNotIdentical = failIfIdentical def failUnlessRaises(self, exception, f, *args, **kwargs): """ Fail the test unless calling the function :py:data:`f` with the given :py:data:`args` and :py:data:`kwargs` raises :py:data:`exception`. The failure will report the traceback and call stack of the unexpected exception. :param exception: exception type that is to be expected :param f: the function to call :return: The raised exception instance, if it is of the given type. :raise self.failureException: Raised if the function call does not raise an exception or if it raises an exception of a different type. """ try: result = f(*args, **kwargs) except exception: inst = sys.exc_info()[1] return inst except: raise self.failureException('%s raised instead of %s' % (sys.exc_info()[0], exception.__name__, )) else: raise self.failureException('%s not raised (%r returned)' % (exception.__name__, result)) assertRaises = failUnlessRaises _temporaryFiles = None def mktemp(self): """ Pathetic substitute for twisted.trial.unittest.TestCase.mktemp. """ if self._temporaryFiles is None: self._temporaryFiles = [] temp = b(mktemp(dir=".")) self._temporaryFiles.append(temp) return temp # Other stuff def assertConsistentType(self, theType, name, *constructionArgs): """ Perform various assertions about :py:data:`theType` to ensure that it is a well-defined type. This is useful for extension types, where it's pretty easy to do something wacky. If something about the type is unusual, an exception will be raised. :param theType: The type object about which to make assertions. :param name: A string giving the name of the type. :param constructionArgs: Positional arguments to use with :py:data:`theType` to create an instance of it. 
""" self.assertEqual(theType.__name__, name) self.assertTrue(isinstance(theType, type)) instance = theType(*constructionArgs) self.assertIdentical(type(instance), theType) class EqualityTestsMixin(object): """ A mixin defining tests for the standard implementation of C{==} and C{!=}. """ def anInstance(self): """ Return an instance of the class under test. Each call to this method must return a different object. All objects returned must be equal to each other. """ raise NotImplementedError() def anotherInstance(self): """ Return an instance of the class under test. Each call to this method must return a different object. The objects must not be equal to the objects returned by C{anInstance}. They may or may not be equal to each other (they will not be compared against each other). """ raise NotImplementedError() def test_identicalEq(self): """ An object compares equal to itself using the C{==} operator. """ o = self.anInstance() self.assertTrue(o == o) def test_identicalNe(self): """ An object doesn't compare not equal to itself using the C{!=} operator. """ o = self.anInstance() self.assertFalse(o != o) def test_sameEq(self): """ Two objects that are equal to each other compare equal to each other using the C{==} operator. """ a = self.anInstance() b = self.anInstance() self.assertTrue(a == b) def test_sameNe(self): """ Two objects that are equal to each other do not compare not equal to each other using the C{!=} operator. """ a = self.anInstance() b = self.anInstance() self.assertFalse(a != b) def test_differentEq(self): """ Two objects that are not equal to each other do not compare equal to each other using the C{==} operator. """ a = self.anInstance() b = self.anotherInstance() self.assertFalse(a == b) def test_differentNe(self): """ Two objects that are not equal to each other compare not equal to each other using the C{!=} operator. """ a = self.anInstance() b = self.anotherInstance() self.assertTrue(a != b) def test_anotherTypeEq(self): """ The object does not compare equal to an object of an unrelated type (which does not implement the comparison) using the C{==} operator. """ a = self.anInstance() b = object() self.assertFalse(a == b) def test_anotherTypeNe(self): """ The object compares not equal to an object of an unrelated type (which does not implement the comparison) using the C{!=} operator. """ a = self.anInstance() b = object() self.assertTrue(a != b) def test_delegatedEq(self): """ The result of comparison using C{==} is delegated to the right-hand operand if it is of an unrelated type. """ class Delegate(object): def __eq__(self, other): # Do something crazy and obvious. return [self] a = self.anInstance() b = Delegate() self.assertEqual(a == b, [b]) def test_delegateNe(self): """ The result of comparison using C{!=} is delegated to the right-hand operand if it is of an unrelated type. """ class Delegate(object): def __ne__(self, other): # Do something crazy and obvious. return [self] a = self.anInstance() b = Delegate() self.assertEqual(a != b, [b])
apache-2.0
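A hedged sketch of how the EqualityTestsMixin above is intended to be used: subclass it alongside the TestCase helper and supply the two factory methods. The Color class here is hypothetical, invented purely for illustration.

# Hypothetical sketch: exercising EqualityTestsMixin (defined above) against
# a toy value class. Color is not part of the source; it only exists to show
# the anInstance()/anotherInstance() contract.
class Color(object):
    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        if isinstance(other, Color):
            return self.name == other.name
        return NotImplemented          # lets the delegation tests pass

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

class ColorEqualityTests(TestCase, EqualityTestsMixin):
    def anInstance(self):
        return Color("red")            # each call equal to the others

    def anotherInstance(self):
        return Color("blue")           # never equal to anInstance() results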
esikachev/sahara-backup
sahara/service/edp/oozie/workflow_creator/shell_workflow.py
9
1685
# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp.oozie.workflow_creator import base_workflow from sahara.utils import xmlutils as x class ShellWorkflowCreator(base_workflow.OozieWorkflowCreator): SHELL_XMLNS = {"xmlns": "uri:oozie:shell-action:0.1"} def __init__(self): super(ShellWorkflowCreator, self).__init__('shell') def build_workflow_xml(self, script_name, prepare={}, job_xml=None, configuration=None, env_vars={}, arguments=[], files=[]): x.add_attributes_to_element(self.doc, self.tag_name, self.SHELL_XMLNS) for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) self._add_configuration_elements(configuration) x.add_text_element_to_tag(self.doc, self.tag_name, 'exec', script_name) for arg in arguments: x.add_text_element_to_tag(self.doc, self.tag_name, 'argument', arg) x.add_equal_separated_dict(self.doc, self.tag_name, 'env-var', env_vars) self._add_files_and_archives(files + [script_name], [])
apache-2.0
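A sketch of driving the ShellWorkflowCreator above; the job values are illustrative, and the final get_built_workflow_xml() call is an assumption about the base class (its sibling workflow creators expose such an accessor), not something shown in this record.

# Illustrative only: build an Oozie shell-action workflow document.
creator = ShellWorkflowCreator()
creator.build_workflow_xml(
    'cleanup.sh',                               # becomes the <exec> element
    prepare={'delete': '/tmp/scratch'},         # <prepare> child actions
    configuration={'mapred.job.queue.name': 'default'},
    env_vars={'HADOOP_USER_NAME': 'hadoop'},    # rendered as <env-var> k=v
    arguments=['--force'],                      # one <argument> each
    files=['lib/helpers.sh'])                   # script_name is appended too
print(creator.get_built_workflow_xml())         # assumed base-class accessor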
ahmetcemturan/SFACT
skeinforge_application/skeinforge_plugins/craft_plugins/chop.py
8
10424
""" This page is in the table of contents. Chop is a script to chop a shape into svg slice layers. ==Settings== ===Add Layer Template to SVG=== Default is on. When selected, the layer template will be added to the svg output, which adds javascript control boxes. So 'Add Layer Template to SVG' should be selected when the svg will be viewed in a browser. When off, no controls will be added, the svg output will only include the fabrication paths. So 'Add Layer Template to SVG' should be deselected when the svg will be used by other software, like Inkscape. ===Add Extra Top Layer if Necessary=== Default is on. When selected, chop will add an extra layer at the very top of the object if the top of the object is more than half the layer height above the first slice. This is so the cutting tool doesn't cut too deeply through the top of the object on its first pass. ===Extra Decimal Places=== Default is two. Defines the number of extra decimal places export will output compared to the number of decimal places in the layer height. The higher the 'Extra Decimal Places', the more significant figures the output numbers will have. ===Import Coarseness=== Default is one. When a triangle mesh has holes in it, the triangle mesh slicer switches over to a slow algorithm that spans gaps in the mesh. The higher the 'Import Coarseness' setting, the wider the gaps in the mesh it will span. An import coarseness of one means it will span gaps of the edge width. ===Layer Height=== Default is 0.4 mm. Defines the height of the layer, this is the most important chop setting. ===Layers=== Chop slices from top to bottom. To get only the bottom layer, set the "Layers From" to minus one. The 'Layers From' until 'Layers To' range is a python slice. ====Layers From==== Default is zero. Defines the index of the top layer that will be chopped. If the 'Layers From' is the default zero, the carving will start from the top layer. If the 'Layers From' index is negative, then the carving will start from the 'Layers From' index above the bottom layer. ====Layers To==== Default is a huge number, which will be limited to the highest index number. Defines the index of the bottom layer that will be chopped. If the 'Layers To' index is a huge number like the default, the carving will go to the bottom of the model. If the 'Layers To' index is negative, then the carving will go to the 'Layers To' index above the bottom layer. ===Mesh Type=== Default is 'Correct Mesh'. ====Correct Mesh==== When selected, the mesh will be accurately chopped, and if a hole is found, chop will switch over to the algorithm that spans gaps. ====Unproven Mesh==== When selected, chop will use the gap spanning algorithm from the start. The problem with the gap spanning algothm is that it will span gaps, even if there is not actually a gap in the model. ===Perimeter Width=== Default is 2 mm. Defines the width of the edge. ===SVG Viewer=== Default is webbrowser. If the 'SVG Viewer' is set to the default 'webbrowser', the scalable vector graphics file will be sent to the default browser to be opened. If the 'SVG Viewer' is set to a program name, the scalable vector graphics file will be sent to that program to be opened. ==Examples== The following examples chop the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and chop.py. > python chop.py This brings up the chop dialog. > python chop.py Screw Holder Bottom.stl The chop tool is parsing the file: Screw Holder Bottom.stl .. 
The chop tool has created the file: .. Screw Holder Bottom_chop.svg """ from __future__ import absolute_import try: import psyco psyco.full() except: pass #Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from fabmetheus_utilities import svg_writer from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import math import os import sys import time __author__ = 'Enrique Perez ([email protected])' __date__ = '$Date: 2008/02/05 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getCraftedText( fileName, gcodeText = '', repository=None): "Get chopped text." if fileName.endswith('.svg'): gcodeText = archive.getTextIfEmpty(fileName, gcodeText) if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'chop'): return gcodeText carving = svg_writer.getCarving(fileName) if carving == None: return '' if repository == None: repository = ChopRepository() settings.getReadRepository(repository) return ChopSkein().getCarvedSVG( carving, fileName, repository ) def getNewRepository(): 'Get new repository.' return ChopRepository() def writeOutput(fileName, shouldAnalyze=True): "Chop a GNU Triangulated Surface file. If no fileName is specified, chop the first GNU Triangulated Surface file in this folder." startTime = time.time() print('File ' + archive.getSummarizedFileName(fileName) + ' is being chopped.') repository = ChopRepository() settings.getReadRepository(repository) chopGcode = getCraftedText( fileName, '', repository ) if chopGcode == '': return suffixFileName = fileName[ : fileName.rfind('.') ] + '_chop.svg' suffixDirectoryName = os.path.dirname(suffixFileName) suffixReplacedBaseName = os.path.basename(suffixFileName).replace(' ', '_') suffixFileName = os.path.join( suffixDirectoryName, suffixReplacedBaseName ) archive.writeFileText( suffixFileName, chopGcode ) print('The chopped file is saved as ' + archive.getSummarizedFileName(suffixFileName) ) print('It took %s to chop the file.' % euclidean.getDurationString( time.time() - startTime ) ) if shouldAnalyze: settings.openSVGPage( suffixFileName, repository.svgViewer.value ) class ChopRepository: "A class to handle the chop settings." def __init__(self): "Set the default settings, execute title & settings fileName." 
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.chop.html', self ) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getTranslatorFileTypeTuples(), 'Open File to be Chopped', self, '') self.addExtraTopLayerIfNecessary = settings.BooleanSetting().getFromValue('Add Extra Top Layer if Necessary', self, True ) self.addLayerTemplateToSVG = settings.BooleanSetting().getFromValue('Add Layer Template to SVG', self, True) self.edgeWidth = settings.FloatSpin().getFromValue( 0.4, 'Edge Width (mm):', self, 4.0, 2.0 ) self.extraDecimalPlaces = settings.FloatSpin().getFromValue(0.0, 'Extra Decimal Places (float):', self, 3.0, 2.0) self.importCoarseness = settings.FloatSpin().getFromValue( 0.5, 'Import Coarseness (ratio):', self, 2.0, 1.0 ) self.layerHeight = settings.FloatSpin().getFromValue( 0.1, 'Layer Height (mm):', self, 1.0, 0.4 ) self.layersFrom = settings.IntSpin().getFromValue( 0, 'Layers From (index):', self, 20, 0 ) self.layersTo = settings.IntSpin().getSingleIncrementFromValue( 0, 'Layers To (index):', self, 912345678, 912345678 ) self.meshTypeLabel = settings.LabelDisplay().getFromName('Mesh Type: ', self, ) importLatentStringVar = settings.LatentStringVar() self.correctMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Correct Mesh', self, True ) self.unprovenMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Unproven Mesh', self, False ) self.svgViewer = settings.StringSetting().getFromValue('SVG Viewer:', self, 'webbrowser') settings.LabelSeparator().getFromRepository(self) self.executeTitle = 'Chop' def execute(self): "Chop button has been clicked." fileNames = skeinforge_polyfile.getFileOrDirectoryTypes(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled) for fileName in fileNames: writeOutput(fileName) class ChopSkein: "A class to chop a carving." def addExtraTopLayerIfNecessary( self, carving, layerHeight, loopLayers ): "Add extra top layer if necessary." topRotatedBoundaryLayer = loopLayers[-1] cuttingSafeHeight = topRotatedBoundaryLayer.z + 0.5001 * layerHeight if cuttingSafeHeight > carving.getCarveCornerMaximum().z: return extraTopRotatedBoundaryLayer = topRotatedBoundaryLayer.getCopyAtZ( topRotatedBoundaryLayer.z + layerHeight ) loopLayers.append( extraTopRotatedBoundaryLayer ) def getCarvedSVG( self, carving, fileName, repository ): "Parse gnu triangulated surface text and store the chopped gcode." 
layerHeight = repository.layerHeight.value edgeWidth = repository.edgeWidth.value carving.setCarveLayerHeight( layerHeight ) importRadius = 0.5 * repository.importCoarseness.value * abs(edgeWidth) carving.setCarveImportRadius(max(importRadius, 0.001 * layerHeight)) carving.setCarveIsCorrectMesh( repository.correctMesh.value ) loopLayers = carving.getCarveBoundaryLayers() if len( loopLayers ) < 1: print('Warning, there are no slices for the model, this could be because the model is too small for the Layer Height.') return '' if repository.addExtraTopLayerIfNecessary.value: self.addExtraTopLayerIfNecessary( carving, layerHeight, loopLayers ) loopLayers.reverse() layerHeight = carving.getCarveLayerHeight() decimalPlacesCarried = euclidean.getDecimalPlacesCarried(repository.extraDecimalPlaces.value, layerHeight) svgWriter = svg_writer.SVGWriter( repository.addLayerTemplateToSVG.value, carving.getCarveCornerMaximum(), carving.getCarveCornerMinimum(), decimalPlacesCarried, carving.getCarveLayerHeight(), edgeWidth) truncatedRotatedBoundaryLayers = svg_writer.getTruncatedRotatedBoundaryLayers(loopLayers, repository) return svgWriter.getReplacedSVGTemplate( fileName, truncatedRotatedBoundaryLayers, 'chop', carving.getFabmetheusXML()) def main(): "Display the chop dialog." if len(sys.argv) > 1: writeOutput(' '.join(sys.argv[1 :])) else: settings.startMainLoopFromConstructor(getNewRepository()) if __name__ == "__main__": main()
agpl-3.0
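The chop module above can also be driven programmatically rather than through the dialog; a minimal sketch, assuming the Screw Holder Bottom.stl file from its own examples, with illustrative setting overrides:

# Minimal programmatic use of the chop API above: craft an SVG from an STL
# without opening the settings dialog. Setting values are illustrative.
repository = ChopRepository()
settings.getReadRepository(repository)      # load the saved defaults first
repository.layerHeight.value = 0.3          # 0.3 mm slices instead of 0.4
repository.addLayerTemplateToSVG.value = False  # plain paths, e.g. for Inkscape
svgText = getCraftedText('Screw Holder Bottom.stl', '', repository)
if svgText:
    archive.writeFileText('Screw Holder Bottom_chop.svg', svgText)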
jiangzhuo/kbengine
kbe/src/lib/python/Lib/encodings/cp775.py
272
34476
""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp775', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA 0x0096: 0x00a2, # CENT SIGN 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x009e: 0x00d7, # MULTIPLICATION SIGN 0x009f: 0x00a4, # CURRENCY SIGN 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x00a7: 0x00a6, # BROKEN BAR 0x00a8: 0x00a9, # COPYRIGHT SIGN 0x00a9: 0x00ae, # REGISTERED SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00f4: 0x00b6, # PILCROW SIGN 0x00f5: 0x00a7, # SECTION SIGN 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x00b9, # SUPERSCRIPT ONE 0x00fc: 0x00b3, # SUPERSCRIPT THREE 0x00fd: 
0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' 
# 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE '\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS '\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE '\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE '\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON '\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA '\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA '\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE '\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS '\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH 
CEDILLA '\xa2' # 0x0096 -> CENT SIGN '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE '\xa3' # 0x009c -> POUND SIGN '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE '\xd7' # 0x009e -> MULTIPLICATION SIGN '\xa4' # 0x009f -> CURRENCY SIGN '\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON '\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE '\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE '\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE '\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK '\xa6' # 0x00a7 -> BROKEN BAR '\xa9' # 0x00a8 -> COPYRIGHT SIGN '\xae' # 0x00a9 -> REGISTERED SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK '\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON '\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK '\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK '\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK '\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON '\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK '\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON '\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK '\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE '\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK '\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON '\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK '\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON '\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS 
LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) '\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE '\xb5' # 0x00e6 -> MICRO SIGN '\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE '\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA '\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA '\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA '\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA '\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA '\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON '\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA '\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK '\xad' # 0x00f0 -> SOFT HYPHEN '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS '\xb6' # 0x00f4 -> PILCROW SIGN '\xa7' # 0x00f5 -> SECTION SIGN '\xf7' # 0x00f6 -> DIVISION SIGN '\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\xb9' # 0x00fb -> SUPERSCRIPT ONE '\xb3' # 0x00fc -> SUPERSCRIPT THREE '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN 
    0x003d: 0x003d,  # EQUALS SIGN
    0x003e: 0x003e,  # GREATER-THAN SIGN
    0x003f: 0x003f,  # QUESTION MARK
    0x0040: 0x0040,  # COMMERCIAL AT
    0x0041: 0x0041,  # LATIN CAPITAL LETTER A
    0x0042: 0x0042,  # LATIN CAPITAL LETTER B
    0x0043: 0x0043,  # LATIN CAPITAL LETTER C
    0x0044: 0x0044,  # LATIN CAPITAL LETTER D
    0x0045: 0x0045,  # LATIN CAPITAL LETTER E
    0x0046: 0x0046,  # LATIN CAPITAL LETTER F
    0x0047: 0x0047,  # LATIN CAPITAL LETTER G
    0x0048: 0x0048,  # LATIN CAPITAL LETTER H
    0x0049: 0x0049,  # LATIN CAPITAL LETTER I
    0x004a: 0x004a,  # LATIN CAPITAL LETTER J
    0x004b: 0x004b,  # LATIN CAPITAL LETTER K
    0x004c: 0x004c,  # LATIN CAPITAL LETTER L
    0x004d: 0x004d,  # LATIN CAPITAL LETTER M
    0x004e: 0x004e,  # LATIN CAPITAL LETTER N
    0x004f: 0x004f,  # LATIN CAPITAL LETTER O
    0x0050: 0x0050,  # LATIN CAPITAL LETTER P
    0x0051: 0x0051,  # LATIN CAPITAL LETTER Q
    0x0052: 0x0052,  # LATIN CAPITAL LETTER R
    0x0053: 0x0053,  # LATIN CAPITAL LETTER S
    0x0054: 0x0054,  # LATIN CAPITAL LETTER T
    0x0055: 0x0055,  # LATIN CAPITAL LETTER U
    0x0056: 0x0056,  # LATIN CAPITAL LETTER V
    0x0057: 0x0057,  # LATIN CAPITAL LETTER W
    0x0058: 0x0058,  # LATIN CAPITAL LETTER X
    0x0059: 0x0059,  # LATIN CAPITAL LETTER Y
    0x005a: 0x005a,  # LATIN CAPITAL LETTER Z
    0x005b: 0x005b,  # LEFT SQUARE BRACKET
    0x005c: 0x005c,  # REVERSE SOLIDUS
    0x005d: 0x005d,  # RIGHT SQUARE BRACKET
    0x005e: 0x005e,  # CIRCUMFLEX ACCENT
    0x005f: 0x005f,  # LOW LINE
    0x0060: 0x0060,  # GRAVE ACCENT
    0x0061: 0x0061,  # LATIN SMALL LETTER A
    0x0062: 0x0062,  # LATIN SMALL LETTER B
    0x0063: 0x0063,  # LATIN SMALL LETTER C
    0x0064: 0x0064,  # LATIN SMALL LETTER D
    0x0065: 0x0065,  # LATIN SMALL LETTER E
    0x0066: 0x0066,  # LATIN SMALL LETTER F
    0x0067: 0x0067,  # LATIN SMALL LETTER G
    0x0068: 0x0068,  # LATIN SMALL LETTER H
    0x0069: 0x0069,  # LATIN SMALL LETTER I
    0x006a: 0x006a,  # LATIN SMALL LETTER J
    0x006b: 0x006b,  # LATIN SMALL LETTER K
    0x006c: 0x006c,  # LATIN SMALL LETTER L
    0x006d: 0x006d,  # LATIN SMALL LETTER M
    0x006e: 0x006e,  # LATIN SMALL LETTER N
    0x006f: 0x006f,  # LATIN SMALL LETTER O
    0x0070: 0x0070,  # LATIN SMALL LETTER P
    0x0071: 0x0071,  # LATIN SMALL LETTER Q
    0x0072: 0x0072,  # LATIN SMALL LETTER R
    0x0073: 0x0073,  # LATIN SMALL LETTER S
    0x0074: 0x0074,  # LATIN SMALL LETTER T
    0x0075: 0x0075,  # LATIN SMALL LETTER U
    0x0076: 0x0076,  # LATIN SMALL LETTER V
    0x0077: 0x0077,  # LATIN SMALL LETTER W
    0x0078: 0x0078,  # LATIN SMALL LETTER X
    0x0079: 0x0079,  # LATIN SMALL LETTER Y
    0x007a: 0x007a,  # LATIN SMALL LETTER Z
    0x007b: 0x007b,  # LEFT CURLY BRACKET
    0x007c: 0x007c,  # VERTICAL LINE
    0x007d: 0x007d,  # RIGHT CURLY BRACKET
    0x007e: 0x007e,  # TILDE
    0x007f: 0x007f,  # DELETE
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00a2: 0x0096,  # CENT SIGN
    0x00a3: 0x009c,  # POUND SIGN
    0x00a4: 0x009f,  # CURRENCY SIGN
    0x00a6: 0x00a7,  # BROKEN BAR
    0x00a7: 0x00f5,  # SECTION SIGN
    0x00a9: 0x00a8,  # COPYRIGHT SIGN
    0x00ab: 0x00ae,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00ac: 0x00aa,  # NOT SIGN
    0x00ad: 0x00f0,  # SOFT HYPHEN
    0x00ae: 0x00a9,  # REGISTERED SIGN
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b1: 0x00f1,  # PLUS-MINUS SIGN
    0x00b2: 0x00fd,  # SUPERSCRIPT TWO
    0x00b3: 0x00fc,  # SUPERSCRIPT THREE
    0x00b5: 0x00e6,  # MICRO SIGN
    0x00b6: 0x00f4,  # PILCROW SIGN
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x00b9: 0x00fb,  # SUPERSCRIPT ONE
    0x00bb: 0x00af,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00bc: 0x00ac,  # VULGAR FRACTION ONE QUARTER
    0x00bd: 0x00ab,  # VULGAR FRACTION ONE HALF
    0x00be: 0x00f3,  # VULGAR FRACTION THREE QUARTERS
    0x00c4: 0x008e,  # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x00c5: 0x008f,  # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x00c6: 0x0092,  # LATIN CAPITAL LIGATURE AE
    0x00c9: 0x0090,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x00d3: 0x00e0,  # LATIN CAPITAL LETTER O WITH ACUTE
    0x00d5: 0x00e5,  # LATIN CAPITAL LETTER O WITH TILDE
    0x00d6: 0x0099,  # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x00d7: 0x009e,  # MULTIPLICATION SIGN
    0x00d8: 0x009d,  # LATIN CAPITAL LETTER O WITH STROKE
    0x00dc: 0x009a,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x00df: 0x00e1,  # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e4: 0x0084,  # LATIN SMALL LETTER A WITH DIAERESIS
    0x00e5: 0x0086,  # LATIN SMALL LETTER A WITH RING ABOVE
    0x00e6: 0x0091,  # LATIN SMALL LIGATURE AE
    0x00e9: 0x0082,  # LATIN SMALL LETTER E WITH ACUTE
    0x00f3: 0x00a2,  # LATIN SMALL LETTER O WITH ACUTE
    0x00f5: 0x00e4,  # LATIN SMALL LETTER O WITH TILDE
    0x00f6: 0x0094,  # LATIN SMALL LETTER O WITH DIAERESIS
    0x00f7: 0x00f6,  # DIVISION SIGN
    0x00f8: 0x009b,  # LATIN SMALL LETTER O WITH STROKE
    0x00fc: 0x0081,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x0100: 0x00a0,  # LATIN CAPITAL LETTER A WITH MACRON
    0x0101: 0x0083,  # LATIN SMALL LETTER A WITH MACRON
    0x0104: 0x00b5,  # LATIN CAPITAL LETTER A WITH OGONEK
    0x0105: 0x00d0,  # LATIN SMALL LETTER A WITH OGONEK
    0x0106: 0x0080,  # LATIN CAPITAL LETTER C WITH ACUTE
    0x0107: 0x0087,  # LATIN SMALL LETTER C WITH ACUTE
    0x010c: 0x00b6,  # LATIN CAPITAL LETTER C WITH CARON
    0x010d: 0x00d1,  # LATIN SMALL LETTER C WITH CARON
    0x0112: 0x00ed,  # LATIN CAPITAL LETTER E WITH MACRON
    0x0113: 0x0089,  # LATIN SMALL LETTER E WITH MACRON
    0x0116: 0x00b8,  # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x0117: 0x00d3,  # LATIN SMALL LETTER E WITH DOT ABOVE
    0x0118: 0x00b7,  # LATIN CAPITAL LETTER E WITH OGONEK
    0x0119: 0x00d2,  # LATIN SMALL LETTER E WITH OGONEK
    0x0122: 0x0095,  # LATIN CAPITAL LETTER G WITH CEDILLA
    0x0123: 0x0085,  # LATIN SMALL LETTER G WITH CEDILLA
    0x012a: 0x00a1,  # LATIN CAPITAL LETTER I WITH MACRON
    0x012b: 0x008c,  # LATIN SMALL LETTER I WITH MACRON
    0x012e: 0x00bd,  # LATIN CAPITAL LETTER I WITH OGONEK
    0x012f: 0x00d4,  # LATIN SMALL LETTER I WITH OGONEK
    0x0136: 0x00e8,  # LATIN CAPITAL LETTER K WITH CEDILLA
    0x0137: 0x00e9,  # LATIN SMALL LETTER K WITH CEDILLA
    0x013b: 0x00ea,  # LATIN CAPITAL LETTER L WITH CEDILLA
    0x013c: 0x00eb,  # LATIN SMALL LETTER L WITH CEDILLA
    0x0141: 0x00ad,  # LATIN CAPITAL LETTER L WITH STROKE
    0x0142: 0x0088,  # LATIN SMALL LETTER L WITH STROKE
    0x0143: 0x00e3,  # LATIN CAPITAL LETTER N WITH ACUTE
    0x0144: 0x00e7,  # LATIN SMALL LETTER N WITH ACUTE
    0x0145: 0x00ee,  # LATIN CAPITAL LETTER N WITH CEDILLA
    0x0146: 0x00ec,  # LATIN SMALL LETTER N WITH CEDILLA
    0x014c: 0x00e2,  # LATIN CAPITAL LETTER O WITH MACRON
    0x014d: 0x0093,  # LATIN SMALL LETTER O WITH MACRON
    0x0156: 0x008a,  # LATIN CAPITAL LETTER R WITH CEDILLA
    0x0157: 0x008b,  # LATIN SMALL LETTER R WITH CEDILLA
    0x015a: 0x0097,  # LATIN CAPITAL LETTER S WITH ACUTE
    0x015b: 0x0098,  # LATIN SMALL LETTER S WITH ACUTE
    0x0160: 0x00be,  # LATIN CAPITAL LETTER S WITH CARON
    0x0161: 0x00d5,  # LATIN SMALL LETTER S WITH CARON
    0x016a: 0x00c7,  # LATIN CAPITAL LETTER U WITH MACRON
    0x016b: 0x00d7,  # LATIN SMALL LETTER U WITH MACRON
    0x0172: 0x00c6,  # LATIN CAPITAL LETTER U WITH OGONEK
    0x0173: 0x00d6,  # LATIN SMALL LETTER U WITH OGONEK
    0x0179: 0x008d,  # LATIN CAPITAL LETTER Z WITH ACUTE
    0x017a: 0x00a5,  # LATIN SMALL LETTER Z WITH ACUTE
    0x017b: 0x00a3,  # LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x017c: 0x00a4,  # LATIN SMALL LETTER Z WITH DOT ABOVE
    0x017d: 0x00cf,  # LATIN CAPITAL LETTER Z WITH CARON
    0x017e: 0x00d8,  # LATIN SMALL LETTER Z WITH CARON
    0x2019: 0x00ef,  # RIGHT SINGLE QUOTATION MARK
    0x201c: 0x00f2,  # LEFT DOUBLE QUOTATION MARK
    0x201d: 0x00a6,  # RIGHT DOUBLE QUOTATION MARK
    0x201e: 0x00f7,  # DOUBLE LOW-9 QUOTATION MARK
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
}
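The table above appears to be the encoding table of a single-byte charmap codec (Unicode ordinal to byte value; the layout matches an OEM Baltic codepage such as CP775, though that is an inference from the visible tail). A minimal sketch of how such a table is typically consumed; the name encoding_map and the helper are assumptions, not part of the file:

import codecs

def encode_with_map(text, encoding_map):
    # charmap_encode looks each Unicode ordinal up in the table and, with
    # 'strict' error handling, raises UnicodeEncodeError for characters
    # that have no entry in the map.
    data, consumed = codecs.charmap_encode(text, 'strict', encoding_map)
    return data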
lgpl-3.0
chriskmanx/qmole
QMOLEDEV/boost_1_49_0/libs/python/pyste/src/Pyste/pyste.py
54
14022
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

"""
Pyste version %s

Usage:
    pyste [options] interface-files

where options are:
    --module=<name>         The name of the module that will be generated;
                            defaults to the first interface filename, without
                            the extension.
    -I <path>               Add an include path
    -D <symbol>             Define symbol
    --multiple              Create various cpps, instead of only one
                            (useful during development)
    --out=<name>            Specify output filename (default: <module>.cpp)
                            in --multiple mode, this will be a directory
    --no-using              Do not declare "using namespace boost";
                            use explicit declarations instead
    --pyste-ns=<name>       Set the namespace where new types will be declared;
                            default is the empty namespace
    --debug                 Writes the xml for each file parsed in the current
                            directory
    --cache-dir=<dir>       Directory for cache files (speeds up future runs)
    --only-create-cache     Recreates all caches (doesn't generate code).
    --generate-main         Generates the _main.cpp file (in multiple mode)
    --file-list             A file with one pyste file per line. Use as a
                            substitute for passing the files in the command
                            line.
    --gccxml-path=<path>    Path to gccxml executable (default: gccxml)
    --no-default-include    Do not use INCLUDE environment variable for
                            include files to pass along gccxml.
    -h, --help              Print this help and exit
    -v, --version           Print version information
"""

import sys
import os
import getopt
import exporters
import SingleCodeUnit
import MultipleCodeUnit
import infos
import exporterutils
import settings
import gc
from policies import *
from CppParser import CppParser, CppParserError
import time
import declarations

__version__ = '0.9.30'


def RecursiveIncludes(include):
    'Return a list containing the include dir and all its subdirectories'
    dirs = [include]
    def visit(arg, dir, names):
        # ignore CVS dirs
        if os.path.split(dir)[1] != 'CVS':
            dirs.append(dir)
    os.path.walk(include, visit, None)
    return dirs


def GetDefaultIncludes():
    if 'INCLUDE' in os.environ:
        include = os.environ['INCLUDE']
        return include.split(os.pathsep)
    else:
        return []


def ProcessIncludes(includes):
    if sys.platform == 'win32':
        index = 0
        for include in includes:
            includes[index] = include.replace('\\', '/')
            index += 1


def ReadFileList(filename):
    f = file(filename)
    files = []
    try:
        for line in f:
            line = line.strip()
            if line:
                files.append(line)
    finally:
        f.close()
    return files


def ParseArguments():

    def Usage():
        print __doc__ % __version__
        sys.exit(1)

    try:
        options, files = getopt.getopt(
            sys.argv[1:],
            'R:I:D:vh',
            ['module=', 'multiple', 'out=', 'no-using', 'pyste-ns=', 'debug',
             'cache-dir=', 'only-create-cache', 'version', 'generate-main',
             'file-list=', 'help', 'gccxml-path=', 'no-default-include'])
    except getopt.GetoptError, e:
        print
        print 'ERROR:', e
        Usage()

    default_includes = GetDefaultIncludes()
    includes = []
    defines = []
    module = None
    out = None
    multiple = False
    cache_dir = None
    create_cache = False
    generate_main = False
    gccxml_path = 'gccxml'

    for opt, value in options:
        if opt == '-I':
            includes.append(value)
        elif opt == '-D':
            defines.append(value)
        elif opt == '-R':
            includes.extend(RecursiveIncludes(value))
        elif opt == '--module':
            module = value
        elif opt == '--out':
            out = value
        elif opt == '--no-using':
            settings.namespaces.python = 'boost::python::'
            settings.USING_BOOST_NS = False
        elif opt == '--pyste-ns':
            settings.namespaces.pyste = value + '::'
        elif opt == '--debug':
            settings.DEBUG = True
        elif opt == '--multiple':
            multiple = True
        elif opt == '--cache-dir':
            cache_dir = value
        elif opt == '--only-create-cache':
            create_cache = True
        elif opt == '--file-list':
            files += ReadFileList(value)
        elif opt in ['-h', '--help']:
            Usage()
        elif opt in ['-v', '--version']:
            print 'Pyste version %s' % __version__
            sys.exit(2)
        elif opt == '--generate-main':
            generate_main = True
        elif opt == '--gccxml-path':
            gccxml_path = value
        elif opt == '--no-default-include':
            default_includes = []
        else:
            print 'Unknown option:', opt
            Usage()

    includes[0:0] = default_includes
    if not files:
        Usage()
    if not module:
        module = os.path.splitext(os.path.basename(files[0]))[0]
    if not out:
        out = module
        if not multiple:
            out += '.cpp'
    for file in files:
        d = os.path.dirname(os.path.abspath(file))
        if d not in sys.path:
            sys.path.append(d)

    if create_cache and not cache_dir:
        print 'Error: Use --cache-dir to indicate where to create the cache files!'
        Usage()
        sys.exit(3)

    if generate_main and not multiple:
        print 'Error: --generate-main only valid in multiple mode.'
        Usage()
        sys.exit(3)

    ProcessIncludes(includes)
    return includes, defines, module, out, files, multiple, cache_dir, \
        create_cache, generate_main, gccxml_path


def PCHInclude(*headers):
    code = '\n'.join(['#include <%s>' % x for x in headers])
    infos.CodeInfo(code, 'pchinclude')


def CreateContext():
    'create the context where an interface file will be executed'
    context = {}
    context['Import'] = Import
    # infos
    context['Function'] = infos.FunctionInfo
    context['Class'] = infos.ClassInfo
    context['Include'] = lambda header: infos.CodeInfo('#include <%s>\n' % header, 'include')
    context['PCHInclude'] = PCHInclude
    context['Template'] = infos.ClassTemplateInfo
    context['Enum'] = infos.EnumInfo
    context['AllFromHeader'] = infos.HeaderInfo
    context['Var'] = infos.VarInfo
    # functions
    context['rename'] = infos.rename
    context['set_policy'] = infos.set_policy
    context['exclude'] = infos.exclude
    context['set_wrapper'] = infos.set_wrapper
    context['use_shared_ptr'] = infos.use_shared_ptr
    context['use_auto_ptr'] = infos.use_auto_ptr
    context['holder'] = infos.holder
    context['add_method'] = infos.add_method
    context['final'] = infos.final
    context['export_values'] = infos.export_values
    # policies
    context['return_internal_reference'] = return_internal_reference
    context['with_custodian_and_ward'] = with_custodian_and_ward
    context['return_value_policy'] = return_value_policy
    context['reference_existing_object'] = reference_existing_object
    context['copy_const_reference'] = copy_const_reference
    context['copy_non_const_reference'] = copy_non_const_reference
    context['return_opaque_pointer'] = return_opaque_pointer
    context['manage_new_object'] = manage_new_object
    context['return_by_value'] = return_by_value
    context['return_self'] = return_self
    # utils
    context['Wrapper'] = exporterutils.FunctionWrapper
    context['declaration_code'] = lambda code: infos.CodeInfo(code, 'declaration-outside')
    context['module_code'] = lambda code: infos.CodeInfo(code, 'module')
    context['class_code'] = infos.class_code
    return context


def Begin():
    # parse arguments
    includes, defines, module, out, interfaces, multiple, cache_dir, \
        create_cache, generate_main, gccxml_path = ParseArguments()

    # run pyste scripts
    for interface in interfaces:
        ExecuteInterface(interface)

    # create the parser
    parser = CppParser(includes, defines, cache_dir, declarations.version, gccxml_path)
    try:
        if not create_cache:
            if not generate_main:
                return GenerateCode(parser, module, out, interfaces, multiple)
            else:
                return GenerateMain(module, out, OrderInterfaces(interfaces))
        else:
            return CreateCaches(parser)
    finally:
        parser.Close()


def CreateCaches(parser):
    # There is one cache file per interface so we organize the headers
    # by interfaces.  For each interface collect the tails from the
    # exporters sharing the same header.
    tails = JoinTails(exporters.exporters)

    # now for each interface file take each header, and using the tail
    # get the declarations and cache them.
    for interface, header in tails:
        tail = tails[(interface, header)]
        declarations = parser.ParseWithGCCXML(header, tail)
        cachefile = parser.CreateCache(header, interface, tail, declarations)
        print 'Cached', cachefile
    return 0


_imported_count = {}  # interface => count


def ExecuteInterface(interface):
    old_interface = exporters.current_interface
    if not os.path.exists(interface):
        if old_interface and os.path.exists(old_interface):
            d = os.path.dirname(old_interface)
            interface = os.path.join(d, interface)
    if not os.path.exists(interface):
        raise IOError, "Cannot find interface file %s." % interface

    _imported_count[interface] = _imported_count.get(interface, 0) + 1
    exporters.current_interface = interface
    context = CreateContext()
    context['INTERFACE_FILE'] = os.path.abspath(interface)
    execfile(interface, context)
    exporters.current_interface = old_interface


def Import(interface):
    exporters.importing = True
    ExecuteInterface(interface)
    exporters.importing = False


def JoinTails(exports):
    '''Returns a dict of {(interface, header): tail}, where tail is the
    joining of all tails of all exports for the header.
    '''
    tails = {}
    for export in exports:
        interface = export.interface_file
        header = export.Header()
        tail = export.Tail() or ''
        if (interface, header) in tails:
            all_tails = tails[(interface, header)]
            all_tails += '\n' + tail
            tails[(interface, header)] = all_tails
        else:
            tails[(interface, header)] = tail
    return tails


def OrderInterfaces(interfaces):
    interfaces_order = [(_imported_count[x], x) for x in interfaces]
    interfaces_order.sort()
    interfaces_order.reverse()
    return [x for _, x in interfaces_order]


def GenerateMain(module, out, interfaces):
    codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
    codeunit.GenerateMain(interfaces)
    return 0


def GenerateCode(parser, module, out, interfaces, multiple):
    # prepare to generate the wrapper code
    if multiple:
        codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
    else:
        codeunit = SingleCodeUnit.SingleCodeUnit(module, out)

    # stop referencing the exporters here
    exports = exporters.exporters
    exporters.exporters = None
    exported_names = dict([(x.Name(), None) for x in exports])

    # order the exports
    order = {}
    for export in exports:
        if export.interface_file in order:
            order[export.interface_file].append(export)
        else:
            order[export.interface_file] = [export]
    exports = []
    interfaces_order = OrderInterfaces(interfaces)
    for interface in interfaces_order:
        exports.extend(order[interface])
    del order
    del interfaces_order

    # now generate the code in the correct order
    #print exported_names
    tails = JoinTails(exports)
    for i in xrange(len(exports)):
        export = exports[i]
        interface = export.interface_file
        header = export.Header()
        if header:
            tail = tails[(interface, header)]
            declarations, parsed_header = parser.Parse(header, interface, tail)
        else:
            declarations = []
            parsed_header = None
        ExpandTypedefs(declarations, exported_names)
        export.SetDeclarations(declarations)
        export.SetParsedHeader(parsed_header)
        if multiple:
            codeunit.SetCurrent(export.interface_file, export.Name())
        export.GenerateCode(codeunit, exported_names)
        # force collect of cyclic references
        exports[i] = None
        del declarations
        del export
        gc.collect()

    # finally save the code unit
    codeunit.Save()
    if not multiple:
        print 'Module %s generated' % module
    return 0


def ExpandTypedefs(decls, exported_names):
    '''Check if the names in exported_names are a typedef, and add the
    real class name in the dict.
    '''
    for name in exported_names.keys():
        for decl in decls:
            if isinstance(decl, declarations.Typedef):
                exported_names[decl.type.FullName()] = None


def UsePsyco():
    'Tries to use psyco if possible'
    try:
        import psyco
        psyco.profile()
    except:
        pass


def main():
    start = time.clock()
    UsePsyco()
    status = Begin()
    print '%0.2f seconds' % (time.clock() - start)
    sys.exit(status)


if __name__ == '__main__':
    main()
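For context, a hypothetical interface file that the script above would consume. Every concrete name here (hello.pyste, World, world.h, PyWorld) is invented for illustration; the callables are only the ones installed by CreateContext(), so treat this as a sketch rather than verified Pyste usage:

# hello.pyste -- processed with: pyste --module=hello hello.pyste
W = Class('World', 'world.h')  # context['Class'] -> infos.ClassInfo
rename(W, 'PyWorld')           # context['rename'] -> infos.rename
Include('extra_header.h')      # emits '#include <extra_header.h>'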
gpl-3.0
bearstech/ansible
test/units/modules/network/nxos/test_nxos_config.py
47
4988
#!/usr/bin/env python
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_config
from .nxos_module import TestNxosModule, load_fixture, set_module_args


class TestNxosConfigModule(TestNxosModule):

    module = nxos_config

    def setUp(self):
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, device=''):
        self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
        self.load_config.return_value = None

    def test_nxos_config_no_change(self):
        args = dict(lines=['hostname localhost'])
        set_module_args(args)
        result = self.execute_module()

    def test_nxos_config_src(self):
        args = dict(src=load_fixture('nxos_config', 'candidate.cfg'))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01', 'interface Ethernet1',
                  'description test interface', 'no shutdown', 'ip routing']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_nxos_config_lines(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_nxos_config_before(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
                    before=['before command'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['before command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        self.assertEqual('before command', result['commands'][0])

    def test_nxos_config_after(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
                    after=['after command'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['after command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        self.assertEqual('after command', result['commands'][-1])

    def test_nxos_config_parents(self):
        args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'],
                    parents=['interface Ethernet10'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
        self.assertEqual(config, result['commands'], result['commands'])

    def test_nxos_config_src_and_lines_fails(self):
        args = dict(src='foo', lines='foo')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_match_exact_requires_lines(self):
        args = dict(match='exact')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_match_strict_requires_lines(self):
        args = dict(match='strict')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_replace_block_requires_lines(self):
        args = dict(replace='block')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_replace_config_requires_src(self):
        args = dict(replace='config')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_backup_returns__backup__(self):
        args = dict(backup=True)
        set_module_args(args)
        result = self.execute_module()
        self.assertIn('__backup__', result)
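A sketch of one more test in the same style. It assumes the module accepts match='none' (that choice exists in the sibling *_config network modules, but its support here is an assumption, not taken from this file):

    def test_nxos_config_match_none(self):
        lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
        set_module_args(dict(lines=lines, match='none'))
        # with match='none' the requested lines are pushed without being
        # diffed against the config.cfg fixture, so both should appear
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(lines), sorted(result['commands']), result['commands'])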
gpl-3.0
sup95/zulip
zerver/views/webhooks/circleci.py
11
1853
# Webhooks for external integrations.
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Any, Dict

from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client

import ujson


CIRCLECI_SUBJECT_TEMPLATE = u'{repository_name}'
CIRCLECI_MESSAGE_TEMPLATE = u'[Build]({build_url}) triggered by {username} on {branch} branch {status}.'

FAILED_STATUS = 'failed'

@api_key_only_webhook_view('CircleCI')
@has_request_variables
def api_circleci_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
                         stream=REQ(default='circleci')):
    # type: (HttpRequest, UserProfile, Client, Dict[str, Any], text_type) -> HttpResponse
    payload = payload['payload']
    subject = get_subject(payload)
    body = get_body(payload)
    check_send_message(user_profile, client, 'stream', [stream], subject, body)
    return json_success()

def get_subject(payload):
    # type: (Dict[str, Any]) -> text_type
    return CIRCLECI_SUBJECT_TEMPLATE.format(repository_name=payload['reponame'])

def get_body(payload):
    # type: (Dict[str, Any]) -> text_type
    data = {
        'build_url': payload['build_url'],
        'username': payload['username'],
        'branch': payload['branch'],
        'status': get_status(payload)
    }
    return CIRCLECI_MESSAGE_TEMPLATE.format(**data)

def get_status(payload):
    # type: (Dict[str, Any]) -> text_type
    status = payload['status']
    if payload['previous']['status'] == FAILED_STATUS and status == FAILED_STATUS:
        return u'is still failing'
    if status == 'success':
        return u'succeeded'
    return status
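To make the field accesses above concrete, a sample of the inner payload shape the helpers expect. Only the keys come from the code; every value is invented for illustration:

payload = {
    'reponame': 'zulip',                                      # used by get_subject()
    'build_url': 'https://circleci.com/gh/zulip/zulip/1234',  # used by get_body()
    'username': 'hamlet',
    'branch': 'master',
    'status': 'success',
    'previous': {'status': 'failed'},                         # used by get_status()
}
# get_subject(payload) == u'zulip'
# get_body(payload) == u'[Build](https://circleci.com/gh/zulip/zulip/1234) '
#                      u'triggered by hamlet on master branch succeeded.'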
apache-2.0
amitdubey90/lastdayasafreeman
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py
2767
2174
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applies a fix to CR LF TAB handling in xml.dom.

Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""

import xml.dom.minidom


def _Replacement_write_data(writer, data, is_attrib=False):
  """Writes datachars to writer."""
  data = data.replace("&", "&amp;").replace("<", "&lt;")
  data = data.replace("\"", "&quot;").replace(">", "&gt;")
  if is_attrib:
    data = data.replace(
        "\r", "&#xD;").replace(
        "\n", "&#xA;").replace(
        "\t", "&#x9;")
  writer.write(data)


def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  # indent = current indentation
  # addindent = indentation to add to higher levels
  # newl = newline string
  writer.write(indent + "<" + self.tagName)

  attrs = self._get_attributes()
  a_names = attrs.keys()
  a_names.sort()

  for a_name in a_names:
    writer.write(" %s=\"" % a_name)
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    writer.write("/>%s" % newl)


class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""

  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml

    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None

  def __del__(self):
    self.Cleanup()
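A minimal usage sketch of the patch above; the document and the attribute value built here are invented for illustration:

import xml.dom.minidom

doc = xml.dom.minidom.Document()
node = doc.createElement('Tool')
node.setAttribute('CommandLine', 'echo a\necho b')  # embedded newline
doc.appendChild(node)

fix = XmlFix()              # install the replacement writers
try:
    text = doc.toxml()      # the newline survives as &#xA; in the attribute
finally:
    fix.Cleanup()           # restore the original minidom functions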
apache-2.0