repo_name stringlengths 6-100 | path stringlengths 4-294 | copies stringlengths 1-5 | size stringlengths 4-6 | content stringlengths 606-896k | license stringclasses 15 values
---|---|---|---|---|---|
PrismTech/opensplice | build/scripts/overnight/python/DBMSConnect.py | 2 | 12435 | import sys
import os
import json
import shutil
import subprocess
import fileinput
import platform
import time
from shutil import copy
import example_logparser
from example_exceptions import LogCheckFail
from Example import Example
from Example import ExeThread
import pdb
"""
Class specific to the DBMSConnect example, which is very different
from all other examples: it has a different directory structure and
runs more than a simple publisher/subscriber.
"""
class dbmsconnect (Example):
def __init__(self, host, logger):
super(dbmsconnect, self).__init__(host, logger, "dbmsconnect", "services")
with open ('examples.json') as data_file:
data = json.load(data_file)
self.odbcMsgBoard_params = data["services"]["dbmsconnect"]["params"]["odbcMsgBoard_params"]
self.odbcChatter1_params = data["services"]["dbmsconnect"]["params"]["odbcChatter1_params"]
self.odbcChatter2_params = data["services"]["dbmsconnect"]["params"]["odbcChatter2_params"]
self.cppChatter1_params = data["services"]["dbmsconnect"]["params"]["cppChatter1_params"]
self.cppChatter2_params = data["services"]["dbmsconnect"]["params"]["cppChatter2_params"]
self.odbcChatterQuit_params = data["services"]["dbmsconnect"]["params"]["odbcChatterQuit_params"]
self.cppChatterQuit_params = data["services"]["dbmsconnect"]["params"]["cppChatterQuit_params"]
super(dbmsconnect, self).setPath(os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', 'SQL', 'C++', 'ODBC'))
if os.environ['EXRUNTYPE'] == "shm":
self.uri = "file://" + os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', self.shm_uri)
else:
self.uri = "file://" + os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', self.sp_uri)
self.runDBMSConnect = self.host.runExample(self.expath, self.name, "")
def runExample(self):
print "In runExample for " + self.expath + ": " + self.name
currPath = os.getcwd()
try:
self.exdir = "servicesdbmsconnectSQLCPPODBC"
exSfx = ""
if self.host.isWindows():
exSfx = ".exe"
os.putenv("ODBC_LIB_NAME", "odbc32")
else:
os.putenv("ODBC_LIB_NAME", "odbc")
msg = "NONE"
result = "PASS"
dsn = self.odbcMsgBoard_params[0]
os.putenv("MY_DSN", dsn);
os.environ["MY_DSN"]= dsn;
os.putenv("OSPL_URI", self.uri)
os.environ["OSPL_URI"] = self.uri
try:
self.convertConfig()
self.setLogPathAndLogs("", "")
odbcMsgBoardLog = os.path.join(self.pPath, 'odbcMsgBoard.log')
odbcChatter1Log = os.path.join(self.pPath, 'odbcChatter1.log')
odbcChatter2Log = os.path.join(self.pPath, 'odbcChatter2.log')
odbcChatterQuitLog = os.path.join(self.pPath, 'odbcChatterQuit.log')
cppMsgBoardLog = os.path.join(self.pPath, 'cppMsgBoard.log')
cppChatter1Log = os.path.join(self.pPath, 'cppChatter1.log')
cppChatter2Log = os.path.join(self.pPath, 'cppChatter2.log')
cppChatterQuitLog = os.path.join(self.pPath, 'cppChatterQuit.log')
with open ('examples.json') as data_file:
data = json.load(data_file)
odbcMsgBoardName = data[self.expath][self.name]["executables"]["odbc"]["msgBoardName"]
odbcChatterName = data[self.expath][self.name]["executables"]["odbc"]["chatterName"]
cppMsgBoardName = data[self.expath][self.name]["executables"]["cpp"]["msgBoardName"]
cppChatterName = data[self.expath][self.name]["executables"]["cpp"]["chatterName"]
odbcmsgboard_conds_file = data[self.expath][self.name]["log_conditions_file"]["odbcmsgboard_conds"]
cppmsgboard_conds_file = data[self.expath][self.name]["log_conditions_file"]["msgboard_conds"]
odbcchatter_conds_file = data[self.expath][self.name]["log_conditions_file"]["odbcchatter_conds"]
chatter_conds_file = data[self.expath][self.name]["log_conditions_file"]["chatter_conds"]
odbcmsgboard_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', odbcmsgboard_conds_file)
odbcchatter_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', odbcchatter_conds_file)
cppmsgboard_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', cppmsgboard_conds_file)
cppchatter_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', chatter_conds_file)
if odbcMsgBoardName != "":
if self.classpath == "":
odbcMsgBoardExe = os.path.join(self.pPath, odbcMsgBoardName) + exSfx
if not os.path.isfile (odbcMsgBoardExe):
msg = "MissingExecutable: " + odbcMsgBoardExe
else:
odbcMsgBoardExe = odbcMsgBoardName
if odbcChatterName != "":
if self.classpath == "":
odbcChatterNameExe = os.path.join(self.pPath, odbcChatterName) + exSfx
if not os.path.isfile (odbcChatterNameExe):
msg = "MissingExecutable: " + odbcChatterNameExe
else:
odbcChatterNameExe = odbcChatterName
cppPath = os.path.join(os.environ['OSPL_HOME'], 'examples', "dcps", "Tutorial", "cpp", "standalone")
if cppMsgBoardName != "":
if self.classpath == "":
cppMsgBoardExe = os.path.join(cppPath, cppMsgBoardName) + exSfx
if not os.path.isfile (cppMsgBoardExe):
msg = "MissingExecutable: " + cppMsgBoardExe
else:
cppMsgBoardExe = cppMsgBoardName
if cppChatterName != "":
if self.classpath == "":
cppChatterNameExe = os.path.join(cppPath, cppChatterName) + exSfx
if not os.path.isfile (cppChatterNameExe):
msg = "MissingExecutable: " + cppChatterNameExe
else:
cppChatterNameExe = cppChatterName
if msg == "NONE":
odbcMsgBoard_Thread = ExeThread(self.classpath, odbcMsgBoardLog, "", odbcMsgBoardExe, self.odbcMsgBoard_params, self.example_timeout * 2)
odbcChatter1_Thread = ExeThread(self.classpath, odbcChatter1Log, "", odbcChatterNameExe, self.odbcChatter1_params, self.example_timeout)
odbcChatter2_Thread = ExeThread(self.classpath, odbcChatter2Log, "", odbcChatterNameExe, self.odbcChatter2_params, self.example_timeout)
cppMsgBoard_Thread = ExeThread(self.classpath, cppMsgBoardLog, "", cppMsgBoardExe, "", self.example_timeout * 2)
cppChatter1_Thread = ExeThread(self.classpath, cppChatter1Log, "", cppChatterNameExe, self.cppChatter1_params, self.example_timeout)
cppChatter2_Thread = ExeThread(self.classpath, cppChatter2Log, "", cppChatterNameExe, self.cppChatter2_params, self.example_timeout)
odbcChatterQuit_Thread = ExeThread(self.classpath, odbcChatterQuitLog, "", odbcChatterNameExe, self.odbcChatterQuit_params, self.example_timeout)
cppChatterQuit_Thread = ExeThread(self.classpath, cppChatterQuitLog, "", cppChatterNameExe, self.cppChatterQuit_params, self.example_timeout)
os.chdir(self.pPath)
self.startOSPL()
cppMsgBoard_Thread.start()
odbcMsgBoard_Thread.start()
time.sleep(5)
odbcChatter1_Thread.start()
odbcChatter2_Thread.start()
cppChatter1_Thread.start()
cppChatter2_Thread.start()
odbcChatter1_Thread.join(self.example_timeout)
odbcChatter2_Thread.join(self.example_timeout)
cppChatter1_Thread.join(self.example_timeout)
cppChatter2_Thread.join(self.example_timeout)
time.sleep(10)
odbcChatterQuit_Thread.start()
cppChatterQuit_Thread.start()
odbcChatterQuit_Thread.join(self.example_timeout)
cppChatterQuit_Thread.join(self.example_timeout)
cppMsgBoard_Thread.join(self.example_timeout)
odbcMsgBoard_Thread.join(self.example_timeout)
except Exception as ex:
msg = "Exception running ", str(ex)
try:
self.stopOSPL()
except Exception as ex:
print "Exception stopping OpenSplice ", str(ex)
if msg == "NONE":
try:
# Allow time for all messages to be written to the log
time.sleep (15)
super(dbmsconnect, self).copyLogs()
if os.path.isfile (self.ospl_error_log):
msg = "ospl-error.log found"
print "checking odbcMsgBoardLog with odbcmsgboard_conds", odbcMsgBoardLog, odbcmsgboard_conds
self.checkResults(odbcMsgBoardLog, odbcmsgboard_conds)
print "checking odbcChatter1Log with odbcchatter_conds", odbcChatter1Log, odbcchatter_conds
self.checkResults(odbcChatter1Log, odbcchatter_conds)
print "checking odbcChatter2Log with odbcchatter_conds", odbcChatter2Log, odbcchatter_conds
self.checkResults(odbcChatter2Log, odbcchatter_conds)
self.checkResults(cppMsgBoardLog, cppmsgboard_conds)
self.checkResults(cppChatter1Log, cppchatter_conds)
self.checkResults(cppChatter2Log, cppchatter_conds)
self.checkOSPLInfoLog(self.ospl_info_log)
except LogCheckFail as lf:
reason = str(lf)
if "OpenSpliceDDS Warnings" in reason:
msg = "LogCheckFail: OpenSpliceDDS Warnings in ospl-info.log"
else:
msg = "LogCheckFail: " + str(lf)
except Exception:
msg = "Exception checking logs " + str(sys.exc_info()[0])
logdir = os.path.join(os.environ['LOGDIR'], "examples", "run_" + os.environ['EXRUNTYPE'], self.exdir)
dbmsconnLog = os.path.join(self.pPath, 'dbmsconnect.log')
print "dbmsconnect.log is ", dbmsconnLog
copy(dbmsconnLog, logdir)
if msg != "NONE":
result = "FAIL"
try:
self.writeResult (result, self.exdir, "", msg)
except Exception as ex:
print "Exception writing result", str(ex)
try:
self.cleanUp()
except Exception as ex:
print "Exception cleaning up", str(ex)
except Exception as ex:
print "Unexpected exception ", str(ex)
finally:
os.chdir(currPath)
def convertConfig(self):
if os.environ['EXRUNTYPE'] == "shm":
uri = self.shm_uri
else:
uri = self.sp_uri
fcfg = os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', uri)
forig = os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', uri+'.orig')
os.rename(fcfg, forig)
if self.host.name != "default":
hn = self.host.name
else:
hn = platform.uname()[1]
prefix = hn[:16].replace('-', '_') + '_'
fout = open(fcfg, "w")
for line in fileinput.input(forig):
fout.write(line.replace("Sql", prefix))
fout.close()
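# Illustrative sketch (not part of the original runner): convertConfig()
# rewrites the OSPL config so DBMSConnect names become unique per host.
# Assuming a hypothetical host "build-host-01" and a hypothetical config
# line containing "Sql":
#
#     hn = "build-host-01"
#     prefix = hn[:16].replace('-', '_') + '_'     # -> 'build_host_01_'
#     "<name>SqlChatMessage</name>".replace("Sql", prefix)
#     # -> '<name>build_host_01_ChatMessage</name>'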
| gpl-3.0 |
MakeHer/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_index.py | 25 | 36973 | """
Unit tests for getting the list of courses and the course outline.
"""
import ddt
import json
import lxml
import datetime
import mock
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.test.utils import override_settings
from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_library_url, add_instructor, reverse_usage_url
from contentstore.views.course import (
course_outline_initial_state, reindex_course_and_check_access, _deprecated_blocks_info
)
from contentstore.views.item import create_xblock_info, VisibilityState
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from opaque_keys.edx.locator import CourseLocator
from search.api import perform_search
from student.auth import has_course_author_access
from student.tests.factories import UserFactory
from util.date_utils import get_default_time_display
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory
class TestCourseIndex(CourseTestCase):
"""
Unit tests for getting the list of courses and the course outline.
"""
def setUp(self):
"""
Add a course with odd characters in the fields
"""
super(TestCourseIndex, self).setUp()
# had a problem where index showed course but has_access failed to retrieve it for non-staff
self.odd_course = CourseFactory.create(
org='test.org_1-2',
number='test-2.3_course',
display_name='dotted.course.name-2',
)
def check_index_and_outline(self, authed_client):
"""
Test getting the list of courses and then pulling up their outlines
"""
index_url = '/home/'
index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
course_link_eles = parsed_html.find_class('course-link')
self.assertGreaterEqual(len(course_link_eles), 2)
for link in course_link_eles:
self.assertRegexpMatches(
link.get("href"),
'course/{}'.format(settings.COURSE_KEY_PATTERN)
)
# now test that url
outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
# ensure it has the expected 2 self referential links
outline_parsed = lxml.html.fromstring(outline_response.content)
outline_link = outline_parsed.find_class('course-link')[0]
self.assertEqual(outline_link.get("href"), link.get("href"))
course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))
def test_libraries_on_course_index(self):
"""
Test getting the list of libraries from the course listing page
"""
# Add a library:
lib1 = LibraryFactory.create()
index_url = '/home/'
index_response = self.client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
library_link_elements = parsed_html.find_class('library-link')
self.assertEqual(len(library_link_elements), 1)
link = library_link_elements[0]
self.assertEqual(
link.get("href"),
reverse_library_url('library_handler', lib1.location.library_key),
)
# now test that url
outline_response = self.client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
self.assertEqual(outline_response.status_code, 200)
def test_is_staff_access(self):
"""
Test that people with is_staff see the courses and can navigate into them
"""
self.check_index_and_outline(self.client)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
outline_url = reverse_course_url('course_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_course_staff_access(self):
"""
Make and register course_staff and ensure they can access the courses
"""
course_staff_client, course_staff = self.create_non_staff_authed_user_client()
for course in [self.course, self.odd_course]:
permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
self.client.post(
permission_url,
data=json.dumps({"role": "staff"}),
content_type="application/json",
HTTP_ACCEPT="application/json",
)
# test access
self.check_index_and_outline(course_staff_client)
def test_json_responses(self):
outline_url = reverse_course_url('course_handler', self.course.id)
chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
subsection = ItemFactory.create(
parent_location=lesson.location,
category='vertical',
display_name='Subsection 1'
)
ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def test_notifications_handler_get(self):
state = CourseRerunUIStateManager.State.FAILED
action = CourseRerunUIStateManager.ACTION
should_display = True
# try when no notification exists
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': 1,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
# verify that a bad request is returned when no notification exists
self.assertEquals(resp.status_code, 400)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=self.course.id,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=UserFactory(),
should_display=should_display
)
# try to get information on this notification
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.assertEquals(json_response['state'], state)
self.assertEquals(json_response['action'], action)
self.assertEquals(json_response['should_display'], should_display)
def test_notifications_handler_dismiss(self):
state = CourseRerunUIStateManager.State.FAILED
should_display = True
rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')
# add an instructor to this course
user2 = UserFactory()
add_instructor(rerun_course_key, self.user, user2)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=rerun_course_key,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=user2,
should_display=should_display
)
# try to get information on this notification
notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.delete(notification_dismiss_url)
self.assertEquals(resp.status_code, 200)
with self.assertRaises(CourseRerunState.DoesNotExist):
# delete notifications that are dismissed
CourseRerunState.objects.get(id=rerun_state.id)
self.assertFalse(has_course_author_access(user2, rerun_course_key))
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
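# Sketch of the JSON shape validated above (field values hypothetical):
#
#     {
#         "id": "<usage key>",
#         "display_name": "Week 1",
#         "category": "chapter",
#         "published": True,
#         "child_info": {"children": [<nodes of this same shape>]},
#     }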
def test_course_updates_invalid_url(self):
"""
Tests the error conditions for the invalid course updates URL.
"""
# Testing the response code by passing slash separated course id whose format is valid but no course
# having this id exists.
invalid_course_key = '{}_blah_blah_blah'.format(self.course.id)
course_updates_url = reverse_course_url('course_info_handler', invalid_course_key)
response = self.client.get(course_updates_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course id whose format is valid but no course
# having this id exists.
split_course_key = CourseLocator(org='orgASD', course='course_01213', run='Run_0_hhh_hhh_hhh')
course_updates_url_split = reverse_course_url('course_info_handler', split_course_key)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
# Testing the response by passing split course id whose format is invalid.
invalid_course_id = 'invalid.course.key/{}'.format(split_course_key)
course_updates_url_split = reverse_course_url('course_info_handler', invalid_course_id)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
def test_course_index_invalid_url(self):
"""
Tests the error conditions for the invalid course index URL.
"""
# Testing the response code by passing slash separated course key, no course
# having this key exists.
invalid_course_key = '{}_some_invalid_run'.format(self.course.id)
course_outline_url = reverse_course_url('course_handler', invalid_course_key)
response = self.client.get_html(course_outline_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course key, no course
# having this key exists.
split_course_key = CourseLocator(org='invalid_org', course='course_01111', run='Run_0_invalid')
course_outline_url_split = reverse_course_url('course_handler', split_course_key)
response = self.client.get_html(course_outline_url_split)
self.assertEqual(response.status_code, 404)
def test_course_outline_with_display_course_number_as_none(self):
"""
Tests course outline when 'display_coursenumber' field is none.
"""
# Change 'display_coursenumber' field to None and update the course.
self.course.display_coursenumber = None
updated_course = self.update_course(self.course, self.user.id)
# Assert that 'display_coursenumber' field has been changed successfully.
self.assertEqual(updated_course.display_coursenumber, None)
# Perform GET request on course outline url with the course id.
course_outline_url = reverse_course_url('course_handler', updated_course.id)
response = self.client.get_html(course_outline_url)
# Assert that response code is 200.
self.assertEqual(response.status_code, 200)
# Assert that 'display_course_number' is being set to "" (as display_coursenumber was None).
self.assertIn('display_course_number: ""', response.content)
@ddt.ddt
class TestCourseOutline(CourseTestCase):
"""
Unit tests for the course outline.
"""
def setUp(self):
"""
Set up for the course outline tests.
"""
super(TestCourseOutline, self).setUp()
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
def test_json_responses(self):
"""
Verify the JSON responses returned for the course.
"""
outline_url = reverse_course_url('course_handler', self.course.id)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_outline_initial_state(self):
course_module = modulestore().get_item(self.course.location)
course_structure = create_xblock_info(
course_module,
include_child_info=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical'
)
# Verify that None is returned for a non-existent locator
self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))
# Verify that the correct initial state is returned for the test chapter
chapter_locator = unicode(self.chapter.location)
initial_state = course_outline_initial_state(chapter_locator, course_structure)
self.assertEqual(initial_state['locator_to_show'], chapter_locator)
expanded_locators = initial_state['expanded_locators']
self.assertIn(unicode(self.sequential.location), expanded_locators)
self.assertIn(unicode(self.vertical.location), expanded_locators)
def test_start_date_on_page(self):
"""
Verify that the course start date is included on the course outline page.
"""
def _get_release_date(response):
"""Return the release date from the course page"""
parsed_html = lxml.html.fromstring(response.content)
return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content()
def _assert_settings_link_present(response):
"""
Asserts there's a course settings link on the course page by the course release date.
"""
parsed_html = lxml.html.fromstring(response.content)
settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a')
self.assertIsNotNone(settings_link)
self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id))
outline_url = reverse_course_url('course_handler', self.course.id)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
# A course with the default release date should display as "Unscheduled"
self.assertEqual(_get_release_date(response), 'Unscheduled')
_assert_settings_link_present(response)
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start))
_assert_settings_link_present(response)
def _create_test_data(self, course_module, create_blocks=False, publish=True, block_types=None):
"""
Create data for test.
"""
if create_blocks:
for block_type in block_types:
ItemFactory.create(
parent_location=self.vertical.location,
category=block_type,
display_name='{} Problem'.format(block_type)
)
if not publish:
self.store.unpublish(self.vertical.location, self.user.id)
course_module.advanced_modules.extend(block_types)
def _verify_deprecated_info(self, course_id, advanced_modules, info, deprecated_block_types):
"""
Verify deprecated info.
"""
expected_blocks = []
for block_type in deprecated_block_types:
expected_blocks.append(
[
reverse_usage_url('container_handler', self.vertical.location),
'{} Problem'.format(block_type)
]
)
self.assertEqual(info['block_types'], deprecated_block_types)
self.assertEqual(
info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types)
)
self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual(
info['advance_settings_url'],
reverse_course_url('advanced_settings_handler', course_id)
)
@ddt.data(
{'publish': True},
{'publish': False},
)
@ddt.unpack
def test_verify_deprecated_warning_message_with_single_feature(self, publish):
"""
Verify deprecated warning info for single deprecated feature.
"""
block_types = ['notes']
with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(
course_module.id,
course_module.advanced_modules,
info,
block_types
)
def test_verify_deprecated_warning_message_with_multiple_features(self):
"""
Verify deprecated warning info for multiple deprecated features.
"""
block_types = ['notes', 'lti']
with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data(
{'delete_vertical': True},
{'delete_vertical': False},
)
@ddt.unpack
def test_deprecated_blocks_list_updated_correctly(self, delete_vertical):
"""
Verify that deprecated blocks list shown on banner is updated correctly.
Here is the scenario:
The list of deprecated blocks shown on the banner contains published
and un-published blocks. That list should be updated when we delete
un-published block(s). This behavior should be the same whether we
delete an unpublished vertical or an unpublished problem.
"""
block_types = ['notes']
course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert1 Subsection1'
)
problem1 = ItemFactory.create(
parent_location=vertical1.location,
category='notes',
display_name='notes problem in vert1',
publish_item=False
)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should be empty here because there is nothing
# published or un-published present
self.assertEqual(info['blocks'], [])
vertical2 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert2 Subsection1'
)
ItemFactory.create(
parent_location=vertical2.location,
category='notes',
display_name='notes problem in vert2',
publish_item=True
)
# At this point CourseStructure will contain both the above
# published and un-published verticals
info = _deprecated_blocks_info(course_module, block_types)
self.assertItemsEqual(
info['blocks'],
[
[reverse_usage_url('container_handler', vertical1.location), 'notes problem in vert1'],
[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']
]
)
# Delete the un-published vertical or problem so that CourseStructure updates its data
if delete_vertical:
self.store.delete_item(vertical1.location, self.user.id)
else:
self.store.delete_item(problem1.location, self.user.id)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should only contain the info about vertical2 which is published.
# There shouldn't be any info present about un-published vertical1
self.assertEqual(
info['blocks'],
[[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']]
)
class TestCourseReIndex(CourseTestCase):
"""
Unit tests for course reindexing.
"""
SUCCESSFUL_RESPONSE = _("Course has been successfully reindexed.")
def setUp(self):
"""
Set up for the course reindex tests.
"""
super(TestCourseReIndex, self).setUp()
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, self.user.id)
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
self.html = ItemFactory.create(
parent_location=self.vertical.location, category="html", display_name="My HTML",
data="<div>This is my unique HTML content</div>",
)
def test_reindex_course(self):
"""
Verify that course gets reindexed.
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
# The reindex handler should return the success message
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
response = self.client.post(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.content, '')
self.assertEqual(response.status_code, 405)
self.client.logout()
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 302)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_empty_content_type(self):
"""
Test json content type is set if '' is selected
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, CONTENT_TYPE='')
# The reindex handler should return the success message
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_course_search_index_error(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# Start manual reindex and check error in response
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 500)
def test_reindex_json_responses(self):
"""
Test json response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
reindex_course_and_check_access(self.course.id, self.user)
# Check results remain the same
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_reindex_video_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_html_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_reindex_seq_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_reindex_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
def test_reindex_no_permissions(self):
# register a non-staff member and try to delete the course branch
user2 = UserFactory()
with self.assertRaises(PermissionDenied):
reindex_course_and_check_access(self.course.id, user2)
def test_indexing_responses(self):
"""
Test do_course_reindex response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
# Check results are the same following reindex
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_indexing_video_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_indexing_html_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_indexing_seq_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_indexing_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
| agpl-3.0 |
robbiet480/home-assistant | homeassistant/components/xs1/__init__.py | 8 | 2700 | """Support for the EZcontrol XS1 gateway."""
import asyncio
import logging
import voluptuous as vol
import xs1_api_client
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "xs1"
ACTUATORS = "actuators"
SENSORS = "sensors"
# define configuration parameters
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=80): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_USERNAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
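# A minimal example entry (hypothetical values) satisfying the schema above,
# as it would appear in Home Assistant's configuration.yaml:
#
#     xs1:
#       host: 192.168.1.100
#       port: 80
#       ssl: false
#       username: xs1_user
#       password: xs1_password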
XS1_COMPONENTS = ["climate", "sensor", "switch"]
# Lock used to limit the amount of concurrent update requests
# as the XS1 Gateway can only handle a very
# small amount of concurrent requests
UPDATE_LOCK = asyncio.Lock()
def setup(hass, config):
"""Set up XS1 Component."""
_LOGGER.debug("Initializing XS1")
host = config[DOMAIN][CONF_HOST]
port = config[DOMAIN][CONF_PORT]
ssl = config[DOMAIN][CONF_SSL]
user = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
# initialize XS1 API
try:
xs1 = xs1_api_client.XS1(
host=host, port=port, ssl=ssl, user=user, password=password
)
except ConnectionError as error:
_LOGGER.error(
"Failed to create XS1 API client because of a connection error: %s", error,
)
return False
_LOGGER.debug("Establishing connection to XS1 gateway and retrieving data...")
hass.data[DOMAIN] = {}
actuators = xs1.get_all_actuators(enabled=True)
sensors = xs1.get_all_sensors(enabled=True)
hass.data[DOMAIN][ACTUATORS] = actuators
hass.data[DOMAIN][SENSORS] = sensors
_LOGGER.debug("Loading components for XS1 platform...")
# Load components for supported devices
for component in XS1_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class XS1DeviceEntity(Entity):
"""Representation of a base XS1 device."""
def __init__(self, device):
"""Initialize the XS1 device."""
self.device = device
async def async_update(self):
"""Retrieve latest device state."""
async with UPDATE_LOCK:
await self.hass.async_add_executor_job(self.device.update)
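# Sketch (hypothetical helper, not part of the integration): because every
# XS1DeviceEntity shares the module-level UPDATE_LOCK, even a parallel
# refresh of many entities reaches the gateway one request at a time:
#
#     async def refresh_all(entities):
#         await asyncio.gather(*(entity.async_update() for entity in entities))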
| apache-2.0 |
hendradarwin/VTK | Filters/General/Testing/Python/clipImage.py | 20 | 1534 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline
#
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64,64)
v16.GetOutput().SetOrigin(0.0,0.0,0.0)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
v16.SetImageRange(45,45)
v16.SetDataSpacing(3.2,3.2,1.5)
v16.Update()
# do the pixel clipping
clip = vtk.vtkClipDataSet()
clip.SetInputConnection(v16.GetOutputPort())
clip.SetValue(1000)
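# By default vtkClipDataSet keeps the part of each cell whose scalar value is
# above the clip value (1000 here) and discards the rest.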
clipMapper = vtk.vtkDataSetMapper()
clipMapper.SetInputConnection(clip.GetOutputPort())
clipMapper.ScalarVisibilityOff()
clipActor = vtk.vtkActor()
clipActor.SetMapper(clipMapper)
# put an outline around the data
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.VisibilityOff()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(clipActor)
ren1.SetBackground(0,0,0)
renWin.SetSize(200,200)
iren.Initialize()
# render the image
#
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/decomposition/nmf.py | 15 | 19103 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
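# Note: trace(np.dot(X, Y.T)) equals the elementwise sum sum_ij X_ij * Y_ij,
# which the ravel/dot trick above computes without forming the full product.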
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
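# Worked examples (n = 4, so sqrt_n = 2): a one-hot vector [1, 0, 0, 0] has
# L1 norm 1 and L2 norm 1, giving sparseness (2 - 1) / (2 - 1) = 1.0, while
# the constant vector [1, 1, 1, 1] has L1 norm 4 and L2 norm 2, giving
# (2 - 4/2) / (2 - 1) = 0.0.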
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
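# e.g. check_non_negative(np.array([[1.0, -1.0]]), "NMF (input check)")
# raises ValueError("Negative values in data passed to NMF (input check)").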
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
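# Minimal usage sketch for this module-internal helper (the input matrix is
# hypothetical; shapes follow from the code above):
#
#     rng = np.random.RandomState(0)
#     X = np.abs(rng.randn(6, 4))
#     W, H = _initialize_nmf(X, n_components=2, variant='ar', random_state=0)
#     # W.shape == (6, 2), H.shape == (2, 4); np.dot(W, H) is a coarse
#     # non-negative approximation of X that the NMF solvers then refine.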
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow finding a better step size but lead to a longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
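# Minimal usage sketch for this module-internal helper (hypothetical data):
# solve min_H ||W H - V||_2 with H >= 0 for fixed V and W:
#
#     rng = np.random.RandomState(0)
#     V = np.abs(rng.randn(5, 3))
#     W = np.abs(rng.randn(5, 2))
#     H0 = np.abs(rng.randn(2, 3))
#     H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
#     # H is non-negative and np.dot(W, H) approximates V in least squares.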
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
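# A minimal usage sketch (illustrative only; the helper below is not part of
# the module's API and assumes numpy is available as `np`, as in the rest of
# this file): decompose a small non-negative matrix and form its low-rank
# reconstruction.
def _example_nmf_usage():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))  # NMF requires non-negative input
    model = ProjectedGradientNMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(X)   # shape (6, 2): per-sample activations
    H = model.components_        # shape (2, 5): per-component basis vectors
    return np.dot(W, H)          # rank-2, non-negative approximation of X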
| bsd-3-clause |
toinbis/369old | src/web369/conf/base.py | 1 | 2325 | from pkg_resources import resource_filename
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'web369',
'USER': 'root',
'PASSWORD': '',
}
}
TIME_ZONE = 'Europe/Vilnius'
LANGUAGE_CODE = 'lt'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
STATIC_URL = '/static/'
STATIC_ROOT = resource_filename('web369', '../../var/htdocs/static')
STATICFILES_DIRS = (
resource_filename('web369', 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = resource_filename('web369', '../../var/htdocs/media')
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
SECRET_KEY = 'SBX*YTL!cANetM&uFTf6R5Je(@PX3!rtgo)kgwNT'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'web369.urls.default'
TEMPLATE_DIRS = (
resource_filename('web369', 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'south',
'web369',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
'TIMEOUT': 60,
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# Word count will be updated when new documents are scraped:
LIVE_WORD_COUNT = True
| bsd-3-clause |
ZhangXinNan/tensorflow | tensorflow/python/ops/parallel_for/pfor.py | 2 | 101653 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compiled parallel-for loop."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import flags
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
flags.DEFINE_bool(
"op_conversion_fallback_to_while_loop", False,
"If true, falls back to using a while loop for ops for "
"which a converter is not defined.")
def _stack(t, length):
"""stacks `t` `length` times."""
ones = array_ops.ones_like(array_ops.shape(t))
multiples = array_ops.concat([length, ones], 0)
t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
return wrap(t, True)
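# For example, for _stack above (a sketch): if `t` has shape [3, 4] and
# `length` is the one-element vector [5], the result is a stacked
# WrappedTensor whose tensor has shape [5, 3, 4], with every leading slice a
# copy of `t`.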
# The following stateful ops can be safely called once, and with the same
# signature as the unconverted version, if their inputs are loop invariant.
# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
# plan is to map each read/write in the loop_fn to a corresponding merged
# read/write in the converted graph. Writes need to be mergeable (e.g.
# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
# loop_fn, doing a one-to-one conversion will simulate executing such
# instructions in lock-step across all iterations.
passthrough_stateful_ops = set([
"VariableV2",
"VarHandleOp",
"ReadVariableOp",
"StackV2",
"TensorArrayWriteV3",
"TensorArrayReadV3",
"TensorArraySizeV3",
])
def _is_stateful_pfor_op(op):
if isinstance(op, WhileOp):
return op.is_stateful
if op.type == "Const":
# Const didn't have an op_def.
return False
if op.type in passthrough_stateful_ops:
return False
assert hasattr(op, "op_def") and op.op_def is not None, op
return op.op_def.is_stateful
# pylint: disable=protected-access
class WhileOp(object):
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(self, exit_node, pfor_ops):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
"""
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
assert isinstance(exit_node, ops.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
# Note the code below tries to reverse engineer an existing while_loop graph
# by assuming the following pattern of nodes.
#
    #          NextIteration <---- Body <--- Enter
    #              |                ^
    #              V             ___| Y
    #    Enter -> Merge -> Switch___
    #                         ^       | N
    #                         |       V
    #                     LoopCond    Exit
    # Note that elements in the list below correspond one-to-one with each
# other. i.e. these lists are the same size, and the i_th entry corresponds
# to different Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (ops.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (ops.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self):
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self):
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self):
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self):
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self):
return self._is_stateful
@property
def pfor_converter(self):
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
# Note that second output of Merge is typically not used, except possibly
# as a control dependency. To avoid trying to output the correct value, we
# employ a hack here. We output a dummy invalid value with an incorrect
# dtype. This will allow control dependency to work but if using it as an
# input, it should typically lead to errors during graph construction due
# to dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
# Don't need to worry about switch.output[0] which will feed to Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor, enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = [
parent_pfor._convert_helper(x).t for x in enter.op.control_inputs
]
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
def _maybe_stacked(self, cache, inp):
"""Heuristic to figue out if the coverting inp leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
if op.type in [
"Shape",
"Rank"
"ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
# Since this enter node is part of the `loop_vars`, it corresponds to an
        # output and its preceding switch. We mark this switch's output with the
        # same stackness, to act as the base case for the logic below. Below, we
        # go through the body to figure out which inputs might need to be stacked
        # and which inputs can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
# List of booleans indicating stackness of inputs, i.e. tensors
# corresponding to self._enters.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
# Note that even when an input is unstacked, the body could make it
        # stacked. We use a heuristic below to figure out if the body may be making
# it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)
] + input_shape_invariants + ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
control_flow_ops.cond(not_all_done,
lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
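  # Sketch of the partitioning above (illustrative): with
  # indices = [0, 1, 2, 3] and conditions = [False, True, False, True]
  # (cast to [0, 1, 0, 1]), dynamic_partition yields done_indices = [0, 2]
  # and new_indices = [1, 3]; each stacked input is split the same way along
  # its leading dimension.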
def _process_body(self, pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done):
"""Convert the body function."""
def true_fn(control_inputs, body_pfor, body_output, stacked):
"""Converts the body function for all but last iteration.
This essentially converts body_output. Additionally, it needs to handle
any control dependencies on the NextIteration node. So it creates another
Identity node with the converted dependencies.
"""
converted_control_inp = []
for x in control_inputs:
for t in x.outputs:
converted_control_inp.append(body_pfor._convert_helper(t).t)
if stacked:
# Note convert always does the stacking.
output = body_pfor.convert(body_output)
else:
output, convert_stacked, _ = body_pfor._convert_helper(body_output)
assert convert_stacked == stacked, body_output
with ops.control_dependencies(converted_control_inp):
return array_ops.identity(output)
body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
cond_stacked, new_inputs,
inputs_stacked)
new_outputs = []
for i, (body_output, stacked) in enumerate(
zip(self._body_outputs, inputs_stacked)):
control_inp = self._next_iter_control_inputs[i]
out_dtype = body_output.dtype
# Note that we want to run the body only if not all pfor iterations are
# done. If all are done, we return empty tensors since these values will
# not be used. Notice that the value returned by the loop is based on
# TensorArrays and not directly on these returned values.
# pylint: disable=cell-var-from-loop
new_output = control_flow_ops.cond(
not_all_done,
lambda: true_fn(control_inp, body_pfor, body_output, stacked),
lambda: constant_op.constant([], dtype=out_dtype))
# pylint: enable=cell-var-from-loop
new_outputs.append(new_output)
return new_outputs
def __call__(self, pfor_input):
"""Converter for the while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the iterations that are not
done.
args: Remaining arguments. These can be divided into 3 categories:
- First set of arguments are the tensors that correspond to the initial
elements of self._enters. The elements that appear in original while
loop's `loop_vars`.
- The second set of arguments are the tensors that correspond to the
remaining elements of self._enters. These are the tensors that directly
enter the original while loop body.
- Finally, the last set of arguments are TensorArrays. These TensorArrays
correspond to the outputs of the original while_loop, i.e. to the
elements in self._outputs. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the
i'th iteration of pfor. Note that elements can be written into these
tensors arrays in any order, depending on when the corresponding pfor
iteration is done.
If the original while_loop had `k` tensors in its `loop_vars` and its body
directly captured `m` tensors, the `args` will contain `2 * k + m` values.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Args:
pfor_input: A PForInput object corresponding to the output of any Exit
node from this while loop.
Returns:
List of converted outputs.
"""
# Create init_values that will be passed to the while_loop.
init_values, inputs_stacked, shape_invariants = self._create_init_values(
pfor_input)
# Note that we use a list as a hack since we need the nested function body
# to set the value of cond_is_stacked. python2.x doesn't support nonlocal
# variables.
cond_is_stacked = [None]
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
      # See documentation for __call__ for the structure of *args.
num_enters = len(self._enters)
inputs = args[:num_enters]
output_tas = args[num_enters:]
# TODO(agarwal): see which outputs have consumers and only populate the
# TensorArrays corresponding to those. Or do those paths get trimmed out
# from inside the while_loop body?
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set cond_stacked to True here. At this point we don't
# know if it could be loop invariant, hence the conservative value is
# to assume stacked.
cond_pfor = self._init_pfor(pfor_input.pfor, indices,
cond_stacked=True,
inputs=inputs,
inputs_stacked=inputs_stacked)
conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_unstacked(
conditions, indices, inputs, output_tas)
else:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_stacked(
conditions, indices, inputs, inputs_stacked, output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs = self._process_body(pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done)
# Note that the first num_outputs new values of inputs are computed using
# the body. Rest of them were direct Enters into the condition/body and
# the partitioning done earlier is sufficient to give the new value.
num_outputs = len(self._outputs)
new_args = ([not_all_done, new_indices] + new_outputs + list(
new_inputs[num_outputs:]) + new_output_tas)
return tuple(new_args)
while_outputs = control_flow_ops.while_loop(
cond, body, init_values, shape_invariants=shape_invariants)
output_tas = while_outputs[-len(self._outputs):]
outputs = []
assert cond_is_stacked[0] is not None
for inp_stacked, ta in zip(inputs_stacked, output_tas):
if cond_is_stacked[0]:
outputs.append(wrap(ta.stack(), True))
else:
# Note that if while_loop condition is unstacked, all iterations exit at
# the same time and we wrote those outputs in index 0 of the tensor
# array.
outputs.append(wrap(ta.read(0), inp_stacked))
return outputs
class _PforInput(object):
"""Input object passed to registered pfor converters."""
def __init__(self, pfor, op, inputs):
"""Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
"""
self.pfor = pfor
self._op = op
self._inputs = inputs
def stack_inputs(self, stack_indices=None):
"""Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
"""
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
def expanddim_inputs_for_broadcast(self):
"""Reshapes stacked inputs to prepare them for broadcast.
Since stacked inputs have an extra leading dimension, automatic broadcasting
rules could incorrectly try to expand dimensions before that leading
dimension. To avoid that, we reshape these stacked inputs to the maximum
rank they will need to be broadcasted to.
"""
if not self._inputs:
return
# Find max rank
def _get_rank(x):
rank = array_ops.rank(x.t)
if not x.is_stacked:
rank += 1
return rank
ranks = [_get_rank(x) for x in self._inputs]
max_rank = ranks[0]
for rank in ranks[1:]:
max_rank = math_ops.maximum(rank, max_rank)
for i, inp in enumerate(self._inputs):
if inp.is_stacked:
shape = array_ops.shape(inp.t)
rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
ones = array_ops.tile([1], rank_diff)
new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
@property
def inputs(self):
return self._inputs
@property
def num_inputs(self):
return len(self._inputs)
def input(self, index):
assert len(self._inputs) > index, (index, self._inputs)
return self._inputs[index]
def stacked_input(self, index):
t, is_stacked, _ = self.input(index)
if not is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be not loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
def unstacked_input(self, index):
t, is_stacked, _ = self.input(index)
if is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
@property
def op(self):
return self._op
@property
def op_type(self):
return self._op.type
def get_attr(self, attr):
return self._op.get_attr(attr)
@property
def outputs(self):
return self._op.outputs
def output(self, index):
assert index < len(self._op.outputs)
return self._op.outputs[index]
_pfor_converter_registry = {}
class RegisterPFor(object):
"""Utility to register converters for pfor.
Usage:
@RegisterPFor(foo_op_type)
def _foo_converter(pfor_input):
...
The above will register conversion function `_foo_converter` for handling
  conversion of `foo_op_type`. During conversion, the registered function will
  be called with a single argument of type `_PforInput` which will contain
  state needed for the conversion. This registered function should output a
  list of WrappedTensor objects with the same length as the number of outputs
  of the op being converted. If the op had zero outputs, then it should return
  an ops.Operation object.
"""
def __init__(self, op_type):
"""Creates an object to register a converter for op with type `op_type`."""
self.op_type = op_type
def __call__(self, converter):
name = self.op_type
assert name not in _pfor_converter_registry, "Re-registering %s " % name
_pfor_converter_registry[name] = converter
return converter
class RegisterPForWithArgs(RegisterPFor):
"""Utility to register converters for pfor.
Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ...)
  def _foo_converter(pfor_input, op_type, foo=None, ...):
...
See RegisterPFor for details on the conversion function.
`RegisterPForWithArgs` allows binding extra arguments to the
conversion function at registration time.
"""
def __init__(self, op_type, *args, **kw_args):
super(RegisterPForWithArgs, self).__init__(op_type)
self._args = args
self._kw_args = kw_args
def __call__(self, converter):
def _f(pfor_input):
return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
super(RegisterPForWithArgs, self).__call__(_f)
return converter
def _create_op(op_type, inputs, op_dtypes, attrs=None):
"""Utility to create an op."""
return ops.get_default_graph().create_op(
op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
WrappedTensor = collections.namedtuple("WrappedTensor",
["t", "is_stacked", "is_sparse_stacked"])
"""Wrapper around the result of a Tensor conversion.
The additional fields are useful for keeping track of the conversion state as
data flows through the ops in the loop body. For every op whose output is a
Tensor, its converter should return either a WrappedTensor or a list of
WrappedTensors.
Args:
t: The converted tensor
is_stacked: True if the tensor is stacked, i.e. represents the results of all
the iterations of the loop, where each row i of the tensor corresponds to
that op's output on iteration i of the loop. False if the tensor is not
    stacked, i.e. represents the result of the op for a single iteration of
the loop, where the result does not vary between iterations.
is_sparse_stacked: True if the tensor corresponds to a component tensor
(indices, values, or dense_shape) of a sparse tensor, and has been logically
stacked via a sparse conversion.
"""
def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
"""Helper to create a WrappedTensor object."""
assert isinstance(is_stacked, bool)
assert isinstance(is_sparse_stacked, bool)
assert isinstance(tensor, ops.Tensor)
assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
"stacked via a sparse "
"conversion, it must also be "
"stacked.")
return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
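# A minimal sketch of the wrapping convention (the helper below is
# illustrative only and not used by the converters): a loop-invariant value is
# wrapped unstacked, while a per-iteration value carries a leading dimension
# of size loop_len and is wrapped stacked.
def _example_wrap_usage(loop_len=10):
  invariant = wrap(constant_op.constant([1.0, 2.0]), is_stacked=False)
  per_iteration = wrap(array_ops.zeros([loop_len, 2]), is_stacked=True)
  return invariant, per_iteration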
def _fallback_converter(pfor_input):
logging.warn("Using a while_loop for converting %s", pfor_input.op_type)
output_dtypes = [x.dtype for x in pfor_input.outputs]
iters = pfor_input.pfor.loop_len_vector[0]
def while_body(i, *ta_list):
"""Body of while loop."""
inputs = [
x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
]
op_outputs = _create_op(
pfor_input.op_type,
inputs,
output_dtypes,
attrs=pfor_input.op.node_def.attr).outputs
outputs = []
for out, ta in zip(op_outputs, ta_list):
assert isinstance(out, ops.Tensor)
outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
return tuple([i + 1] + outputs)
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters, while_body, [0] + [
tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
])[1:]
return tuple([wrap(ta.concat(), True) for ta in ta_list])
class PFor(object):
"""Implementation of rewrite of parallel-for loops.
This class takes a DAG or a set of DAGs representing the body of a
parallel-for loop, and adds new operations to the graph that implements
functionality equivalent to running that loop body for a specified number of
iterations. This new set of nodes may or may not use a tensorflow loop
construct.
The process of conversion does not delete or change any existing operations.
It only adds operations that efficiently implement the equivalent
functionality. We refer to the added ops as "converted ops".
The conversion process uses a simple greedy heuristic. It walks the loop body
and tries to express the functionality of running each node in a loop with a
new set of nodes. When converting an op several cases are possible:
- The op is not inside the loop body. Hence it can be used as is.
- The op does not depend on the iteration number and is stateless. In this
case, it can be used as is.
- The op is not stateful, and depends on iteration number only through control
dependencies. In this case, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is not stateful, and all its inputs are loop invariant. In this
case, similar to above, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is stateful or at least one of the inputs is not loop invariant. In
this case, we run the registered converter for that op to create a set of
converted ops. All nodes in the set will have converted control dependencies
corresponding to control dependencies of the original op. If the op returned
multiple outputs, "converted outputs" could be produced by different ops in
this set.
"""
def __init__(self,
loop_var,
loop_len,
pfor_ops,
all_indices=None,
all_indices_partitioned=False):
"""Creates an object to rewrite a parallel-for loop.
Args:
loop_var: ops.Tensor output of a Placeholder operation. The value should
be an int32 scalar representing the loop iteration number.
loop_len: A scalar or scalar Tensor representing the number of iterations
the loop is run for.
pfor_ops: List of all ops inside the loop body.
all_indices: If not None, an int32 vector with size `loop_len`
representing the iteration ids that are still active. These values
should be unique and sorted. However they may not be contiguous. This is
typically the case when inside a control flow construct which has
partitioned the indices of the iterations that are being converted.
all_indices_partitioned: If True, this object is being constructed from a
control flow construct where not all the pfor iterations are guaranteed
to be active.
"""
assert isinstance(loop_var, ops.Tensor)
assert loop_var.op.type == "Placeholder"
self._loop_var = loop_var
loop_len_value = tensor_util.constant_value(loop_len)
if loop_len_value is not None:
loop_len = loop_len_value
self._loop_len_vector = array_ops.reshape(loop_len, [1])
self._all_indices_partitioned = all_indices_partitioned
if all_indices_partitioned:
assert all_indices is not None
self.all_indices = (
math_ops.range(loop_len) if all_indices is None else all_indices)
self._conversion_map = {}
self._conversion_map[loop_var] = wrap(self.all_indices, True)
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
def _convert_sparse(self, y):
"""Returns the converted value corresponding to SparseTensor y.
For SparseTensors, instead of stacking the component tensors separately,
resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
rank) respectively for indices, values, and dense_shape (where N is the loop
length and m is the number of sparse tensor values per loop iter), we want
to logically stack the SparseTensors, to create a SparseTensor whose
components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
respectively.
Here, we try to get the conversion of each component tensor.
If the tensors are stacked via a sparse conversion, return the resulting
SparseTensor composed of the converted components. Otherwise, the component
tensors are either unstacked or stacked naively. In the latter case, we
unstack the component tensors to reform loop_len SparseTensor elements,
then correctly batch them.
The unstacked tensors must have the same rank. Each dimension of each
SparseTensor will expand to be the largest among all SparseTensor elements
for that dimension. For example, if there are N SparseTensors of rank 3
being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
Args:
y: A tf.SparseTensor.
Returns:
A tf.SparseTensor that is the converted value corresponding to y.
"""
outputs = [
self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
]
assert all(isinstance(o, WrappedTensor) for o in outputs)
if all(w.is_sparse_stacked for w in outputs):
return sparse_tensor.SparseTensor(*[w.t for w in outputs])
assert not any(w.is_sparse_stacked for w in outputs), (
"Error converting SparseTensor. All components should be logically "
"stacked, or none.")
# If component tensors were not sparsely stacked, they are either unstacked
# or stacked without knowledge that they are components of sparse tensors.
# In this case, we have to restack them.
return self._restack_sparse_tensor_logically(
*[self._unwrap_or_tile(w) for w in outputs])
def _restack_sparse_tensor_logically(self, indices, values, shape):
sparse_tensor_rank = indices.get_shape()[-1].value
if sparse_tensor_rank is not None:
sparse_tensor_rank += 1
def map_fn(args):
res = gen_sparse_ops.serialize_sparse(
args[0], args[1], args[2], out_type=dtypes.variant)
return res
# Applies a map function to the component tensors to serialize each
# sparse tensor element and batch them all, then deserializes the batch.
# TODO(rachelim): Try to do this without map_fn -- add the right offsets
# to shape and indices tensors instead.
result = functional_ops.map_fn(
map_fn, [indices, values, shape], dtype=dtypes.variant)
return sparse_ops.deserialize_sparse(
result, dtype=values.dtype, rank=sparse_tensor_rank)
def _unwrap_or_tile(self, wrapped_tensor):
"""Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
if is_stacked:
return output
else:
return _stack(output, self._loop_len_vector).t
def convert(self, y):
"""Returns the converted value corresponding to y.
Args:
y: A ops.Tensor or a ops.Operation object. If latter, y should not have
any outputs.
Returns:
If y does not need to be converted, it returns y as is. Else it returns
the "converted value" corresponding to y.
"""
if isinstance(y, sparse_tensor.SparseTensor):
return self._convert_sparse(y)
output = self._convert_helper(y)
if isinstance(output, WrappedTensor):
assert isinstance(y, ops.Tensor)
return self._unwrap_or_tile(output)
else:
assert isinstance(y, ops.Operation)
assert not y.outputs
assert isinstance(output, ops.Operation)
return output
def _was_converted(self, t):
"""True if t is not a conversion of itself."""
converted_t = self._conversion_map[t]
return converted_t.t is not t
def _add_conversion(self, old_output, new_output):
self._conversion_map[old_output] = new_output
def _convert_helper(self, op_or_tensor):
stack = [op_or_tensor]
while stack:
y = stack[0]
if y in self._conversion_map:
assert isinstance(self._conversion_map[y],
(WrappedTensor, ops.Operation))
stack.pop(0)
continue
if isinstance(y, ops.Operation):
assert not y.outputs, (
"We only support converting Operation objects with no outputs. "
"Got %s", y)
y_op = y
else:
assert isinstance(y, ops.Tensor), y
y_op = y.op
is_while_loop = y_op.type == "Exit"
if is_while_loop:
while_op = WhileOp(y, pfor_ops=self._pfor_ops)
is_inside_loop = while_op.is_inside_loop
# If all nodes in the while_loop graph were created inside the pfor, we
# treat the whole loop subgraph as a single op (y_op) and try to convert
# it. For while_loops that are created completely or partially outside,
# we treat them as external and should be able to simply return the Exit
# node output as is without needing any conversion. Note that for
# while_loops that are partially constructed inside, we assume they will
# be loop invariant. If that is not the case, it will create runtime
# errors since the converted graph would depend on the self._loop_var
# placeholder.
if is_inside_loop:
y_op = while_op
else:
is_inside_loop = self.op_is_inside_loop(y_op)
# If this op was not created inside the loop body, we will return as is.
# 1. Convert inputs and control inputs.
def _add_to_stack(x):
if x not in self._conversion_map:
stack.insert(0, x)
return True
else:
return False
if is_inside_loop:
added_to_stack = False
for inp in y_op.inputs:
added_to_stack |= _add_to_stack(inp)
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
added_to_stack |= _add_to_stack(t)
else:
added_to_stack |= _add_to_stack(cinp)
if added_to_stack:
continue
converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
some_input_converted = any(
[self._was_converted(x) for x in y_op.inputs])
some_input_stacked = any([x.is_stacked for x in converted_inputs])
converted_control_ops = set()
some_control_input_converted = False
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
converted_t = self._conversion_map[t]
if self._was_converted(t):
some_control_input_converted = True
converted_control_ops.add(converted_t.t.op)
else:
converted_cinp = self._conversion_map[cinp]
assert isinstance(converted_cinp, ops.Operation)
if converted_cinp != cinp:
some_control_input_converted = True
converted_control_ops.add(converted_cinp)
converted_control_ops = list(converted_control_ops)
is_stateful = _is_stateful_pfor_op(y_op)
else:
converted_inputs = []
converted_control_ops = []
logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
converted_inputs, converted_control_ops)
# 2. Convert y_op
      # If converting a while_loop, we let the while_loop converter deal with
# putting the control dependencies appropriately.
control_dependencies = [] if is_while_loop else converted_control_ops
with ops.control_dependencies(control_dependencies), ops.name_scope(
y_op.name + "/pfor/"):
# None of the inputs and control inputs were converted.
if (not is_inside_loop or
(not is_stateful and not some_input_converted and
not some_control_input_converted)):
if y == y_op:
assert not isinstance(y_op, WhileOp)
new_outputs = y_op
else:
new_outputs = [wrap(x, False) for x in y_op.outputs]
elif not (is_stateful or is_while_loop or some_input_stacked):
          # All inputs are unstacked or unconverted but some control inputs are
# converted.
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs]))
new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
[x.dtype for x in y_op.outputs],
y_op.node_def.attr)
if y == y_op:
new_outputs = new_op
else:
new_outputs = [wrap(x, False) for x in new_op.outputs]
else:
# Either some inputs are not loop invariant or op is stateful.
if hasattr(y_op, "pfor_converter"):
converter = y_op.pfor_converter
else:
converter = _pfor_converter_registry.get(y_op.type, None)
if converter is None:
if flags.FLAGS.op_conversion_fallback_to_while_loop:
converter = _fallback_converter
else:
raise ValueError(
"No converter defined for %s\n%s\ninputs: %s. "
"\nEither add a converter or set "
"--op_conversion_fallback_to_while_loop=True, "
"which may run slower" % (y_op.type, y_op, converted_inputs))
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked. We should only call the converter if it supports handling
# those inputs.
new_outputs = converter(_PforInput(self, y_op, converted_inputs))
if isinstance(new_outputs, WrappedTensor):
new_outputs = [new_outputs]
assert isinstance(new_outputs,
(list, tuple, ops.Operation)), new_outputs
logging.vlog(2, "converted %s %s", y_op, new_outputs)
# Insert into self._conversion_map
if y == y_op:
assert isinstance(new_outputs, ops.Operation)
self._add_conversion(y_op, new_outputs)
else:
for old_output, new_output in zip(y_op.outputs, new_outputs):
assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
self._add_conversion(old_output, new_output)
stack.pop(0)
return self._conversion_map[op_or_tensor]
@property
def loop_len_vector(self):
"""Returns a single element vector whose value is number of iterations."""
return self._loop_len_vector
@property
def loop_var(self):
"""Returns placeholder loop variable."""
return self._loop_var
@property
def pfor_ops(self):
return self._pfor_ops
@property
def all_indices_partitioned(self):
"""all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
"""
return self._all_indices_partitioned
# nn_ops
def _flatten_first_two_dims(x):
"""Merges first two dimensions."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_first_dim(x, first_dim):
"""Splits first dimension into [first_dim, -1]."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
return array_ops.reshape(x, new_shape)
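# Sketch of the round trip used by the "flatten batch" converters below
# (illustrative helper, not called elsewhere): the pfor dimension N and the
# kernel's batch dimension B are merged before invoking the kernel, and the
# kernel's output is split back to recover the leading loop dimension.
def _example_flatten_roundtrip():
  x = array_ops.zeros([3, 2, 4, 4, 1])        # [N=3, B=2, H, W, C]
  flat = _flatten_first_two_dims(x)           # shape [6, 4, 4, 1]
  return _unflatten_first_dim(flat, [3])      # shape [3, 2, 4, 4, 1]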
def _inputs_with_flattening(pfor_input, input_indices):
"""Stacks and flattens first dim of inputs at indices `input_indices`."""
if input_indices is None:
input_indices = []
pfor_input.stack_inputs(stack_indices=input_indices)
inputs = []
for i in range(pfor_input.num_inputs):
if i in input_indices:
inp = pfor_input.stacked_input(i)
inp = _flatten_first_two_dims(inp)
else:
inp = pfor_input.unstacked_input(i)
inputs.append(inp)
return inputs
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
def _convert_flatten_batch(pfor_input, op_type, dims):
del op_type
inputs = _inputs_with_flattening(pfor_input, dims)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
_channel_flatten_input_cache = {}
def _channel_flatten_input(x, data_format):
"""Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then,
    - for SNCHW, we transpose to NSCHW. If the N dimension has size 1, the
      transpose should be cheap.
    - for SNHWC, we transpose to NHWSC.
  We then merge the S and C dimensions.
Args:
x: ops.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
A 3-element tuple with the transformed value, along with the shape for
reshape and order for transpose required to transform back.
"""
graph = ops.get_default_graph()
cache_key = (graph, x, data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
if data_format == b"NCHW":
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
reverse_order = [3, 0, 1, 2, 4]
# Move S dimension next to C dimension.
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
# Reshape to merge the S and C dimension.
x = array_ops.reshape(x, shape)
outputs = x, reverse_order, reverse_shape
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
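# Shape sketch for _channel_flatten_input in the NHWC case (illustrative): a
# stacked input of shape [S, N, H, W, C] is transposed with order
# [1, 2, 3, 0, 4] so that S lands next to C, and then reshaped to
# [N, H, W, S * C]; `reverse_shape` and `reverse_order` undo the reshape and
# transpose afterwards.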
# Note that with training=True, running FusedBatchNorm on individual examples
# is very different from running FusedBatchNorm on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNorm")
def _convert_fused_batch_norm(pfor_input):
is_training = pfor_input.get_attr("is_training")
# When BatchNorm is used with training=False, mean and variance are provided
# externally and used as is by the op. Thus, we can merge the S and N
# dimensions as we do for regular operations.
# When BatchNorm is used with training=True, mean and variance are computed
# for each channel across the batch dimension (first one). If we merge S and N
# dimensions, mean and variances will be computed over a larger set. So, we
# merge the S and C dimensions instead.
if not is_training:
# We return zeros for batch_mean and batch_variance output. Note that CPU
# and GPU seem to have different behavior for those two outputs. CPU outputs
# zero because these values are not used during inference. GPU outputs
# something, probably real means and variances.
inputs = _inputs_with_flattening(pfor_input, [0])
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
n = pfor_input.pfor.loop_len_vector
y = _unflatten_first_dim(y, n)
mean = pfor_input.unstacked_input(3)
zeros = array_ops.zeros_like(mean)
return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)]
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
# We merge the first dimension with the "C" dimension, run FusedBatchNorm, and
# then transpose back.
x = pfor_input.stacked_input(0)
x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
# Note that we stack all the other inputs as well so that they are the same
# size as the new size of the channel dimension.
inputs = [x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(1, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
y = array_ops.reshape(y, reverse_shape)
y = array_ops.transpose(y, reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [y] + outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGrad")
def _convert_fused_batch_norm_grad(pfor_input):
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
y_backprop = pfor_input.stacked_input(0)
y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
x = pfor_input.stacked_input(1)
x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
inputs = [y_backprop, x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(2, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
x_backprop = outputs[0]
x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [x_backprop] + outputs
return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
shape_dim):
del op_type
inputs = _inputs_with_flattening(pfor_input, flatten_dims)
n = pfor_input.pfor.loop_len_vector
# Adjust the `input_sizes` input.
ones = array_ops.ones(
[array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
pfor_input.stack_inputs(stack_indices=[2])
inputs, inputs_stacked, _ = pfor_input.input(0)
filter_sizes = pfor_input.unstacked_input(1)
grads = pfor_input.stacked_input(2)
strides = pfor_input.get_attr("strides")
padding = pfor_input.get_attr("padding")
use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
data_format = pfor_input.get_attr("data_format")
dilations = pfor_input.get_attr("dilations")
if inputs_stacked:
# TODO(agarwal): Implement this efficiently.
    logging.warning("Conv2DBackpropFilter uses a while_loop. Fix that!")
def while_body(i, ta):
inp_i = inputs[i, ...]
grad_i = grads[i, ...]
output = nn_ops.conv2d_backprop_filter(
inp_i,
filter_sizes,
grad_i,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
return i + 1, ta.write(i, array_ops.expand_dims(output, 0))
n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
_, ta = control_flow_ops.while_loop(
lambda i, ta: i < n, while_body,
(0, tensor_array_ops.TensorArray(inputs.dtype, n)))
output = ta.concat()
return wrap(output, True)
else:
    # We merge the stack dimension with the channel dimension of the gradients
    # and pretend we had a larger filter (see change to filter_sizes below).
    # Once the filter backprop is computed, we reshape and transpose back
    # appropriately (a NumPy sketch follows this function).
grads, _, _ = _channel_flatten_input(grads, data_format)
n = pfor_input.pfor.loop_len_vector
old_filter_sizes = filter_sizes
filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
output = nn_ops.conv2d_backprop_filter(
inputs,
filter_sizes,
grads,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
output = array_ops.reshape(output, new_filter_shape)
output = array_ops.transpose(output, [3, 0, 1, 2, 4])
return wrap(output, True)
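# NumPy sketch of the reshape/transpose at the end of the unstacked-inputs
# branch above: the "big" filter gradient computed over n * out_channels
# channels is split back into per-iteration filter gradients. Shapes are
# illustrative.
def _example_split_big_filter():
  import numpy as np
  n, fh, fw, cin, cout = 3, 2, 2, 1, 4
  big = np.random.randn(fh, fw, cin, n * cout)
  per_iter = big.reshape(fh, fw, cin, n, cout).transpose(3, 0, 1, 2, 4)
  assert per_iter.shape == (n, fh, fw, cin, cout)  # stacked filter gradients
  return per_iter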
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
def _convert_identity(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_dim = array_ops.shape(t)[:1]
new_shape = array_ops.concat([new_dim, shape], axis=0)
return wrap(array_ops.reshape(t, new_shape), True)
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
t = pfor_input.stacked_input(0)
dim = pfor_input.unstacked_input(1)
dim += math_ops.cast(dim >= 0, dtypes.int32)
return wrap(array_ops.expand_dims(t, axis=dim), True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input):
t = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
size = pfor_input.unstacked_input(2)
begin = array_ops.concat([[0], begin], axis=0)
size = array_ops.concat([[-1], size], axis=0)
return wrap(array_ops.slice(t, begin, size), True)
@RegisterPFor("Tile")
def _convert_tile(pfor_input):
t = pfor_input.stacked_input(0)
multiples = pfor_input.unstacked_input(1)
multiples = array_ops.concat([[1], multiples], 0)
return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
pfor_input.stack_inputs()
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
num = pfor_input.get_attr("num")
return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]
@RegisterPFor("Pad")
def _convert_pad(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input):
split_dim = pfor_input.unstacked_input(0)
t = pfor_input.stacked_input(1)
num_split = pfor_input.get_attr("num_split")
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
@RegisterPFor("Transpose")
def _convert_transpose(pfor_input):
t = pfor_input.stacked_input(0)
perm = pfor_input.unstacked_input(1)
new_perm = array_ops.concat([[0], perm + 1], axis=0)
return wrap(array_ops.transpose(t, new_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
t = pfor_input.stacked_input(0)
shape = array_ops.shape(t)[1:]
return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
param, param_stacked, _ = pfor_input.input(0)
indices, indices_stacked, _ = pfor_input.input(1)
op_type = pfor_input.op_type
if op_type == "Gather":
validate_indices = pfor_input.get_attr("validate_indices")
axis = 0
else:
validate_indices = None
axis = pfor_input.unstacked_input(2)
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
if indices_stacked and not param_stacked:
if indices == pfor_input.pfor.all_indices and axis == 0:
param_shape0 = param.shape[0].value
indices_shape0 = indices.shape[0].value
if param_shape0 is not None and indices_shape0 == param_shape0:
# Note that with loops and conditionals, indices may not be contiguous.
# However they will be sorted and unique. So if the shape matches, then
# it must be picking up all the rows of param.
return wrap(param, True)
# TODO(agarwal): use array_ops.slice here.
output = array_ops.gather(
param, indices, validate_indices=validate_indices, axis=axis)
if axis != 0:
axis = control_flow_ops.cond(
axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
order = array_ops.concat(
[[axis],
math_ops.range(axis),
math_ops.range(axis + 1, array_ops.rank(output))],
axis=0)
output = control_flow_ops.cond(
math_ops.equal(axis, 0), lambda: output,
lambda: array_ops.transpose(output, order))
return wrap(output, True)
if param_stacked:
loop_len_vector = pfor_input.pfor.loop_len_vector
pfor_input.stack_inputs(stack_indices=[1])
indices = pfor_input.stacked_input(1)
param_flat = _flatten_first_two_dims(param)
    # Recompute indices to handle stacked param (see the sketch after this
    # function).
indices_offset = math_ops.range(
loop_len_vector[0]) * array_ops.shape(param)[1]
# Reshape indices_offset to allow broadcast addition
ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
indices_offset = array_ops.reshape(indices_offset, new_shape)
indices += indices_offset
# TODO(agarwal): handle axis != 0. May need to transpose param or
# array_ops.gather_nd.
if isinstance(axis, ops.Tensor):
axis_value = tensor_util.constant_value(axis)
else:
try:
axis_value = int(axis)
except TypeError:
axis_value = None
msg = ("Gather, where indices and param are both loop dependent, currently "
"requires axis=0")
if axis_value is not None and axis_value != 0:
raise ValueError("Error while converting %s. %s. Got axis=%d" %
(pfor_input.op, msg, axis))
with ops.control_dependencies(
[check_ops.assert_equal(axis, 0, message=msg)]):
output = array_ops.gather(param_flat, indices)
return wrap(output, True)
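# NumPy sketch of the index-offset trick above for a stacked param and stacked
# indices (axis 0): flatten the first two dims of param and shift iteration
# i's indices by i * param.shape[1], so one flat gather does all iterations.
def _example_offset_gather():
  import numpy as np
  param = np.arange(12).reshape(3, 4)  # [loop_len, m]
  indices = np.array([[0, 1], [2, 3], [1, 1]])
  offset = (np.arange(3) * param.shape[1]).reshape(3, 1)
  out = param.reshape(-1)[indices + offset]
  expected = np.stack([param[i][indices[i]] for i in range(3)])
  assert np.array_equal(out, expected)
  return out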
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
n = pfor_input.num_inputs
pfor_input.stack_inputs(stack_indices=range(n - 1))
axis = pfor_input.unstacked_input(n - 1)
axis += math_ops.cast(axis >= 0, axis.dtype)
return wrap(
array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
inp = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice(
inp,
begin,
end,
strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
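# Tiny sketch of the mask shifting above: prepending a full slice on the new
# loop dimension shifts every mask left by one bit, and begin/end masks get
# bit 0 set so that axis 0 is always taken whole (like writing `t[:, ...]`).
def _example_shift_masks():
  begin_mask = 0b10             # original op ignored `begin` on its axis 1
  shifted = begin_mask << 1 | 1
  assert shifted == 0b101       # now also ignores `begin` on the new axis 0
  return shifted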
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
shape = pfor_input.unstacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
dy = pfor_input.stacked_input(4)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice_grad(
shape,
begin,
end,
strides,
dy,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
  # TODO(agarwal): Check if tiling is faster than two transposes.
  # (A NumPy sketch of the a_stacked reshape trick follows this function.)
a, a_stacked, _ = pfor_input.input(0)
b, b_stacked, _ = pfor_input.input(1)
tr_a = pfor_input.get_attr("transpose_a")
tr_b = pfor_input.get_attr("transpose_b")
if a_stacked and b_stacked:
output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
return output
elif a_stacked:
if tr_a:
a = array_ops.transpose(a, [0, 2, 1])
if a.shape.is_fully_defined():
x, y, z = a.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(a), 3)
]
a = array_ops.reshape(a, [x * y, z])
prod = math_ops.matmul(a, b, transpose_b=tr_b)
return wrap(array_ops.reshape(prod, [x, y, -1]), True)
else:
assert b_stacked
if tr_b:
perm = [2, 0, 1]
b = array_ops.transpose(b, perm)
else:
# As an optimization, if one of the first two dimensions is 1, then we can
# reshape instead of transpose.
# TODO(agarwal): This check can be done inside Transpose kernel.
b_shape = array_ops.shape(b)
min_dim = math_ops.minimum(b_shape[0], b_shape[1])
perm = control_flow_ops.cond(
math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
b = array_ops.transpose(b, perm)
b = array_ops.reshape(b, new_shape)
if b.shape.is_fully_defined():
x, y, z = b.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(b), 3)
]
b = array_ops.reshape(b, [x, y * z])
prod = math_ops.matmul(a, b, transpose_a=tr_a)
prod = array_ops.reshape(prod, [-1, y, z])
prod = array_ops.transpose(prod, [1, 0, 2])
return wrap(prod, True)
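# NumPy sketch of the a_stacked branch above: a stacked [x, y, z] times [z, w]
# matmul collapses into a single 2-D matmul by flattening the first two dims,
# then reshaping the product back.
def _example_stacked_matmul():
  import numpy as np
  a = np.random.randn(3, 2, 4)  # [loop_len, y, z]
  b = np.random.randn(4, 5)
  prod = a.reshape(3 * 2, 4).dot(b).reshape(3, 2, -1)
  assert np.allclose(prod, np.matmul(a, b))
  return prod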
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
# TODO(agarwal): There may be a more efficient way to do this instead of
# stacking the inputs.
pfor_input.stack_inputs()
x = pfor_input.stacked_input(0)
y = pfor_input.stacked_input(1)
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
x = _flatten_first_two_dims(x)
y = _flatten_first_two_dims(y)
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
return wrap(output, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
def _convert_reduction(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
indices += math_ops.cast(indices >= 0, dtypes.int32)
keep_dims = pfor_input.get_attr("keep_dims")
return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
axis += math_ops.cast(axis >= 0, dtypes.int32)
exclusive = pfor_input.get_attr("exclusive")
reverse = pfor_input.get_attr("reverse")
return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
t = pfor_input.stacked_input(0)
bias = pfor_input.unstacked_input(1)
data_format = pfor_input.get_attr("data_format")
if data_format != b"NCHW":
return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
shape = array_ops.shape(t)
flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
t = array_ops.reshape(t, flattened_shape)
t = nn_ops.bias_add(t, bias, data_format=b"NCHW")
t = array_ops.reshape(t, shape)
return wrap(t, True)
@RegisterPFor("UnsortedSegmentSum")
def _convert_unsortedsegmentsum(pfor_input):
data, data_stacked, _ = pfor_input.input(0)
# TODO(agarwal): handle unstacked?
segment_ids = pfor_input.stacked_input(1)
# TODO(agarwal): handle stacked?
num_segments = pfor_input.unstacked_input(2)
if not data_stacked:
data = _stack(data, pfor_input.pfor.loop_len_vector).t
segment_shape = array_ops.shape(segment_ids)
n = segment_shape[0]
ones = array_ops.ones_like(segment_shape)[1:]
segment_offset = num_segments * math_ops.range(n)
segment_offset = array_ops.reshape(segment_offset,
array_ops.concat([[n], ones], axis=0))
segment_ids += segment_offset
num_segments *= n
output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
new_output_shape = array_ops.concat(
[[n, -1], array_ops.shape(output)[1:]], axis=0)
output = array_ops.reshape(output, new_output_shape)
return wrap(output, True)
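# NumPy sketch of the segment-id offsetting above: shifting iteration i's ids
# by i * num_segments makes all ids disjoint, so one unsorted segment sum
# computes every per-iteration sum, reshaped back to [loop_len, num_segments].
def _example_offset_segment_sum():
  import numpy as np
  data = np.arange(6.0).reshape(2, 3)  # 2 iterations, 3 values each
  segment_ids = np.array([[0, 0, 1], [1, 0, 0]])
  num_segments = 2
  offset = (num_segments * np.arange(2)).reshape(2, 1)
  flat_ids = (segment_ids + offset).reshape(-1)
  out = np.zeros(num_segments * 2)
  np.add.at(out, flat_ids, data.reshape(-1))
  out = out.reshape(2, num_segments)
  assert out.tolist() == [[1.0, 2.0], [9.0, 3.0]]
  return out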
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
inp = pfor_input.stacked_input(0)
dtype = pfor_input.get_attr("DstT")
return wrap(math_ops.cast(inp, dtype), True)
# Note that ops handled here do not have attributes except "T", and hence don't
# need extra arguments passed to the cwise_op call below.
@RegisterPForWithArgs("Add", math_ops.add)
@RegisterPForWithArgs("Ceil", math_ops.ceil)
@RegisterPForWithArgs("Equal", math_ops.equal)
@RegisterPForWithArgs("NotEqual", math_ops.not_equal)
@RegisterPForWithArgs("Floor", math_ops.floor)
@RegisterPForWithArgs("Greater", math_ops.greater)
@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
@RegisterPForWithArgs("Less", math_ops.less)
@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
@RegisterPForWithArgs("Maximum", math_ops.maximum)
@RegisterPForWithArgs("Minimum", math_ops.minimum)
@RegisterPForWithArgs("Mul", math_ops.multiply)
@RegisterPForWithArgs("Neg", math_ops.negative)
@RegisterPForWithArgs("RealDiv", math_ops.divide)
@RegisterPForWithArgs("Relu", nn_ops.relu)
@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
@RegisterPForWithArgs("Square", math_ops.square)
@RegisterPForWithArgs("Sub", math_ops.subtract)
@RegisterPForWithArgs("Tanh", math_ops.tanh)
def _convert_cwise(pfor_input, op_type, op_func):
del op_type
pfor_input.expanddim_inputs_for_broadcast()
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input):
out_type = pfor_input.get_attr("out_type")
return wrap(
array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
out_type = pfor_input.get_attr("out_type")
shapes = [
array_ops.shape(x, out_type=out_type)[1:]
if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs
]
return [wrap(x, False) for x in shapes]
@RegisterPFor("Size")
def _convert_size(pfor_input):
out_type = pfor_input.get_attr("out_type")
n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
return wrap(
array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input):
return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input):
# AddN does not support broadcasting.
pfor_input.stack_inputs()
return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
grad = pfor_input.stacked_input(0)
fmt = pfor_input.get_attr("data_format")
if fmt == b"NCHW":
output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
else:
grad_shape = array_ops.shape(grad)
last_dim_shape = grad_shape[-1]
first_dim_shape = grad_shape[0]
output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
return wrap(output, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SigmoidGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
# TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
# have to use tiling here.
pfor_input.stack_inputs()
outputs = _create_op(
op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
pfor_input.stack_inputs()
cond = pfor_input.stacked_input(0)
t = pfor_input.stacked_input(1)
e = pfor_input.stacked_input(2)
cond_rank = array_ops.rank(cond)
cond, t, e = control_flow_ops.cond(
cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
lambda: [cond, t, e])
outputs = _create_op(
pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
out = control_flow_ops.cond(cond_rank > 1,
lambda: _unflatten_first_dim(outputs[0], n),
lambda: outputs[0])
  return [wrap(out, True) for _ in outputs]
# random_ops
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
@RegisterPForWithArgs("RandomGamma")
@RegisterPForWithArgs("RandomPoissonV2")
def _convert_random(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
# inputs[0] is "shape"
inputs[0] = array_ops.concat(
[pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", op_type)
outputs = _create_op(
op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
if cond_stacked:
cond = math_ops.reduce_all(cond)
data_list = [x.t for x in pfor_input.inputs][1:]
return _create_op("Assert", [cond] + data_list, [],
attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input):
# Note that we don't stack all the inputs. Hence unstacked values are printed
# once here vs multiple times in a while_loop.
pfor_input.stack_inputs([0])
outputs = _create_op(
"Print", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow indices to
# be loop-invariant as that would guarantee a collision. Even if the indices are
# not loop-invariant, they could still conflict, and that will trigger runtime
# errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
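# Illustrative sketch of case 2 above as seen from user code. The `pfor` entry
# point and names used here are assumptions made only for the example; the
# converters below operate on the traced graph, never on this code directly.
def _example_tensor_array_inside_pfor():  # not called by this module
  import tensorflow as tf
  from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops
  def loop_fn(i):
    ta = tf.TensorArray(tf.float32, size=2)   # case 2: created inside the loop
    ta = ta.write(0, tf.cast(i, tf.float32))  # unstacked index, stacked value
    ta = ta.write(1, tf.constant(7.0))        # unstacked value (case 2b)
    return ta.stack()
  return pfor_ops.pfor(loop_fn, 3)            # expected shape: [3, 2]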
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
size = pfor_input.unstacked_input(0)
dtype = pfor_input.get_attr("dtype")
dynamic_size = pfor_input.get_attr("dynamic_size")
clear_after_read = pfor_input.get_attr("clear_after_read")
identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
tensor_array_name = pfor_input.get_attr("tensor_array_name")
handle, flow = data_flow_ops.tensor_array_v3(
size,
dtype=dtype,
# We don't set element shape since we don't know if writes are stacked or
# not yet.
element_shape=None,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name)
# Note we keep flow unstacked for now since we don't know if writes will be
# stacked or not.
return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
size = data_flow_ops.tensor_array_size_v3(handle, flow)
return wrap(size, False)
def _handle_inside_pfor(pfor_input, handle):
"""Returns True if handle was created inside the pfor loop."""
# We use some heuristic to find the original TensorArray creation op.
# The logic should handle the common cases (except cond based subgraphs).
# In theory the user could perform different operations on the handle (like
# Reshape, stack multiple handles, etc) which could break this logic.
# TODO(agarwal): handle Switch/Merge.
while handle.op.type in ("Enter", "Identity"):
handle = handle.op.inputs[0]
if handle.op.type not in [
"TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]:
raise ValueError("Unable to find source for handle %s" % handle)
else:
return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
# TODO(agarwal): consider looking if this is a Tile op then get its input.
# This may avoid running the Tile operations.
return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
dtype = pfor_input.get_attr("dtype")
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside_pfor:
# Note that if we are inside a control flow construct inside the pfor, and
# only some of the iterations are doing the read (i.e.
# `all_indices_partitioned` is True), then the read operation should only
# return values for the currently active pfor iterations (`all_indices`
# below). Hence, whenever the returned value is stacked (i.e. `flow` is
# stacked), we may need to do an extra gather after reading the values. Also
# note that if `is_inside` is false, then values in the tensor array are
# unstacked. So the check is only needed in this branch.
all_indices = pfor_input.pfor.all_indices
all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
# Note: flow_stacked indicates if values in the TensorArray are stacked or
# not.
if index_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayReadV3 was called on a TensorArray whose"
" values are not loop-invariant, and the read indices were also"
" not loop invariant. This is currently unsupported.")
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
return wrap(value, True)
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
if flow_stacked and all_indices_partitioned:
value = array_ops.gather(value, all_indices)
return wrap(value, flow_stacked)
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on index_stacked.
if index_stacked:
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
else:
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if value_stacked and pfor_input.pfor.all_indices_partitioned:
# Looks like we are in a control flow in a pfor where not all iterations are
# active now. We don't allow that since that could lead to different indices
# having different shapes which will be hard to merge later.
raise ValueError("Writing non loop invariant values to TensorArray from "
"inside a while_loop/cond not supported.")
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if index_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return wrap(flow_out, False)
else:
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
# TODO(agarwal): Note that if flow is unstacked and value is stacked, then
# this may or may not be a safe situation. flow is unstacked both for a
# freshly created TensorArray, as well as after unstacked values are
# written to it. If it is the latter, then we cannot write a stacked value
# now since that may cause runtime errors due to different shapes in the
# array. At the moment we are not able to handle this gracefully and
# distinguish between the two cases. That would require some heuristic
# traversal of the graph to figure out whether all the writes are
# unstacked or not.
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
else:
if not index_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
    # Note that even when index_stacked is true, the actual values in index may
    # still not be unique. However, that will cause a runtime error when
    # executing the scatter operation below.
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
# TODO(agarwal): optimize if one of the dims == 1.
value_shape = array_ops.shape(value)
v0 = value_shape[0]
v1 = value_shape[1]
value = array_ops.reshape(value, [v0, v1, -1])
value = array_ops.transpose(value, [1, 0, 2])
new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
return array_ops.reshape(value, new_shape)
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
dtype = pfor_input.get_attr("dtype")
# TODO(agarwal): support element_shape attr?
n = pfor_input.pfor.loop_len_vector
value = data_flow_ops.tensor_array_gather_v3(
handle, indices, flow, dtype=dtype)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
# flow_stacked indicates if values in the TensorArray are stacked or not.
if indices_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayGatherV3 was called on a TensorArray "
"whose values are not loop-invariant, and the indices were also "
"not loop invariant. This is currently unsupported.")
else:
value = _unflatten_first_dim(value, n)
return wrap(value, True)
else:
if flow_stacked:
# Since elements in this array are stacked and `value` was produced by
# gather, its first two dims are "gathered elements" and "stack
# dimension". Our semantics require these two to be flipped.
value = _transpose_first_two_dims(value)
return wrap(value, flow_stacked)
else:
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on indices_stacked.
if indices_stacked:
value = _unflatten_first_dim(value, n)
return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if indices_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
# Note that flow_stacked indicates if existing values in the array are
# stacked or not.
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return wrap(flow_out, False)
if not value_stacked:
# TODO(agarwal): tile in the second dimension directly instead of
# transposing below.
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _transpose_first_two_dims(value)
# TODO(agarwal): Note that if a previous write was unstacked, flow will be
# unstacked, and a stacked value may be written here which may cause
# runtime error due to different elements having different shape. We do
# not try to prevent that.
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
if not indices_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _flatten_first_two_dims(value)
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
source = pfor_input.get_attr("source")
# TODO(agarwal): For now, we assume that gradients are stacked if the
# TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
# will give runtime error due to incorrect shape being written to the
# accumulator. It is difficult to know in advance if gradients written will be
# stacked or not. Note that flow being stacked is not indicative of the
# gradient being stacked or not. Revisit this later.
shape_to_prepend = pfor_input.pfor.loop_len_vector
grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
handle=handle,
flow_in=flow,
shape_to_prepend=shape_to_prepend,
source=source)
flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
return [wrap(grad_handle, False), wrap(flow_out, True)]
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
# Some other implementation details:
# We use some ugly logic to find whether values in the Stack data structure are
# loop invariant or not. When converting push/pop operations, we keep track of
# whether the last conversion used a stacked value or not (see _stack_cache
# below). As a result, if an unstacked value is written first, subsequent
# stacked writes are disallowed even when they could have been allowed in
# theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}
def _stack_cache_key(pfor_input):
"""Create cache key corresponding to a stack handle."""
op_type = pfor_input.op_type
assert op_type in ["StackPushV2", "StackPopV2"], op_type
orig_handle = pfor_input.op.inputs[0]
while orig_handle.op.type in ["Identity", "Enter"]:
orig_handle = orig_handle.op.inputs[0]
assert orig_handle.op.type == "StackV2", orig_handle.op
return ops.get_default_graph(), pfor_input.pfor, orig_handle
def _stack_handle_inside_pfor(handle, pfor_input):
while handle.op.type in ["Identity", "Enter"]:
handle = handle.op.inputs[0]
assert handle.op.type == "StackV2", (
"Unable to find StackV2 op. Got %s" % handle.op)
return pfor_input.pfor.op_is_inside_loop(handle.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
elem, elem_stacked, _ = pfor_input.input(1)
swap_memory = pfor_input.get_attr("swap_memory")
if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
raise ValueError("StackPushV2 not allowed on stacks created outside pfor")
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
if stacked is None:
stacked = elem_stacked
_stack_cache[stack_cache_key] = stacked
else:
# If we previously made it unstacked then we can't revert to being stacked.
if not stacked and elem_stacked:
raise ValueError(
"It looks like the stack was previously determined to be loop"
" invariant, but we are now trying to push a loop dependent value"
" to it. This is currently unsupported.")
if stacked and not elem_stacked:
elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
return wrap(out, stacked)
# Note that inputs to this converter will be unstacked. However it should get
# called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
  # If a StackPushV2 has not been converted yet, we default to unstacked since
  # the push could be outside of pfor, or the converter may not be called if
  # the inputs are unconverted.
if stacked is None:
stacked = False
_stack_cache[stack_cache_key] = False
elem_type = pfor_input.get_attr("elem_type")
out = data_flow_ops.stack_pop_v2(handle, elem_type)
return wrap(out, stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input):
lines = pfor_input.stacked_input(0)
record_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
field_delim = pfor_input.get_attr("field_delim")
use_quote_delim = pfor_input.get_attr("use_quote_delim")
select_cols = pfor_input.get_attr("select_cols")
if not select_cols:
select_cols = None
return [
wrap(t, True) for t in parsing_ops.decode_csv(
lines,
record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
select_cols=select_cols)
]
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input):
serialized = pfor_input.stacked_input(0)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
sparse_keys = pfor_input.get_attr("sparse_keys")
dense_keys = pfor_input.get_attr("dense_keys")
sparse_types = pfor_input.get_attr("sparse_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
output = gen_parsing_ops.parse_example(
serialized=serialized,
names=[],
dense_defaults=dense_defaults,
sparse_keys=sparse_keys,
dense_keys=dense_keys,
sparse_types=sparse_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
| apache-2.0 |
danviv/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
        return sum(self.series.index > date)
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates >= date)[0][0] # find first non-expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
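def _example_roll_weights():
    """ illustrative only: with a 21-day roll period and 14 days remaining,
    the index holds two thirds front month and one third second month """
    dt, dr = 21, 14
    w1 = 100*dr/dt # ~66.67 (true division thanks to the __future__ import)
    w2 = 100*(dt-dr)/dt # ~33.33
    return w1, w2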
##-------------------Main script---------------------------
if __name__=="__main__":
    Y = reconstructVXX()
    print Y.head(30)
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
plaes/numpy | doc/source/conf.py | 6 | 8773 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "0.5":
raise RuntimeError("Sphinx 0.5.dev or newer required")
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest',
'plot_directive']
if sphinx.__version__ >= "0.7":
extensions.append('sphinx.ext.autosummary')
else:
extensions.append('autosummary')
extensions.append('only_directives')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
#master_doc = 'index'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print version, release
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Manual (DRAFT)" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = 'scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'indexcontent.html',
}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'numpy'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
import matplotlib
matplotlib.rcParams.update({
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
| bsd-3-clause |
smallyear/linuxLearn | salt/salt/client/ssh/state.py | 1 | 6047 | # -*- coding: utf-8 -*-
'''
Create ssh executor system
'''
from __future__ import absolute_import
# Import python libs
import os
import tarfile
import tempfile
import json
import shutil
from contextlib import closing
# Import salt libs
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils
import salt.utils.thin
import salt.utils.url
import salt.roster
import salt.state
import salt.loader
import salt.minion
class SSHState(salt.state.State):
'''
Create a State object which wraps the SSH functions for state operations
'''
def __init__(self, opts, pillar=None, wrapper=None):
self.wrapper = wrapper
super(SSHState, self).__init__(opts, pillar)
def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
def check_refresh(self, data, ret):
'''
Stub out check_refresh
'''
return
def module_refresh(self):
'''
Module refresh is not needed, stub it out
'''
return
class SSHHighState(salt.state.BaseHighState):
'''
Used to compile the highstate on the master
'''
stack = []
def __init__(self, opts, pillar=None, wrapper=None, fsclient=None):
self.client = fsclient
salt.state.BaseHighState.__init__(self, opts)
self.state = SSHState(opts, pillar, wrapper)
self.matcher = salt.minion.Matcher(self.opts)
def load_dynamic(self, matches):
'''
Stub out load_dynamic
'''
return
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs
def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, str):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret
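def _example_salt_refs():
    '''
    Illustrative sketch, not used by Salt: how the two helpers above pick
    salt:// sources out of lowstate chunks. The chunk contents are made up.
    '''
    chunk = {'__env__': 'base',
             'name': '/etc/httpd/conf/httpd.conf',
             'source': 'salt://httpd/httpd.conf',
             'state': 'file'}
    assert salt_refs(chunk) == ['salt://httpd/httpd.conf']
    refs = lowstate_file_refs([chunk])
    assert refs == {'base': [['salt://httpd/httpd.conf']]}
    return refs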
def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.fopen(lowfn, 'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.fopen(pillarfn, 'w+') as fp_:
fp_.write(json.dumps(pillar))
cachedir = os.path.join('salt-ssh', id_)
for saltenv in file_refs:
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0]
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
if files:
for filename in files:
fn = filename[filename.find(short) + len(short):]
if fn.startswith('/'):
fn = fn.strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try: # cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar
| apache-2.0 |
doganov/edx-platform | common/djangoapps/enrollment/data.py | 41 | 9880 | """
Data Aggregation Layer of the Enrollment API. Collects all enrollment specific data into a single
source to be used throughout the API.
"""
import logging
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey
from enrollment.errors import (
CourseEnrollmentClosedError, CourseEnrollmentFullError,
CourseEnrollmentExistsError, UserNotFoundError, InvalidEnrollmentAttribute
)
from enrollment.serializers import CourseEnrollmentSerializer, CourseSerializer
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.exceptions import CourseNotFoundError
from student.models import (
CourseEnrollment, NonExistentCourseError, EnrollmentClosedError,
CourseFullError, AlreadyEnrolledError, CourseEnrollmentAttribute
)
log = logging.getLogger(__name__)
def get_course_enrollments(user_id):
"""Retrieve a list representing all aggregated data for a user's course enrollments.
Construct a representation of all course enrollment data for a specific user.
Args:
user_id (str): The name of the user to retrieve course enrollment information for.
Returns:
A serializable list of dictionaries of all aggregated enrollment data for a user.
"""
qset = CourseEnrollment.objects.filter(
user__username=user_id,
is_active=True
).order_by('created')
enrollments = CourseEnrollmentSerializer(qset, many=True).data
# Find deleted courses and filter them out of the results
deleted = []
valid = []
for enrollment in enrollments:
if enrollment.get("course_details") is not None:
valid.append(enrollment)
else:
deleted.append(enrollment)
if deleted:
log.warning(
(
u"Course enrollments for user %s reference "
u"courses that do not exist (this can occur if a course is deleted)."
), user_id,
)
return valid
def get_course_enrollment(username, course_id):
"""Retrieve an object representing all aggregated data for a user's course enrollment.
Get the course enrollment information for a specific user and course.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
Returns:
A serializable dictionary representing the course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
enrollment = CourseEnrollment.objects.get(
user__username=username, course_id=course_key
)
return CourseEnrollmentSerializer(enrollment).data
except CourseEnrollment.DoesNotExist:
return None
def create_course_enrollment(username, course_id, mode, is_active):
"""Create a new course enrollment for the given user.
Creates a new course enrollment for the specified user username.
Args:
username (str): The name of the user to create a new course enrollment for.
course_id (str): The course to create the course enrollment for.
mode (str): (Optional) The mode for the new enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the new course enrollment.
Raises:
CourseNotFoundError
CourseEnrollmentFullError
EnrollmentClosedError
CourseEnrollmentExistsError
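Example (illustrative call; the username and course id are hypothetical):
>>>create_course_enrollment(
"Bob",
"course-v1-edX-DemoX-1T2015",
"honor",
True,
)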
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.enroll(user, course_key, check_access=True)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except NonExistentCourseError as err:
raise CourseNotFoundError(err.message)
except EnrollmentClosedError as err:
raise CourseEnrollmentClosedError(err.message)
except CourseFullError as err:
raise CourseEnrollmentFullError(err.message)
except AlreadyEnrolledError as err:
enrollment = get_course_enrollment(username, course_id)
raise CourseEnrollmentExistsError(err.message, enrollment)
def update_course_enrollment(username, course_id, mode=None, is_active=None):
"""Modify a course enrollment for a user.
Allows updates to a specific course enrollment.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
mode (str): (Optional) If specified, modify the mode for this enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the modified course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.objects.get(user=user, course_id=course_key)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except CourseEnrollment.DoesNotExist:
return None
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Set enrollment attributes for the enrollment of given user in the
course provided.
Args:
course_id (str): The Course to set enrollment attributes for.
user_id (str): The User to set enrollment attributes for.
attributes (list): Attributes to be set.
Example:
>>>add_or_update_enrollment_attr(
"Bob",
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
)
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if not _invalid_attribute(attributes) and enrollment is not None:
CourseEnrollmentAttribute.add_enrollment_attr(enrollment, attributes)
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attributes for given user for provided course.
Args:
user_id: The User to get enrollment attributes for
course_id (str): The Course to get enrollment attributes for.
Example:
>>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
Returns: list
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
return CourseEnrollmentAttribute.get_enrollment_attributes(enrollment)
def _get_user(user_id):
"""Retrieve user with provided user_id
Args:
user_id(str): username of the user for which object is to retrieve
Returns: obj
"""
try:
return User.objects.get(username=user_id)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=user_id)
log.warn(msg)
raise UserNotFoundError(msg)
def _update_enrollment(enrollment, is_active=None, mode=None):
enrollment.update_enrollment(is_active=is_active, mode=mode)
enrollment.save()
return CourseEnrollmentSerializer(enrollment).data
def _invalid_attribute(attributes):
"""Validate enrollment attribute
Args:
attributes(dict): dict of attribute
Return:
list of invalid attributes
"""
invalid_attributes = []
for attribute in attributes:
if "namespace" not in attribute:
msg = u"'namespace' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("namespace")
raise InvalidEnrollmentAttribute(msg)
if "name" not in attribute:
msg = u"'name' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("name")
raise InvalidEnrollmentAttribute(msg)
if "value" not in attribute:
msg = u"'value' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("value")
raise InvalidEnrollmentAttribute(msg)
return invalid_attributes
def get_course_enrollment_info(course_id, include_expired=False):
"""Returns all course enrollment information for the given course.
Based on the course id, return all related course information.
Args:
course_id (str): The course to retrieve enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary representing the course's enrollment information.
Raises:
CourseNotFoundError
"""
course_key = CourseKey.from_string(course_id)
try:
course = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
msg = u"Requested enrollment information for unknown course {course}".format(course=course_id)
log.warning(msg)
raise CourseNotFoundError(msg)
else:
return CourseSerializer(course, include_expired=include_expired).data
| agpl-3.0 |
xodus7/tensorflow | tensorflow/java/maven/tensorflow-android/update.py | 27 | 3972 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetch android artifacts and update pom properties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import string
import sys
import urllib2
def get_args():
"""Parse command line args."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--version', required=True, help='Version for the artifact.')
parser.add_argument(
'--dir',
required=True,
help='Directory where the pom and aar artifact will be written.')
parser.add_argument(
'--template', required=True, help='Path to pom template file.')
return parser.parse_args()
def get_json(url):
"""Load the contents of the URL as a json object."""
return json.load(urllib2.urlopen(url))
def get_commit_id(build_info):
"""Fetch the git commit id from the build info json object."""
release_commit_id = build_info.get('build_commit_id')
if release_commit_id:
return release_commit_id
actions = build_info.get('actions')
build_data = next(
(a for a in actions
if a.get('_class') == 'hudson.plugins.git.util.BuildData'),
None)
if not build_data:
raise ValueError('Missing BuildData: %s' % build_info)
revision_info = build_data.get('lastBuiltRevision')
if not revision_info:
raise ValueError('Missing lastBuiltRevision: %s' % build_info)
return revision_info.get('SHA1')
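# Illustrative (hypothetical) build-info shape handled above when
# 'build_commit_id' is absent; the real Jenkins payload carries many
# more fields:
#
# {
#   "actions": [
#     {"_class": "hudson.plugins.git.util.BuildData",
#      "lastBuiltRevision": {"SHA1": "abc123..."}}
#   ]
# }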
def get_aar_url(build_info):
"""Given the json build info, find the URL to the tensorflow.aar artifact."""
base_url = build_info.get('url')
if not base_url:
raise ValueError('Missing url: %s' % build_info)
build_class = build_info.get('_class')
if (build_class == 'hudson.model.FreeStyleBuild' or
build_class == 'hudson.matrix.MatrixRun'):
aar_info = next(
(a for a in build_info.get('artifacts')
if a.get('fileName') == 'tensorflow.aar'),
None)
if not aar_info:
raise ValueError('Missing aar artifact: %s' % build_info)
return '%s/artifact/%s' % (base_url, aar_info.get('relativePath'))
raise ValueError('Unknown build_type %s' % build_info)
def read_template(path):
with open(path) as f:
return string.Template(f.read())
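# The pom template is expected to contain string.Template placeholders
# matching the keys substituted in main(), e.g. (hypothetical snippet):
#
#   <version>${version}</version>
#   <commitId>${build_commit_id}</commitId>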
def main():
args = get_args()
release_prefix = 'https://storage.googleapis.com/tensorflow/libtensorflow'
info_url = '%s/android_buildinfo-%s.json' % (release_prefix, args.version)
aar_url = '%s/tensorflow-%s.aar' % (release_prefix, args.version)
build_type = 'release-android'
# Retrieve build information
build_info = get_json(info_url)
# Check all required build info is present
build_commit_id = get_commit_id(build_info)
if not build_commit_id:
raise ValueError('Missing commit id: %s' % build_info)
# Write the pom file updated with build attributes.
template = read_template(args.template)
with open('%s/pom-android.xml' % args.dir, 'w') as f:
f.write(
template.substitute({
'build_commit_id': build_commit_id,
'build_type': build_type,
'version': args.version
}))
# Retrieve the aar location if needed.
if not aar_url:
aar_url = get_aar_url(build_info)
# And download the aar to the desired location.
with open('%s/tensorflow.aar' % args.dir, 'w') as f:
aar = urllib2.urlopen(aar_url)
f.write(aar.read())
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
marcoarruda/MissionPlanner | Lib/site-packages/scipy/fftpack/benchmarks/bench_basic.py | 63 | 7559 | """ Test functions for fftpack.basic module
"""
import sys
from numpy.testing import *
from scipy.fftpack import ifft, fft, fftn, irfft, rfft
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
import numpy.fft
from numpy.random import rand
def random(size):
return rand(*size)
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)/n
return y
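# direct_dft/direct_idft implement the textbook definitions
#   y[k] = sum_j x[j] * exp(-2j*pi*j*k/n)          (forward)
#   y[k] = (1/n) * sum_j x[j] * exp(+2j*pi*j*k/n)  (inverse)
# and serve as slow reference answers for the fft/ifft checks below.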
class TestFft(TestCase):
def bench_random(self):
from numpy.fft import fft as numpy_fft
print
print ' Fast Fourier Transform'
print '================================================='
print ' | real input | complex input '
print '-------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-------------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = fft(x)
else: y = direct_dft(x)
assert_array_almost_equal(fft(x),y)
print '|%8.2f' % measure('fft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fft(x),y)
print '|%8.2f' % measure('numpy_fft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIfft(TestCase):
def bench_random(self):
from numpy.fft import ifft as numpy_ifft
print
print ' Inverse Fast Fourier Transform'
print '==============================================='
print ' | real input | complex input '
print '-----------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-----------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = ifft(x)
else: y = direct_idft(x)
assert_array_almost_equal(ifft(x),y)
print '|%8.2f' % measure('ifft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_ifft(x),y)
print '|%8.2f' % measure('numpy_ifft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestRfft(TestCase):
def bench_random(self):
from numpy.fft import rfft as numpy_rfft
print
print 'Fast Fourier Transform (real data)'
print '=================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
print '|%8.2f' % measure('rfft(x)',repeat),
sys.stdout.flush()
print '|%8.2f' % measure('numpy_rfft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIrfft(TestCase):
def bench_random(self):
from numpy.fft import irfft as numpy_irfft
print
print 'Inverse Fast Fourier Transform (real data)'
print '=========================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
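# scipy's rfft packs the spectrum as [y0, Re(y1), Im(y1), ...] (plus
# the real Nyquist term for even sizes); rebuild numpy's complex
# half-spectrum x1 from that layout so numpy_irfft gets equivalent input.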
x1 = zeros(size/2+1,dtype=cdouble)
x1[0] = x[0]
for i in range(1,size/2):
x1[i] = x[2*i-1] + 1j * x[2*i]
if not size%2:
x1[-1] = x[-1]
y = irfft(x)
print '|%8.2f' % measure('irfft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_irfft(x1,size),y)
print '|%8.2f' % measure('numpy_irfft(x1,size)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestFftn(TestCase):
def bench_random(self):
from numpy.fft import fftn as numpy_fftn
print
print ' Multi-dimensional Fast Fourier Transform'
print '==================================================='
print ' | real input | complex input '
print '---------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '---------------------------------------------------'
for size,repeat in [((100,100),100),((1000,100),7),
((256,256),10),
((512,512),3),
]:
print '%9s' % ('%sx%s'%size),
sys.stdout.flush()
for x in [random(size).astype(double),
random(size).astype(cdouble)+random(size).astype(cdouble)*1j
]:
y = fftn(x)
#if size > 500: y = fftn(x)
#else: y = direct_dft(x)
assert_array_almost_equal(fftn(x),y)
print '|%8.2f' % measure('fftn(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fftn(x),y)
print '|%8.2f' % measure('numpy_fftn(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
cbeck88/fifengine | engine/python/fife/extensions/cegui/ceguibasicapplication.py | 2 | 4813 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
The basic application and main loop.
See the L{ApplicationBase} documentation.
"""
from fife import fife
from fife.extensions.basicapplication import ApplicationBase
import PyCEGUI
class CEGUIEventListener(fife.IKeyListener, fife.ICommandListener):
"""
Default, rudimentary event listener.
Will cause the application to quit on pressing ESC.
"""
def __init__(self, app):
self.app = app
self.engine = app.engine
eventmanager = self.engine.getEventManager()
#eventmanager.setNonConsumableKeys([fife.Key.ESCAPE])
fife.IKeyListener.__init__(self)
eventmanager.addKeyListener(self)
fife.ICommandListener.__init__(self)
eventmanager.addCommandListener(self)
self.quitrequested = False
self.debuggeractive = False
def keyPressed(self, evt):
keyval = evt.getKey().getValue()
if keyval == fife.Key.ESCAPE:
self.app.quit()
def keyReleased(self, evt):
pass
def onCommand(self, command):
if command.getCommandType() == fife.CMD_QUIT_GAME:
self.quitrequested = True
command.consume()
DEFAULT_GUI_DIR = "gui/"
class CEGUIApplicationBase(ApplicationBase):
def __init__(self, setting=None):
super(CEGUIApplicationBase, self).__init__(setting)
self._initGuiManager()
self._loadCEGuiSettings()
def _initGuiManager(self):
settings = self.engine.getSettings()
major_v, minor_v = map(int, PyCEGUI.Version__.split('.')[:2])
#For CEGUI versions lower than 0.8.0 we use the old CEGuiManager
if major_v == 0 and minor_v <= 7:
guimanager = fife.CEGuiManager()
else:
guimanager = fife.CEGui_0Manager()
#transfer ownership to the engine
guimanager.thisown = 0
self.guimanager = guimanager
self.engine.setGuiManager(self.guimanager)
self.engine.getEventManager().addSdlEventListener(self.guimanager)
def _loadCEGuiSettings(self):
self._loadResourcePaths()
def _loadResourcePaths(self):
resourceprovider = PyCEGUI.System.getSingleton().getResourceProvider()
major_v, minor_v = map(int, PyCEGUI.Version__.split('.')[:2])
if major_v == 0 and minor_v <= 7:
resourcetypemap = { "schemes" : PyCEGUI.Scheme.setDefaultResourceGroup,
"imagesets" : PyCEGUI.Imageset.setDefaultResourceGroup,
"fonts" : PyCEGUI.Font.setDefaultResourceGroup,
"layouts" : PyCEGUI.WindowManager.setDefaultResourceGroup,
"looksnfeels" : PyCEGUI.WidgetLookManager.setDefaultResourceGroup,
}
else:
resourcetypemap = { "schemes" : PyCEGUI.Scheme.setDefaultResourceGroup,
"imagesets" : PyCEGUI.ImageManager.setImagesetDefaultResourceGroup,
"fonts" : PyCEGUI.Font.setDefaultResourceGroup,
"layouts" : PyCEGUI.WindowManager.setDefaultResourceGroup,
"looksnfeels" : PyCEGUI.WidgetLookManager.setDefaultResourceGroup,
}
if not self._setting:
for restype, res_setfunc in resourcetypemap.iteritems():
resourceprovider.setResourceGroupDirectory(restype, DEFAULT_GUI_DIR + restype)
res_setfunc(restype)
else:
for restype, res_setfunc in resourcetypemap.iteritems():
path = self._setting.get("CEGUI", restype)
if path:
resourceprovider.setResourceGroupDirectory(restype, path)
res_setfunc(restype)
else:
#set default path
resourceprovider.setResourceGroupDirectory(restype, DEFAULT_GUI_DIR + restype)
res_setfunc(restype)
parser = PyCEGUI.System.getSingleton().getXMLParser()
if parser.isPropertyPresent("SchemaDefaultResourceGroup"):
path = self._setting.get("CEGUI", "schemas") if self._setting else None
if path:
resourceprovider.setResourceGroupDirectory("schemas", path)
else:
resourceprovider.setResourceGroupDirectory("schemas", DEFAULT_GUI_DIR + "schemas")
parser.setProperty("SchemaDefaultResourceGroup", "schemas")
def createListener(self):
self._listener = CEGUIEventListener(self)
return self._listener
| lgpl-2.1 |
shuangshuangwang/spark | examples/src/main/python/mllib/gradient_boosting_classification_example.py | 27 | 2446 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Classification Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesClassificationExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainClassifier(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingClassificationModel")
sameModel = GradientBoostedTreesModel.load(sc,
"target/tmp/myGradientBoostingClassificationModel")
# $example off$
| apache-2.0 |
rapidpro/chatpro | chatpro/rooms/models.py | 1 | 2494 | from __future__ import absolute_import, unicode_literals
from chatpro.profiles.tasks import sync_org_contacts
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Room(models.Model):
"""
Corresponds to a RapidPro contact group
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='rooms')
name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
help_text=_("Name of this room"))
users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='rooms',
help_text=_("Users who can chat in this room"))
managers = models.ManyToManyField(User, verbose_name=_("Managers"), related_name='manage_rooms',
help_text=_("Users who can manage contacts in this room"))
is_active = models.BooleanField(default=True, help_text="Whether this room is active")
@classmethod
def create(cls, org, name, uuid):
return cls.objects.create(org=org, name=name, uuid=uuid)
@classmethod
def get_all(cls, org):
return cls.objects.filter(org=org, is_active=True)
@classmethod
def update_room_groups(cls, org, group_uuids):
"""
Updates an org's chat rooms based on the selected groups UUIDs
"""
# de-activate rooms not included
org.rooms.exclude(uuid__in=group_uuids).update(is_active=False)
# fetch group details
groups = org.get_temba_client().get_groups()
group_names = {group.uuid: group.name for group in groups}
for group_uuid in group_uuids:
existing = org.rooms.filter(uuid=group_uuid).first()
if existing:
existing.name = group_names[group_uuid]
existing.is_active = True
existing.save()
else:
cls.create(org, group_names[group_uuid], group_uuid)
sync_org_contacts.delay(org.id)
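# Illustrative usage (the group UUIDs are hypothetical):
#   Room.update_room_groups(org, ['0f36...', '9a2c...'])
# deactivates rooms for deselected groups, creates or renames the rest,
# and schedules a contact sync for the org.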
def get_contacts(self):
return self.contacts.filter(is_active=True)
def get_users(self):
return self.users.filter(is_active=True).select_related('profile')
def get_managers(self):
return self.managers.filter(is_active=True).select_related('profile')
def __unicode__(self):
return self.name
| bsd-3-clause |
sbalde/edxplatform | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 24 | 12276 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import json
import unittest
import httpretty
from mock import patch
from django.core.urlresolvers import reverse
from openedx.core.lib.json_utils import EscapedEdxJSONEncoder
from student.tests.factories import UserFactory
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
TPA_TESTSHIB_LOGIN_URL = '/auth/login/tpa-saml/?auth_entry=login&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_REGISTER_URL = '/auth/login/tpa-saml/?auth_entry=register&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_COMPLETE_URL = '/auth/complete/tpa-saml/'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
def setUp(self):
super(TestShibIntegrationTest, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self.read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
self.addCleanup(httpretty.disable)
self.addCleanup(httpretty.reset)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn('Authentication with TestShib is currently unavailable.', response.content)
def test_register(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
# The user goes to the register page, and sees a button to register with TestShib:
self._check_register_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_REGISTER_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', register_response.content)
self.assertIn('"errorMessage": null', register_response.content)
# Now do a crude check that the data (e.g. email) from the provider is displayed in the form:
self.assertIn('"defaultValue": "[email protected]"', register_response.content)
self.assertIn('"defaultValue": "Me Myself And I"', register_response.content)
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
{
'email': '[email protected]',
'name': 'Myself',
'username': 'myself',
'honor_code': True,
}
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self.verify_user_email('[email protected]')
self._test_return_login()
def test_login(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
user = UserFactory.create()
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', login_response.content)
self.assertIn('"errorMessage": null', login_response.content)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email': user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def _test_return_login(self):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
login_response = self._fake_testshib_login_and_return()
# There will be one weird redirect required to set the login cookie:
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + TPA_TESTSHIB_COMPLETE_URL)
# And then we should be redirected to the dashboard:
login_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + reverse('dashboard'))
# Now we are logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 200)
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _check_login_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(json.dumps(TPA_TESTSHIB_LOGIN_URL, cls=EscapedEdxJSONEncoder), response.content)
return response
def _check_register_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.register_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(json.dumps(TPA_TESTSHIB_REGISTER_URL, cls=EscapedEdxJSONEncoder), response.content)
return response
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
kwargs.setdefault('name', 'TestShib')
kwargs.setdefault('enabled', True)
kwargs.setdefault('idp_slug', 'testshib')
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_changed, num_failed, num_total = fetch_saml_metadata()
self.assertEqual(num_failed, 0)
self.assertEqual(num_changed, 1)
self.assertEqual(num_total, 1)
def _fake_testshib_login_and_return(self):
""" Mocked: the user logs in to TestShib and then gets redirected back """
# The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
return self.client.post(
TPA_TESTSHIB_COMPLETE_URL,
content_type='application/x-www-form-urlencoded',
data=self.read_data_file('testshib_response.txt'),
)
| agpl-3.0 |
JoKnopp/wp-import | test/test_postgresql.py | 1 | 4427 | # -*- coding: UTF-8 -*-
# © Copyright 2009 Wolodja Wentland. All Rights Reserved.
# This file is part of wp-import.
#
# wp-import is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wp-import is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wp-import. If not, see <http://www.gnu.org/licenses/>.
"""Tests for wp_import.postgresql
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import tempfile
from nose.tools import *
import wp_import.utils as wpi_utils
import wp_import.postgresql as wpi_psql
PREFIX = os.path.join(*os.path.split(os.path.dirname(__file__))[:-1])
TEST_DATA_DIR = os.path.join(PREFIX, 'test', 'data')
DOWNLOAD_DIR = os.path.join(TEST_DATA_DIR, 'download')
EXPECTED_STMTS = {
'categorylinks': [
"""INSERT INTO "categorylinks" VALUES """ \
"(130,'Linux','Linux\u5185\u6838','2006-07-25T19:03:22Z')"],
'langlinks': [
"""INSERT INTO "langlinks" VALUES """ \
"(43017,'af','Dante Alighieri')"],
'pagelinks': [
"""INSERT INTO "pagelinks" VALUES (12,0,'P/NP\u554f\u984c')"""],
'redirect': [
"""INSERT INTO "redirect" VALUES (71247,0,'ASCII\u827a\u672f')"""]}
class FakeOptions(object):
pass
def test_insert_statements():
fn_pat = re.compile(
r'''(?P<language>\w+)wiki-(?P<date>\d{8})-(?P<table>[\w_]+).*''')
for dump_path in sorted(wpi_utils.find('*.sql.gz', DOWNLOAD_DIR)):
filename = os.path.basename(dump_path)
mat = fn_pat.match(filename)
stmts = list(wpi_psql.insert_statements(dump_path))
eq_(stmts, EXPECTED_STMTS[mat.group('table')])
def test_categorylink_pipeline():
for file_path in wpi_utils.find('*categorylinks*.sql.gz', DOWNLOAD_DIR):
with wpi_utils.open_compressed(file_path) as cl_file:
eq_(list(wpi_psql.categorylinks_pipeline(cl_file)),
EXPECTED_STMTS['categorylinks'])
def test_psql_quotation():
eq_(list(wpi_psql.psql_quotation(['f `b`', 'baz', 'shrubbery ``'])),
['f "b"', 'baz', 'shrubbery ""'])
def test_timestamp_to_iso_8601():
eq_(list(wpi_psql.timestamp_to_iso_8601([',20080218135752) foo'])),
[",'2008-02-18T13:57:52Z') foo"])
def test_parse_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': '*', 'host': '*', 'port': '*', 'database': '*',
'password': 'GrailQuest'})
tmp_f.write('hostname:port:database:username:password\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': 'username', 'host': 'hostname', 'port': 'port',
'database': 'database',
'password': 'password'})
def test_password_from_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
options = FakeOptions()
options.pg_passfile = tmp_f.name
options.pg_user = 'KingArthur'
options.pg_port = '2342'
options.pg_host = 'Camelot'
# test generic pgpass line
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test specific pgpass line
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test pick most specific
tmp_f.write('Jerusalem:2342:postgres:Brian:Jehova\n')
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.write('*:*:*:*:UnladenSwallow\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
tmp_f.write('*:*:*:*\n')
tmp_f.seek(0)
assert_raises(KeyError, wpi_psql.password_from_pgpass,
options=options)
| gpl-3.0 |
simonfork/aquaria | ExternalLibs/freetype2/src/tools/glnames.py | 259 | 103407 | #!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2000, 2003, 2005, 2007, 2008 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This python script generates the glyph names tables defined in the
`psnames' module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# http://fonts.apple.com/TTRefMan/RM06/Chap6post.html
#
# for the official list.
#
mac_standard_names = \
[
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
# 10
"quotesingle", "parenleft", "parenright", "asterisk", "plus",
"comma", "hyphen", "period", "slash", "zero",
# 20
"one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "colon",
# 30
"semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D",
# 40
"E", "F", "G", "H", "I",
"J", "K", "L", "M", "N",
# 50
"O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X",
# 60
"Y", "Z", "bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "a", "b",
# 70
"c", "d", "e", "f", "g",
"h", "i", "j", "k", "l",
# 80
"m", "n", "o", "p", "q",
"r", "s", "t", "u", "v",
# 90
"w", "x", "y", "z", "braceleft",
"bar", "braceright", "asciitilde", "Adieresis", "Aring",
# 100
"Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
"aacute", "agrave", "acircumflex", "adieresis", "atilde",
# 110
"aring", "ccedilla", "eacute", "egrave", "ecircumflex",
"edieresis", "iacute", "igrave", "icircumflex", "idieresis",
# 120
"ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
"otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
# 130
"dagger", "degree", "cent", "sterling", "section",
"bullet", "paragraph", "germandbls", "registered", "copyright",
# 140
"trademark", "acute", "dieresis", "notequal", "AE",
"Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
# 150
"yen", "mu", "partialdiff", "summation", "product",
"pi", "integral", "ordfeminine", "ordmasculine", "Omega",
# 160
"ae", "oslash", "questiondown", "exclamdown", "logicalnot",
"radical", "florin", "approxequal", "Delta", "guillemotleft",
# 170
"guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
"Otilde", "OE", "oe", "endash", "emdash",
# 180
"quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
"lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
# 190
"guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
"periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
# 200
"Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
"Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
# 210
"apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
"dotlessi", "circumflex", "tilde", "macron", "breve",
# 220
"dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
"caron", "Lslash", "lslash", "Scaron", "scaron",
# 230
"Zcaron", "zcaron", "brokenbar", "Eth", "eth",
"Yacute", "yacute", "Thorn", "thorn", "minus",
# 240
"multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
"onequarter", "threequarters", "franc", "Gbreve", "gbreve",
# 250
"Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
"Ccaron", "ccaron", "dcroat"
]
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
# http://partners.adobe.com/asn/developer/pdfs/tn/5176.CFF.pdf.
#
sid_standard_names = \
[
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
# 10
"parenright", "asterisk", "plus", "comma", "hyphen",
"period", "slash", "zero", "one", "two",
# 20
"three", "four", "five", "six", "seven",
"eight", "nine", "colon", "semicolon", "less",
# 30
"equal", "greater", "question", "at", "A",
"B", "C", "D", "E", "F",
# 40
"G", "H", "I", "J", "K",
"L", "M", "N", "O", "P",
# 50
"Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z",
# 60
"bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
"quoteleft", "a", "b", "c", "d",
# 70
"e", "f", "g", "h", "i",
"j", "k", "l", "m", "n",
# 80
"o", "p", "q", "r", "s",
"t", "u", "v", "w", "x",
# 90
"y", "z", "braceleft", "bar", "braceright",
"asciitilde", "exclamdown", "cent", "sterling", "fraction",
# 100
"yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
# 110
"fl", "endash", "dagger", "daggerdbl", "periodcentered",
"paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
# 120
"guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
"acute", "circumflex", "tilde", "macron", "breve",
# 130
"dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine",
# 140
"Lslash", "Oslash", "OE", "ordmasculine", "ae",
"dotlessi", "lslash", "oslash", "oe", "germandbls",
# 150
"onesuperior", "logicalnot", "mu", "trademark", "Eth",
"onehalf", "plusminus", "Thorn", "onequarter", "divide",
# 160
"brokenbar", "degree", "thorn", "threequarters", "twosuperior",
"registered", "minus", "eth", "multiply", "threesuperior",
# 170
"copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
# 180
"Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
"Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
# 190
"Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
"Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
# 200
"aacute", "acircumflex", "adieresis", "agrave", "aring",
"atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
# 210
"egrave", "iacute", "icircumflex", "idieresis", "igrave",
"ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
# 220
"otilde", "scaron", "uacute", "ucircumflex", "udieresis",
"ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
# 230
"Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
# 240
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
# 250
"threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
"centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
# 260
"msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
# 270
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
"Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
# 280
"Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
"Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
# 290
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
"Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
# 300
"colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
"centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
# 310
"Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
"hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
# 320
"oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
"twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
# 330
"sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
"twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
# 340
"seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
"periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
# 350
"Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
"Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
# 360
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
# 370
"OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
# 380
"001.001", "001.002", "001.003", "Black", "Bold",
"Book", "Light", "Medium", "Regular", "Roman",
# 390
"Semibold"
]
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
148, 149, 0, 0, 0, 0
]
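# For example, character code 65 ('A') in the Standard encoding gives
# t1_standard_encoding[65] == 34, and sid_standard_names[34] == "A".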
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378
]
# This data has been taken literally from the file `glyphlist.txt',
# version 2.0, 22 Sept 2002. It is available from
#
# http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
# http://partners.adobe.com/public/developer/en/opentype/glyphlist.txt
#
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
"""
# string table management
#
class StringTable:
def __init__( self, name_list, master_table_name ):
self.names = name_list
self.master_table = master_table_name
self.indices = {}
index = 0
for name in name_list:
self.indices[name] = index
index += len( name ) + 1
self.total = index
def dump( self, file ):
write = file.write
write( " static const char " + self.master_table +
"[" + repr( self.total ) + "] =\n" )
write( " {\n" )
line = ""
for name in self.names:
line += " '"
line += string.join( ( re.findall( ".", name ) ), "','" )
line += "', 0,\n"
write( line + " };\n\n\n" )
def dump_sublist( self, file, table_name, macro_name, sublist ):
write = file.write
write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )
write( " /* Values are offsets into the `" +
self.master_table + "' table */\n\n" )
write( " static const short " + table_name +
"[" + macro_name + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for name in sublist:
line += comma
line += "%4d" % self.indices[name]
col += 1
comma = ","
if col == 14:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
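# Illustrative sketch of the emitted C (added for clarity; derived from
# dump()/dump_sublist() above, not verbatim tool output): for
# name_list = ["A", "Aacute"], dump() writes source along these lines
#
#   static const char ft_standard_glyph_names[9] =
#   {
#     'A', 0,
#     'A','a','c','u','t','e', 0,
#   };
#
# and dump_sublist() then writes the offsets 0 and 2 into that table.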
# We now store the Adobe Glyph List in compressed form. The list is put
# into a data structure called `trie' (because it has a tree-like
# appearance). Consider, for example, that you want to store the
# following name mapping:
#
# A => 1
# Aacute => 6
# Abalon => 2
# Abstract => 4
#
# It is possible to store the entries as follows.
#
# A => 1
# |
# +-acute => 6
# |
# +-b
# |
# +-alon => 2
# |
# +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
# root = StringNode( "", 0 )
#   for word in map.keys():
# root.add( word, map[word] )
#
# which creates a large trie where each node has only one child.
#
# Executing
#
# root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme. We
# use the fact that in the AGL no name contains character codes > 127.
#
# name bitsize description
# ----------------------------------------------------------------
# notlast 1 Set to 1 if this is not the last letter
# in the word.
# ascii 7 The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
# current key (if any). Again we can do some optimization because all
# AGL entries are from the BMP; this means that 16 bits are sufficient
#     to store their Unicode values.  Additionally, no node has more than
# 127 children.
#
# name bitsize description
# -----------------------------------------
# hasvalue 1 Set to 1 if a 16-bit Unicode value follows.
# num_children 7 Number of children. Can be 0 only if
# `hasvalue' is set to 1.
# value 16 Optional Unicode value.
#
# - A node is finished by a list of 16bit absolute offsets to the
# children, which must be sorted in increasing order of their first
# letter.
#
# For simplicity, all 16bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
class StringNode:
def __init__( self, letter, value ):
self.letter = letter
self.value = value
self.children = {}
def __cmp__( self, other ):
return ord( self.letter[0] ) - ord( other.letter[0] )
def add( self, word, value ):
if len( word ) == 0:
self.value = value
return
letter = word[0]
word = word[1:]
if self.children.has_key( letter ):
child = self.children[letter]
else:
child = StringNode( letter, 0 )
self.children[letter] = child
child.add( word, value )
def optimize( self ):
# optimize all children first
children = self.children.values()
self.children = {}
for child in children:
self.children[child.letter[0]] = child.optimize()
# don't optimize if there's a value,
# if we don't have any child or if we
# have more than one child
if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
return self
child = children[0]
self.letter += child.letter
self.value = child.value
self.children = child.children
return self
def dump_debug( self, write, margin ):
# this is used during debugging
line = margin + "+-"
if len( self.letter ) == 0:
line += "<NOLETTER>"
else:
line += self.letter
if self.value:
line += " => " + repr( self.value )
write( line + "\n" )
if self.children:
margin += "| "
for child in self.children.values():
child.dump_debug( write, margin )
def locate( self, index ):
self.index = index
if len( self.letter ) > 0:
index += len( self.letter ) + 1
else:
index += 2
if self.value != 0:
index += 2
children = self.children.values()
children.sort()
index += 2 * len( children )
for child in children:
index = child.locate( index )
return index
def store( self, storage ):
# write the letters
l = len( self.letter )
if l == 0:
storage += struct.pack( "B", 0 )
else:
for n in range( l ):
val = ord( self.letter[n] )
if n < l - 1:
val += 128
storage += struct.pack( "B", val )
# write the count
children = self.children.values()
children.sort()
count = len( children )
if self.value != 0:
storage += struct.pack( "!BH", count + 128, self.value )
else:
storage += struct.pack( "B", count )
for child in children:
storage += struct.pack( "!H", child.index )
for child in children:
storage = child.store( storage )
return storage
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
lines = string.split( adobe_glyph_list, '\n' )
glyphs = []
values = []
for line in lines:
if line:
fields = string.split( line, ';' )
# print fields[1] + ' - ' + fields[0]
subfields = string.split( fields[1], ' ' )
if len( subfields ) == 1:
glyphs.append( fields[0] )
values.append( fields[1] )
return glyphs, values
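# Example (illustrative): the AGL line "section;00A7" above contributes
# "section" to `glyphs' and "00A7" to `values', while multi-codepoint
# entries such as "shaddafathatanarabic;0651 064B" fail the
# len( subfields ) == 1 test and are skipped.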
def filter_glyph_names( alist, filter ):
"""filter `alist' by taking _out_ all glyph names that are in `filter'"""
    extras = []
    for name in alist:
        try:
            filter.index( name )  # raises ValueError if `name' is not in `filter'
except:
extras.append( name )
return extras
def dump_encoding( file, encoding_name, encoding_list ):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( " static const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for value in encoding_list:
line += comma
line += "%3d" % value
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
def dump_array( the_array, write, array_name ):
"""dumps a given encoding"""
write( " static const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "L] =\n" )
write( " {\n" )
line = ""
comma = " "
col = 0
for value in the_array:
line += comma
line += "%3d" % ord( value )
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
if len( line ) > 1024:
write( line )
line = ""
write( line + "\n };\n\n\n" )
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
    file = open( sys.argv[1], "w" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n")
# Now run the main routine
#
main()
# END
| gpl-2.0 |
ep1cman/workload-automation | wlauto/instrumentation/daq/__init__.py | 2 | 20324 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
from __future__ import division
import os
import sys
import csv
import shutil
import tempfile
from collections import OrderedDict, defaultdict
from string import ascii_lowercase
from multiprocessing import Process, Queue
from wlauto import Instrument, Parameter
from wlauto.core import signal
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.types import list_of_ints, list_of_strs, boolean
# pylint: disable=wrong-import-position,wrong-import-order
daqpower_path = os.path.join(os.path.dirname(__file__), '..', '..', 'external', 'daq_server', 'src')
sys.path.insert(0, daqpower_path)
try:
import daqpower.client as daq # pylint: disable=F0401
from daqpower.config import DeviceConfiguration, ServerConfiguration, ConfigurationError # pylint: disable=F0401
except ImportError, e:
daq, DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None, None
import_error_mesg = e.message
sys.path.pop(0)
UNITS = {
'energy': 'Joules',
'power': 'Watts',
'voltage': 'Volts',
}
GPIO_ROOT = '/sys/class/gpio'
TRACE_MARKER_PATH = '/sys/kernel/debug/tracing/trace_marker'
def dict_or_bool(value):
"""
Ensures that either a dictionary or a boolean is used as a parameter.
"""
if isinstance(value, dict):
return value
return boolean(value)
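# Example (illustrative, not part of the original source):
#   dict_or_bool({'A15': ['A15a', 'A15b']})  ->  the dict, unchanged
#   dict_or_bool('true')                     ->  True (via wlauto's boolean(),
#                                                 assumed to map 'true' to True)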
class Daq(Instrument):
name = 'daq'
description = """
DAQ instrument obtains the power consumption of the target device's core
    as measured by a National Instruments Data Acquisition (DAQ) device.
WA communicates with a DAQ device server running on a Windows machine
(Please refer to :ref:`daq_setup`) over a network. You must specify the IP
address and port the server is listening on in the config file as follows ::
daq_server_host = '10.1.197.176'
daq_server_port = 45677
These values will be output by the server when you run it on Windows.
You must also specify the values of resistors (in Ohms) across which the
voltages are measured (Please refer to :ref:`daq_setup`). The values should be
specified as a list with an entry for each resistor, e.g.::
daq_resistor_values = [0.005, 0.005]
In addition to this mandatory configuration, you can also optionally specify the
following::
:daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where
'pnum' is the number of the port.
:daq_device_id: The ID under which the DAQ is registered with the driver.
Defaults to ``'Dev1'``.
:daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
(please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
:daq_dv_range: Specifies the voltage range for the resistor voltage channel on
the DAQ (please refer to :ref:`daq_setup` for details).
Defaults to ``0.2``.
:daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
                        second. Please note that this may be limited by your DAQ model
                        and the number of ports you're measuring (again, see
:ref:`daq_setup`). Defaults to ``10000``.
:daq_channel_map: Represents mapping from logical AI channel number to physical
connector on the DAQ (varies between DAQ models). The default
assumes DAQ 6363 and similar with AI channels on connectors
0-7 and 16-23.
"""
parameters = [
Parameter('server_host', kind=str, default='localhost',
global_alias='daq_server_host',
description='The host address of the machine that runs the daq Server which the '
'instrument communicates with.'),
Parameter('server_port', kind=int, default=45677,
global_alias='daq_server_port',
description='The port number for daq Server in which daq instrument communicates '
'with.'),
Parameter('device_id', kind=str, default='Dev1',
global_alias='daq_device_id',
description='The ID under which the DAQ is registered with the driver.'),
Parameter('v_range', kind=float, default=2.5,
global_alias='daq_v_range',
description='Specifies the voltage range for the SOC voltage channel on the DAQ '
'(please refer to :ref:`daq_setup` for details).'),
Parameter('dv_range', kind=float, default=0.2,
global_alias='daq_dv_range',
description='Specifies the voltage range for the resistor voltage channel on '
'the DAQ (please refer to :ref:`daq_setup` for details).'),
Parameter('sampling_rate', kind=int, default=10000,
global_alias='daq_sampling_rate',
description='DAQ sampling rate. DAQ will take this many samples each '
                          'second. Please note that this may be limited by your DAQ model '
                          'and the number of ports you\'re measuring (again, see '
':ref:`daq_setup`)'),
Parameter('resistor_values', kind=list, mandatory=True,
global_alias='daq_resistor_values',
description='The values of resistors (in Ohms) across which the voltages are measured on '
'each port.'),
Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
global_alias='daq_channel_map',
description='Represents mapping from logical AI channel number to physical '
'connector on the DAQ (varies between DAQ models). The default '
'assumes DAQ 6363 and similar with AI channels on connectors '
'0-7 and 16-23.'),
Parameter('labels', kind=list_of_strs,
global_alias='daq_labels',
              description='List of port labels. If specified, the length of the list must match '
'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where '
'"pnum" is the number of the port.'),
Parameter('negative_samples', default='keep', allowed_values=['keep', 'zero', 'drop', 'abs'],
global_alias='daq_negative_samples',
description="""
Specifies how negative power samples should be handled. The following
methods are possible:
:keep: keep them as they are
:zero: turn negative values to zero
:drop: drop samples if they contain negative values. *warning:* this may result in
port files containing different numbers of samples
                    :abs: take the absolute value of negative samples
"""),
Parameter('gpio_sync', kind=int, constraint=lambda x: x > 0,
description="""
If specified, the instrument will simultaneously set the
specified GPIO pin high and put a marker into ftrace. This is
                  to facilitate syncing kernel trace events to DAQ power
trace.
"""),
Parameter('merge_channels', kind=dict_or_bool, default=False,
description="""
If set to ``True``, channels with consecutive letter suffixes will be summed.
e.g. If you have channels A7a, A7b, A7c, A15a, A15b they will be summed to A7, A15
You can also manually specify the name of channels to be merged and the name of the
result like so:
merge_channels:
A15: [A15dvfs, A15ram]
NonCPU: [GPU, RoS, Mem]
                  In the above example the DAQ channels labeled A15dvfs and A15ram will be
                  summed with the result saved as 'A15', and GPU, RoS and Mem will be summed to 'NonCPU'.
""")
]
def initialize(self, context):
status, devices = self._execute_command('list_devices')
if status == daq.Status.OK and not devices:
raise InstrumentError('DAQ: server did not report any devices registered with the driver.')
self._results = OrderedDict()
self.gpio_path = None
if self.gpio_sync:
if not self.device.file_exists(GPIO_ROOT):
raise InstrumentError('GPIO sysfs not enabled on the device.')
try:
export_path = self.device.path.join(GPIO_ROOT, 'export')
self.device.set_sysfile_value(export_path, self.gpio_sync, verify=False)
pin_root = self.device.path.join(GPIO_ROOT, 'gpio{}'.format(self.gpio_sync))
direction_path = self.device.path.join(pin_root, 'direction')
self.device.set_sysfile_value(direction_path, 'out')
self.gpio_path = self.device.path.join(pin_root, 'value')
self.device.set_sysfile_value(self.gpio_path, 0, verify=False)
signal.connect(self.insert_start_marker, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
signal.connect(self.insert_stop_marker, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
except DeviceError as e:
raise InstrumentError('Could not configure GPIO on device: {}'.format(e))
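    # Illustrative sysfs layout set up above, assuming gpio_sync=21 (the pin
    # number is an example, not from the original source):
    #   /sys/class/gpio/export            <- '21' written to export the pin
    #   /sys/class/gpio/gpio21/direction  <- set to 'out'
    #   /sys/class/gpio/gpio21/value      <- toggled 1/0 by the start/stop markers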
def setup(self, context):
self.logger.debug('Initialising session.')
self._execute_command('configure', config=self.device_config)
def slow_start(self, context):
self.logger.debug('Starting collecting measurements.')
self._execute_command('start')
def slow_stop(self, context):
self.logger.debug('Stopping collecting measurements.')
self._execute_command('stop')
def update_result(self, context): # pylint: disable=R0914
self.logger.debug('Downloading data files.')
output_directory = _d(os.path.join(context.output_directory, 'daq'))
self._execute_command('get_data', output_directory=output_directory)
if self.merge_channels:
self._merge_channels(context)
for entry in os.listdir(output_directory):
context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]),
path=os.path.join('daq', entry),
kind='data',
                                           description='DAQ power measurements.')
port = os.path.splitext(entry)[0]
path = os.path.join(output_directory, entry)
key = (context.spec.id, context.spec.label, context.current_iteration)
if key not in self._results:
self._results[key] = {}
temp_file = os.path.join(tempfile.gettempdir(), entry)
writer, wfh = None, None
with open(path) as fh:
if self.negative_samples != 'keep':
wfh = open(temp_file, 'wb')
writer = csv.writer(wfh)
reader = csv.reader(fh)
metrics = reader.next()
if writer:
writer.writerow(metrics)
self._metrics |= set(metrics)
rows = _get_rows(reader, writer, self.negative_samples)
data = zip(*rows)
if writer:
wfh.close()
shutil.move(temp_file, os.path.join(output_directory, entry))
n = len(data[0])
means = [s / n for s in map(sum, data)]
for metric, value in zip(metrics, means):
metric_name = '{}_{}'.format(port, metric)
context.result.add_metric(metric_name, round(value, 3), UNITS[metric])
self._results[key][metric_name] = round(value, 3)
energy = sum(data[metrics.index('power')]) * (self.sampling_rate / 1000000)
context.result.add_metric('{}_energy'.format(port), round(energy, 3), UNITS['energy'])
def teardown(self, context):
self.logger.debug('Terminating session.')
self._execute_command('close')
def finalize(self, context):
if self.gpio_path:
unexport_path = self.device.path.join(GPIO_ROOT, 'unexport')
self.device.set_sysfile_value(unexport_path, self.gpio_sync, verify=False)
def validate(self): # pylint: disable=too-many-branches
if not daq:
raise ImportError(import_error_mesg)
self._results = None
self._metrics = set()
if self.labels:
if len(self.labels) != len(self.resistor_values):
raise ConfigError('Number of DAQ port labels does not match the number of resistor values.')
duplicates = set([x for x in self.labels if self.labels.count(x) > 1])
if len(duplicates) > 0:
raise ConfigError('Duplicate labels: {}'.format(', '.join(duplicates)))
else:
self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)]
self.server_config = ServerConfiguration(host=self.server_host,
port=self.server_port)
self.device_config = DeviceConfiguration(device_id=self.device_id,
v_range=self.v_range,
dv_range=self.dv_range,
sampling_rate=self.sampling_rate,
resistor_values=self.resistor_values,
channel_map=self.channel_map,
labels=self.labels)
try:
self.server_config.validate()
self.device_config.validate()
except ConfigurationError, ex:
raise ConfigError('DAQ configuration: ' + ex.message) # Re-raise as a WA error
self.grouped_suffixes = defaultdict(str)
if isinstance(self.merge_channels, bool):
if self.merge_channels:
# Create a dict of potential prefixes and a list of their suffixes
grouped_suffixes = defaultdict(list)
for label in sorted(self.labels):
if len(label) > 1:
grouped_suffixes[label[:-1]].append(label)
# Only merge channels if more than one channel has the same prefix and the prefixes
# are consecutive letters starting with 'a'.
self.label_map = {}
for channel, suffixes in grouped_suffixes.iteritems():
if len(suffixes) > 1:
if "".join([s[-1] for s in suffixes]) in ascii_lowercase[:len(suffixes)]:
self.label_map[channel] = suffixes
elif isinstance(self.merge_channels, dict):
# Check if given channel names match labels
for old_names in self.merge_channels.values():
for name in old_names:
if name not in self.labels:
raise ConfigError("No channel with label {} specified".format(name))
self.label_map = self.merge_channels # pylint: disable=redefined-variable-type
self.merge_channels = True
else: # Should never reach here
raise AssertionError("``merge_channels`` is of invalid type")
def before_overall_results_processing(self, context):
if self._results:
headers = ['id', 'workload', 'iteration']
metrics = ['{}_{}'.format(p, m) for p in self.labels for m in sorted(self._metrics)]
headers += metrics
rows = [headers]
for key, value in self._results.iteritems():
rows.append(list(key) + [value[m] for m in metrics])
outfile = os.path.join(context.output_directory, 'daq_power.csv')
with open(outfile, 'wb') as fh:
writer = csv.writer(fh)
writer.writerows(rows)
def insert_start_marker(self, context):
if self.gpio_path:
command = 'echo DAQ_START_MARKER > {}; echo 1 > {}'.format(TRACE_MARKER_PATH, self.gpio_path)
self.device.execute(command, as_root=self.device.is_rooted)
def insert_stop_marker(self, context):
if self.gpio_path:
command = 'echo DAQ_STOP_MARKER > {}; echo 0 > {}'.format(TRACE_MARKER_PATH, self.gpio_path)
self.device.execute(command, as_root=self.device.is_rooted)
def _execute_command(self, command, **kwargs):
# pylint: disable=E1101
q = Queue()
p = Process(target=_send_daq_command, args=(q, self.server_config, command), kwargs=kwargs)
p.start()
result = q.get()
p.join()
if result.status == daq.Status.OK:
pass # all good
elif result.status == daq.Status.OKISH:
self.logger.debug(result.message)
elif result.status == daq.Status.ERROR:
raise InstrumentError('DAQ: {}'.format(result.message))
else:
raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message))
return (result.status, result.data)
def _merge_channels(self, context): # pylint: disable=r0914
output_directory = _d(os.path.join(context.output_directory, 'daq'))
for name, labels in self.label_map.iteritems():
summed = None
for label in labels:
path = os.path.join(output_directory, "{}.csv".format(label))
with open(path) as fh:
reader = csv.reader(fh)
metrics = reader.next()
rows = _get_rows(reader, None, self.negative_samples)
if summed:
summed = [[x + y for x, y in zip(a, b)] for a, b in zip(rows, summed)]
else:
summed = rows
output_path = os.path.join(output_directory, "{}.csv".format(name))
with open(output_path, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(metrics)
for row in summed:
writer.writerow(row)
def _send_daq_command(q, *args, **kwargs):
result = daq.execute_command(*args, **kwargs)
q.put(result)
def _get_rows(reader, writer, negative_samples):
rows = []
for row in reader:
row = map(float, row)
if negative_samples == 'keep':
rows.append(row)
elif negative_samples == 'zero':
def nonneg(v):
return v if v >= 0 else 0
rows.append([nonneg(v) for v in row])
elif negative_samples == 'drop':
if all(v >= 0 for v in row):
rows.append(row)
elif negative_samples == 'abs':
rows.append([abs(v) for v in row])
else:
raise AssertionError(negative_samples) # should never get here
if writer:
writer.writerow(row)
return rows
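# Illustrative behaviour of the negative_samples modes above (example values
# assumed, not from the original source): given the row [-0.1, 2.0],
# 'keep' leaves it as [-0.1, 2.0], 'zero' yields [0, 2.0], 'abs' yields
# [0.1, 2.0], and 'drop' omits the row entirely -- which is why port files
# may end up with different sample counts.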
| apache-2.0 |
danielchalef/gensim | gensim/corpora/bleicorpus.py | 68 | 4496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Blei's LDA-C format.
"""
from __future__ import with_statement
from os import path
import logging
from gensim import interfaces, utils
from gensim.corpora import IndexedCorpus
from six.moves import xrange
logger = logging.getLogger('gensim.corpora.bleicorpus')
class BleiCorpus(IndexedCorpus):
"""
Corpus in Blei's LDA-C format.
The corpus is represented as two files: one describing the documents, and another
describing the mapping between words and their ids.
Each document is one line::
N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN
The vocabulary is a file with words, one word per line; word at line K has an
implicit ``id=K``.
"""
def __init__(self, fname, fname_vocab=None):
"""
Initialize the corpus from a file.
`fname_vocab` is the file with vocabulary; if not specified, it defaults to
`fname.vocab`.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s" % fname)
if fname_vocab is None:
fname_base, _ = path.splitext(fname)
fname_dir = path.dirname(fname)
for fname_vocab in [
utils.smart_extension(fname, '.vocab'),
utils.smart_extension(fname, '/vocab.txt'),
utils.smart_extension(fname_base, '.vocab'),
utils.smart_extension(fname_dir, '/vocab.txt'),
]:
if path.exists(fname_vocab):
break
else:
raise IOError('BleiCorpus: could not find vocabulary file')
self.fname = fname
with utils.smart_open(fname_vocab) as fin:
words = [utils.to_unicode(word).rstrip() for word in fin]
self.id2word = dict(enumerate(words))
def __iter__(self):
"""
Iterate over the corpus, returning one sparse vector at a time.
"""
lineno = -1
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
yield self.line2doc(line)
self.length = lineno + 1
def line2doc(self, line):
parts = utils.to_unicode(line).split()
if int(parts[0]) != len(parts) - 1:
raise ValueError("invalid format in %s: %s" % (self.fname, repr(line)))
doc = [part.rsplit(':', 1) for part in parts[1:]]
doc = [(int(p1), float(p2)) for p1, p2 in doc]
return doc
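    # Illustrative example (not part of the original gensim source):
    #   line2doc("3 0:1.0 5:2.0 9:1.0\n")  ->  [(0, 1.0), (5, 2.0), (9, 1.0)]
    # The leading "3" is the field count checked against len(parts) - 1.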
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save a corpus in the LDA-C format.
There are actually two files saved: `fname` and `fname.vocab`, where
`fname.vocab` is the vocabulary file.
This function is automatically called by `BleiCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
else:
num_terms = 1 + max([-1] + id2word.keys())
logger.info("storing corpus in Blei's LDA-C format into %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
offsets = []
for doc in corpus:
doc = list(doc)
offsets.append(fout.tell())
parts = ["%i:%g" % p for p in doc if abs(p[1]) > 1e-7]
fout.write(utils.to_utf8("%i %s\n" % (len(doc), ' '.join(parts))))
# write out vocabulary, in a format compatible with Blei's topics.py script
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab))
with utils.smart_open(fname_vocab, 'wb') as fout:
for featureid in xrange(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
return offsets
def docbyoffset(self, offset):
"""
Return the document stored at file position `offset`.
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())
# endclass BleiCorpus
| gpl-3.0 |
40223220/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/draw.py | 603 | 6456 | from javascript import console
from browser import timer
import math
class Queue:
def __init__(self):
self._list=[]
def empty(self):
return len(self._list) == 0
def put(self, element):
self._list.append(element)
def get(self):
if len(self._list) == 0:
            raise IndexError('get from an empty queue')  # the original raised an undefined BaseError
_element=self._list[0]
if len(self._list) == 1:
self._list=[]
else:
self._list=self._list[1:]
return _element
dm={}
def aaline(canvas, color, startpos, endpos, width, outline, blend=1):
#console.log("aaline")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dl=DrawLine(startpos[0], startpos[1], endpos[0], endpos[1], color,
width, outline, speed=10)
dm[canvas].add_line(_dl) #color, startpos, endpos, width, outline)
def aapolygon(canvas, color, coordinates, width, outline, blend=1):
#console.log("aapolygon")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dp=DrawPolygon(coordinates, color, width, outline, speed=10)
dm[canvas].add_polygon(_dp)
def aapolygon_bg(canvas, shape):
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
dm[canvas].add_polygon_bg(shape)
class DrawPolygon:
def __init__(self, coordinates, color, width, outline, speed=10):
self.moveTo=coordinates[0]
self.segments=coordinates[1:]
self.color=color
self.width=width
self.outline=outline
class DrawLine:
def __init__(self, x0, y0, x1, y1, color, width, outline, speed=None):
self._type='LINE'
self._x0=x0
self._x1=x1
self._y0=y0
self._y1=y1
self._speed=speed
self._color=color
self._width=width
self._outline=outline
def get_segments(self):
if self._speed==0: #no animate since speed is 0 (return one segment)
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
#need to figure out how to translate speed into pixels, etc
#maybe speed is pixels per ms? 10 = 10 pixels per millisecond?
_x=(self._x1 - self._x0)
_x*=_x
_y=(self._y1 - self._y0)
_y*=_y
_distance=math.sqrt(_x + _y)
if _distance < self._speed: # we can do this in one segment
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
_segments=[]
_num_segments=math.floor(_distance/self._speed)
_pos_x=self._x0
_pos_y=self._y0
_x_diff=self._x1 - self._x0
_y_diff=self._y1 - self._y0
for _i in range(1,_num_segments+1):
_x=self._x0 + _i/_num_segments * _x_diff
_y=self._y0 + _i/_num_segments * _y_diff
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                      'x1': _x, 'y1': _y, 'color': self._color})
_pos_x=_x
_pos_y=_y
        if _pos_x != self._x1 or _pos_y != self._y1:
            # snap the final segment to the exact endpoint rather than the
            # last interpolated point
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                      'x1': self._x1, 'y1': self._y1, 'color': self._color})
return _segments
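    # Worked example (illustrative): DrawLine(0, 0, 30, 0, color, 1, None,
    # speed=10).get_segments() covers a distance of 30 px at 10 px per step,
    # so it returns three LINE dicts: (0,0)->(10,0), (10,0)->(20,0) and
    # (20,0)->(30,0); no trailing segment is added since the last point
    # lands exactly on the endpoint.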
class DrawManager:
def __init__(self, canvas):
self._queue=Queue()
self._canvas=canvas
self._ctx=canvas.getContext('2d')
self._interval=None
self._bg=None #used to capture bg before polygon is drawn
def __del__(self):
if self._interval is not None:
            timer.clear_interval(self._interval)  # Brython's browser.timer API is lowercase
self._interval=None
del self._queue
def rect_from_shape(self, points):
_width=self._canvas.width
_height=self._canvas.height
_min_x=_width
_max_x=0
_min_y=_height
_max_y=0
for _point in points:
_x, _y = _point
_min_x=min(_min_x, _x)
_min_y=min(_min_y, _y)
_max_x=max(_max_x, _x)
_max_y=max(_max_y, _y)
_w2=_width/2
_h2=_height/2
        return math.floor(_min_x-0.5)+_w2, math.floor(_min_y-0.5)+_h2, \
               math.ceil(_max_x+0.5)+_w2, math.ceil(_max_y+0.5)+_h2
def __interval(self):
if not self._queue.empty():
_dict=self._queue.get()
if _dict['type'] == 'LINE':
self._ctx.beginPath()
self._ctx.moveTo(_dict['x0'], _dict['y0'])
self._ctx.lineTo(_dict['x1'], _dict['y1'])
#if _dict['outline'] is not None:
# self._ctx.strokeStyle=_dict['outline'] #set line color
if _dict['color'] is not None:
self._ctx.fillStyle=_dict['color']
self._ctx.stroke()
elif _dict['type'] == 'POLYGON':
if self._bg is not None:
self._ctx.putImageData(self._bg[0], self._bg[1], self._bg[2])
console.log(self._bg[0])
self._bg=None
self._ctx.beginPath()
_moveTo=_dict['moveTo']
self._ctx.moveTo(_moveTo[0], _moveTo[1])
for _segment in _dict['segments']:
self._ctx.lineTo(_segment[0], _segment[1])
if _dict['width']:
self._ctx.lineWidth=_dict['width']
if _dict['outline']:
self._ctx.strokeStyle=_dict['outline']
if _dict['color']:
self._ctx.fillStyle=_dict['color']
self._ctx.fill()
self._ctx.closePath()
self._ctx.stroke()
elif _dict['type'] == 'POLYGON_BG':
_x0,_y0,_x1,_y1=self.rect_from_shape(_dict['shape'])
console.log(_x0,_y0,_x1, _y1)
self._bg=[]
self._bg.append(self._ctx.getImageData(_x0,_y0,abs(_x1)-abs(_x0),abs(_y1)-abs(_y0)))
self._bg.append(_x0)
self._bg.append(_y0)
def process(self):
self._interval=timer.set_interval(self.__interval, 10)
def add_line(self, dl): #color, startpos, endpos, width, outline, speed=None):
for _segment in dl.get_segments():
self._queue.put(_segment)
def add_polygon(self, dp):
self._queue.put({'type': 'POLYGON', 'moveTo': dp.moveTo,
'segments': dp.segments, 'color': dp.color,
'outline': dp.outline, 'width': dp.width})
def add_polygon_bg(self, shape):
self._queue.put({'type': 'POLYGON_BG', 'shape': shape})
| gpl-3.0 |
BadDNA/anolis | web/env/lib/python2.6/site-packages/pip-0.7.2-py2.6.egg/pip/locations.py | 3 | 1508 | """Locations where we look for configs, install stuff, etc"""
import sys
import os
from distutils import sysconfig
if getattr(sys, 'real_prefix', None):
## FIXME: is build/ a good name?
build_prefix = os.path.join(sys.prefix, 'build')
src_prefix = os.path.join(sys.prefix, 'src')
else:
## FIXME: this isn't a very good default
build_prefix = os.path.join(os.getcwd(), 'build')
src_prefix = os.path.join(os.getcwd(), 'src')
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
bin_py = os.path.join(sys.prefix, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'pip')
default_config_file = os.path.join(default_storage_dir, 'pip.ini')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
else:
bin_py = os.path.join(sys.prefix, 'bin')
default_storage_dir = os.path.join(user_dir, '.pip')
default_config_file = os.path.join(default_storage_dir, 'pip.conf')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/qtconsole/mainwindow.py | 7 | 31388 | """The Qt MainWindow for the QtConsole
This is a tabbed pseudo-terminal of Jupyter sessions, with a menu bar for
common actions.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import webbrowser
from threading import Thread
from qtconsole.qt import QtGui,QtCore
from qtconsole.usage import gui_reference
def background(f):
"""call a function in a simple thread, to prevent blocking"""
t = Thread(target=f)
t.start()
return t
class MainWindow(QtGui.QMainWindow):
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, app,
confirm_exit=True,
new_frontend_factory=None, slave_frontend_factory=None,
):
""" Create a tabbed MainWindow for managing FrontendWidgets
Parameters
----------
app : reference to QApplication parent
confirm_exit : bool, optional
Whether we should prompt on close of tabs
new_frontend_factory : callable
A callable that returns a new JupyterWidget instance, attached to
its own running kernel.
slave_frontend_factory : callable
A callable that takes an existing JupyterWidget, and returns a new
JupyterWidget instance, attached to the same kernel.
"""
super(MainWindow, self).__init__()
self._kernel_counter = 0
self._app = app
self.confirm_exit = confirm_exit
self.new_frontend_factory = new_frontend_factory
self.slave_frontend_factory = slave_frontend_factory
self.tab_widget = QtGui.QTabWidget(self)
self.tab_widget.setDocumentMode(True)
self.tab_widget.setTabsClosable(True)
self.tab_widget.tabCloseRequested[int].connect(self.close_tab)
self.setCentralWidget(self.tab_widget)
# hide tab bar at first, since we have no tabs:
self.tab_widget.tabBar().setVisible(False)
# prevent focus in tab bar
self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
def update_tab_bar_visibility(self):
""" update visibility of the tabBar depending of the number of tab
0 or 1 tab, tabBar hidden
2+ tabs, tabBar visible
send a self.close if number of tab ==0
need to be called explicitly, or be connected to tabInserted/tabRemoved
"""
if self.tab_widget.count() <= 1:
self.tab_widget.tabBar().setVisible(False)
else:
self.tab_widget.tabBar().setVisible(True)
if self.tab_widget.count()==0 :
self.close()
@property
def next_kernel_id(self):
"""constantly increasing counter for kernel IDs"""
c = self._kernel_counter
self._kernel_counter += 1
return c
@property
def active_frontend(self):
return self.tab_widget.currentWidget()
def create_tab_with_new_frontend(self):
"""create a new frontend and attach it to a new tab"""
widget = self.new_frontend_factory()
self.add_tab_with_frontend(widget)
def create_tab_with_current_kernel(self):
"""create a new frontend attached to the same kernel as the current tab"""
current_widget = self.tab_widget.currentWidget()
current_widget_index = self.tab_widget.indexOf(current_widget)
current_widget_name = self.tab_widget.tabText(current_widget_index)
widget = self.slave_frontend_factory(current_widget)
if 'slave' in current_widget_name:
# don't keep stacking slaves
name = current_widget_name
else:
name = '(%s) slave' % current_widget_name
self.add_tab_with_frontend(widget,name=name)
def close_tab(self,current_tab):
""" Called when you need to try to close a tab.
It takes the number of the tab to be closed as argument, or a reference
to the widget inside this tab
"""
# let's be sure "tab" and "closing widget" are respectively the index
# of the tab to close and a reference to the frontend to close
if type(current_tab) is not int :
current_tab = self.tab_widget.indexOf(current_tab)
closing_widget=self.tab_widget.widget(current_tab)
# when trying to be closed, widget might re-send a request to be
# closed again, but will be deleted when event will be processed. So
# need to check that widget still exists and skip if not. One example
# of this is when 'exit' is sent in a slave tab. 'exit' will be
# re-sent by this function on the master widget, which ask all slave
# widgets to exit
if closing_widget is None:
return
#get a list of all slave widgets on the same kernel.
slave_tabs = self.find_slave_widgets(closing_widget)
keepkernel = None #Use the prompt by default
if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic
keepkernel = closing_widget._keep_kernel_on_exit
# If signal sent by exit magic (_keep_kernel_on_exit, exist and not None)
# we set local slave tabs._hidden to True to avoid prompting for kernel
# restart when they get the signal. and then "forward" the 'exit'
# to the main window
if keepkernel is not None:
for tab in slave_tabs:
tab._hidden = True
if closing_widget in slave_tabs:
try :
self.find_master_tab(closing_widget).execute('exit')
except AttributeError:
self.log.info("Master already closed or not local, closing only current tab")
self.tab_widget.removeTab(current_tab)
self.update_tab_bar_visibility()
return
kernel_client = closing_widget.kernel_client
kernel_manager = closing_widget.kernel_manager
if keepkernel is None and not closing_widget._confirm_exit:
# don't prompt, just terminate the kernel if we own it
# or leave it alone if we don't
keepkernel = closing_widget._existing
if keepkernel is None: #show prompt
if kernel_client and kernel_client.channels_running:
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if closing_widget._may_close:
msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"'
info = "Would you like to quit the Kernel and close all attached Consoles as well?"
justthis = QtGui.QPushButton("&No, just this Tab", self)
justthis.setShortcut('N')
closeall = QtGui.QPushButton("&Yes, close all", self)
closeall.setShortcut('Y')
# allow ctrl-d ctrl-d exit, like in terminal
closeall.setShortcut('Ctrl+D')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(justthis, QtGui.QMessageBox.NoRole)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
if reply == 1: # close All
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
kernel_manager.shutdown_kernel()
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
elif reply == 0: # close Console
if not closing_widget._existing:
# Have kernel: don't quit, just close the tab
closing_widget.execute("exit True")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else:
reply = QtGui.QMessageBox.question(self, title,
"Are you sure you want to close this Console?"+
"\nThe Kernel and other Consoles will remain active.",
okay|cancel,
defaultButton=okay
)
if reply == okay:
self.tab_widget.removeTab(current_tab)
elif keepkernel: #close console but leave kernel running (no prompt)
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else: #close console and kernel (no prompt)
self.tab_widget.removeTab(current_tab)
if kernel_client and kernel_client.channels_running:
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
if kernel_manager:
kernel_manager.shutdown_kernel()
background(kernel_client.stop_channels)
self.update_tab_bar_visibility()
def add_tab_with_frontend(self,frontend,name=None):
""" insert a tab with a given frontend in the tab bar, and give it a name
"""
if not name:
name = 'kernel %i' % self.next_kernel_id
self.tab_widget.addTab(frontend,name)
self.update_tab_bar_visibility()
self.make_frontend_visible(frontend)
frontend.exit_requested.connect(self.close_tab)
    def next_tab(self):
        self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() + 1)
    def prev_tab(self):
        self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() - 1)
    def make_frontend_visible(self, frontend):
        widget_index = self.tab_widget.indexOf(frontend)
        # indexOf returns -1 when the frontend is not in the tab bar.
        if widget_index >= 0:
            self.tab_widget.setCurrentIndex(widget_index)
def find_master_tab(self,tab,as_list=False):
"""
Try to return the frontend that owns the kernel attached to the given widget/tab.
Only finds frontend owned by the current application. Selection
based on port of the kernel might be inaccurate if several kernel
on different ip use same port number.
This function does the conversion tabNumber/widget if needed.
Might return None if no master widget (non local kernel)
Will crash if more than 1 masterWidget
When asList set to True, always return a list of widget(s) owning
the kernel. The list might be empty or containing several Widget.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # Widgets that are candidates to own the kernel all share the
        # connection file of the current widget and should have a _may_close
        # attribute.
filtered_widget_list = [ widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file and
hasattr(widget,'_may_close') ]
# the master widget is the one that may close the kernel
master_widget= [ widget for widget in filtered_widget_list if widget._may_close]
if as_list:
return master_widget
        assert len(master_widget) <= 1
if len(master_widget)==0:
return None
return master_widget[0]
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # Candidate widgets that do not own the kernel share the connection
        # file of the current widget.
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file)
        # Get the list of widgets owning the same kernel and remove them from
        # the previous candidates (sets might be cleaner here).
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list
# Populate the menu bar with common actions and shortcuts
def add_menu_action(self, menu, action, defer_shortcut=False):
"""Add action to menu as well as self
So that when the menu bar is invisible, its actions are still available.
If defer_shortcut is True, set the shortcut context to widget-only,
where it will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if defer_shortcut:
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
def init_menu_bar(self):
        # Create the menus in the order they should appear in the menu bar.
self.init_file_menu()
self.init_edit_menu()
self.init_view_menu()
self.init_kernel_menu()
self.init_window_menu()
self.init_help_menu()
def init_file_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
self.new_kernel_tab_act = QtGui.QAction("New Tab with &New kernel",
self,
shortcut="Ctrl+T",
triggered=self.create_tab_with_new_frontend)
self.add_menu_action(self.file_menu, self.new_kernel_tab_act)
self.slave_kernel_tab_act = QtGui.QAction("New Tab with Sa&me kernel",
self,
shortcut="Ctrl+Shift+T",
triggered=self.create_tab_with_current_kernel)
self.add_menu_action(self.file_menu, self.slave_kernel_tab_act)
self.file_menu.addSeparator()
self.close_action=QtGui.QAction("&Close Tab",
self,
shortcut=QtGui.QKeySequence.Close,
triggered=self.close_active_frontend
)
self.add_menu_action(self.file_menu, self.close_action)
self.export_action=QtGui.QAction("&Save to HTML/XHTML",
self,
shortcut=QtGui.QKeySequence.Save,
triggered=self.export_action_active_frontend
)
self.add_menu_action(self.file_menu, self.export_action, True)
self.file_menu.addSeparator()
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
self.print_action = QtGui.QAction("&Print",
self,
shortcut=printkey,
triggered=self.print_action_active_frontend)
self.add_menu_action(self.file_menu, self.print_action, True)
if sys.platform != 'darwin':
# OSX always has Quit in the Application menu, only add it
# to the File menu elsewhere.
self.file_menu.addSeparator()
self.quit_action = QtGui.QAction("&Quit",
self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.close,
)
self.add_menu_action(self.file_menu, self.quit_action)
def init_edit_menu(self):
self.edit_menu = self.menuBar().addMenu("&Edit")
self.undo_action = QtGui.QAction("&Undo",
self,
shortcut=QtGui.QKeySequence.Undo,
statusTip="Undo last action if possible",
triggered=self.undo_active_frontend
)
self.add_menu_action(self.edit_menu, self.undo_action)
self.redo_action = QtGui.QAction("&Redo",
self,
shortcut=QtGui.QKeySequence.Redo,
statusTip="Redo last action if possible",
triggered=self.redo_active_frontend)
self.add_menu_action(self.edit_menu, self.redo_action)
self.edit_menu.addSeparator()
self.cut_action = QtGui.QAction("&Cut",
self,
shortcut=QtGui.QKeySequence.Cut,
triggered=self.cut_active_frontend
)
self.add_menu_action(self.edit_menu, self.cut_action, True)
self.copy_action = QtGui.QAction("&Copy",
self,
shortcut=QtGui.QKeySequence.Copy,
triggered=self.copy_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_action, True)
self.copy_raw_action = QtGui.QAction("Copy (&Raw Text)",
self,
shortcut="Ctrl+Shift+C",
triggered=self.copy_raw_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_raw_action, True)
self.paste_action = QtGui.QAction("&Paste",
self,
shortcut=QtGui.QKeySequence.Paste,
triggered=self.paste_active_frontend
)
self.add_menu_action(self.edit_menu, self.paste_action, True)
self.edit_menu.addSeparator()
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
self.select_all_action = QtGui.QAction("Select &All",
self,
shortcut=selectall,
triggered=self.select_all_active_frontend
)
self.add_menu_action(self.edit_menu, self.select_all_action, True)
def init_view_menu(self):
self.view_menu = self.menuBar().addMenu("&View")
if sys.platform != 'darwin':
# disable on OSX, where there is always a menu bar
self.toggle_menu_bar_act = QtGui.QAction("Toggle &Menu Bar",
self,
shortcut="Ctrl+Shift+M",
statusTip="Toggle visibility of menubar",
triggered=self.toggle_menu_bar)
self.add_menu_action(self.view_menu, self.toggle_menu_bar_act)
fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11"
self.full_screen_act = QtGui.QAction("&Full Screen",
self,
shortcut=fs_key,
statusTip="Toggle between Fullscreen and Normal Size",
triggered=self.toggleFullScreen)
self.add_menu_action(self.view_menu, self.full_screen_act)
self.view_menu.addSeparator()
self.increase_font_size = QtGui.QAction("Zoom &In",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.increase_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.increase_font_size, True)
self.decrease_font_size = QtGui.QAction("Zoom &Out",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.decrease_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.decrease_font_size, True)
self.reset_font_size = QtGui.QAction("Zoom &Reset",
self,
shortcut="Ctrl+0",
triggered=self.reset_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.reset_font_size, True)
self.view_menu.addSeparator()
self.clear_action = QtGui.QAction("&Clear Screen",
self,
shortcut='Ctrl+L',
statusTip="Clear the console",
triggered=self.clear_active_frontend)
self.add_menu_action(self.view_menu, self.clear_action)
self.pager_menu = self.view_menu.addMenu("&Pager")
hsplit_action = QtGui.QAction(".. &Horizontal Split",
self,
triggered=lambda: self.set_paging_active_frontend('hsplit'))
vsplit_action = QtGui.QAction(" : &Vertical Split",
self,
triggered=lambda: self.set_paging_active_frontend('vsplit'))
inside_action = QtGui.QAction(" &Inside Pager",
self,
triggered=lambda: self.set_paging_active_frontend('inside'))
self.pager_menu.addAction(hsplit_action)
self.pager_menu.addAction(vsplit_action)
self.pager_menu.addAction(inside_action)
def init_kernel_menu(self):
self.kernel_menu = self.menuBar().addMenu("&Kernel")
        # Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl, so keep these
        # shortcuts on Ctrl, rather than platform-default like we do
        # elsewhere.
ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl"
self.interrupt_kernel_action = QtGui.QAction("&Interrupt current Kernel",
self,
triggered=self.interrupt_kernel_active_frontend,
shortcut=ctrl+"+C",
)
self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action)
self.restart_kernel_action = QtGui.QAction("&Restart current Kernel",
self,
triggered=self.restart_kernel_active_frontend,
shortcut=ctrl+"+.",
)
self.add_menu_action(self.kernel_menu, self.restart_kernel_action)
self.kernel_menu.addSeparator()
self.confirm_restart_kernel_action = QtGui.QAction("&Confirm kernel restart",
self,
checkable=True,
checked=self.active_frontend.confirm_restart,
triggered=self.toggle_confirm_restart_active_frontend
)
self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action)
self.tab_widget.currentChanged.connect(self.update_restart_checkbox)
def init_window_menu(self):
self.window_menu = self.menuBar().addMenu("&Window")
if sys.platform == 'darwin':
# add min/maximize actions to OSX, which lacks default bindings.
self.minimizeAct = QtGui.QAction("Mini&mize",
self,
shortcut="Ctrl+m",
statusTip="Minimize the window/Restore Normal Size",
triggered=self.toggleMinimized)
# maximize is called 'Zoom' on OSX for some reason
self.maximizeAct = QtGui.QAction("&Zoom",
self,
shortcut="Ctrl+Shift+M",
statusTip="Maximize the window/Restore Normal Size",
triggered=self.toggleMaximized)
self.add_menu_action(self.window_menu, self.minimizeAct)
self.add_menu_action(self.window_menu, self.maximizeAct)
self.window_menu.addSeparator()
prev_key = "Ctrl+Shift+Left" if sys.platform == 'darwin' else "Ctrl+PgUp"
self.prev_tab_act = QtGui.QAction("Pre&vious Tab",
self,
shortcut=prev_key,
statusTip="Select previous tab",
triggered=self.prev_tab)
self.add_menu_action(self.window_menu, self.prev_tab_act)
next_key = "Ctrl+Shift+Right" if sys.platform == 'darwin' else "Ctrl+PgDown"
self.next_tab_act = QtGui.QAction("Ne&xt Tab",
self,
shortcut=next_key,
statusTip="Select next tab",
triggered=self.next_tab)
self.add_menu_action(self.window_menu, self.next_tab_act)
def init_help_menu(self):
        # Please keep the Help menu on Mac OS even if empty; it will
        # automatically contain a search field to search inside menus.
        # Also keep it spelled in English: as long as Qt doesn't support a
        # QAction.MenuRole like HelpMenuRole, renaming it would lose this
        # search field functionality.
self.help_menu = self.menuBar().addMenu("&Help")
# Help Menu
self.help_action = QtGui.QAction("Show &QtConsole help", self,
triggered=self._show_help)
self.online_help_action = QtGui.QAction("Open online &help", self,
triggered=self._open_online_help)
self.add_menu_action(self.help_menu, self.help_action)
self.add_menu_action(self.help_menu, self.online_help_action)
def _set_active_frontend_focus(self):
        # This is a hack: self.active_frontend._control seems to be a
        # private member. Unfortunately it is the only way to set focus
        # reliably.
QtCore.QTimer.singleShot(200, self.active_frontend._control.setFocus)
# minimize/maximize/fullscreen actions:
    def toggle_menu_bar(self):
        menu_bar = self.menuBar()
        menu_bar.setVisible(not menu_bar.isVisible())
def toggleMinimized(self):
if not self.isMinimized():
self.showMinimized()
else:
self.showNormal()
def _show_help(self):
self.active_frontend._page(gui_reference)
def _open_online_help(self):
filename="http://ipython.org/ipython-doc/stable/index.html"
webbrowser.open(filename, new=1, autoraise=True)
def toggleMaximized(self):
if not self.isMaximized():
self.showMaximized()
else:
self.showNormal()
    # Minimizing/maximizing while in full screen triggers a bug when leaving
    # full screen, at least on OSX.
def toggleFullScreen(self):
if not self.isFullScreen():
self.showFullScreen()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(False)
self.minimizeAct.setEnabled(False)
else:
self.showNormal()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(True)
self.minimizeAct.setEnabled(True)
def set_paging_active_frontend(self, paging):
self.active_frontend._set_paging(paging)
def close_active_frontend(self):
self.close_tab(self.active_frontend)
def restart_kernel_active_frontend(self):
self.active_frontend.request_restart_kernel()
def interrupt_kernel_active_frontend(self):
self.active_frontend.request_interrupt_kernel()
def toggle_confirm_restart_active_frontend(self):
widget = self.active_frontend
widget.confirm_restart = not widget.confirm_restart
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def update_restart_checkbox(self):
if self.active_frontend is None:
return
widget = self.active_frontend
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def clear_active_frontend(self):
self.active_frontend.clear()
def cut_active_frontend(self):
widget = self.active_frontend
if widget.can_cut():
widget.cut()
def copy_active_frontend(self):
widget = self.active_frontend
widget.copy()
def copy_raw_active_frontend(self):
self.active_frontend._copy_raw_action.trigger()
def paste_active_frontend(self):
widget = self.active_frontend
if widget.can_paste():
widget.paste()
def undo_active_frontend(self):
self.active_frontend.undo()
def redo_active_frontend(self):
self.active_frontend.redo()
def print_action_active_frontend(self):
self.active_frontend.print_action.trigger()
def export_action_active_frontend(self):
self.active_frontend.export_action.trigger()
def select_all_active_frontend(self):
self.active_frontend.select_all_action.trigger()
def increase_font_size_active_frontend(self):
self.active_frontend.increase_font_size.trigger()
def decrease_font_size_active_frontend(self):
self.active_frontend.decrease_font_size.trigger()
def reset_font_size_active_frontend(self):
self.active_frontend.reset_font_size.trigger()
#---------------------------------------------------------------------------
# QWidget interface
#---------------------------------------------------------------------------
def closeEvent(self, event):
""" Forward the close event to every tabs contained by the windows
"""
if self.tab_widget.count() == 0:
# no tabs, just close
event.accept()
return
        # Do not loop on the widget count, as it changes while closing.
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
accept_role = QtGui.QMessageBox.AcceptRole
if self.confirm_exit:
if self.tab_widget.count() > 1:
msg = "Close all tabs, stop all kernels, and Quit?"
else:
msg = "Close console, stop kernel, and Quit?"
info = "Kernels not started here (e.g. notebooks) will be left alone."
closeall = QtGui.QPushButton("&Quit", self)
closeall.setShortcut('Q')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
else:
reply = okay
if reply == cancel:
event.ignore()
return
if reply == okay or reply == accept_role:
while self.tab_widget.count() >= 1:
# prevent further confirmations:
widget = self.active_frontend
widget._confirm_exit = False
self.close_tab(widget)
event.accept()
| gpl-3.0 |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/django/contrib/contenttypes/tests/tests.py | 12 | 10973 | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.core.management import call_command
from django.http import HttpRequest, Http404
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import override_system_checks
from django.utils import six
from .models import ConcreteModel, ProxyModel, FooWithoutUrl, FooWithUrl, FooWithBrokenAbsoluteUrl
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel,
for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel,
for_concrete_model=False))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel,
for_concrete_model=False))
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1])
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
name='Old model',
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(six.text_type(ct), 'Old model')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
class MigrateTests(TestCase):
@skipUnlessDBFeature('can_rollback_ddl')
@override_system_checks([])
def test_unmigrating_first_migration_post_migrate_signal(self):
"""
#24075 - When unmigrating an app before its first migration,
post_migrate signal handler must be aware of the missing tables.
"""
try:
with override_settings(
INSTALLED_APPS=["django.contrib.contenttypes"],
MIGRATION_MODULES={'contenttypes': 'django.contrib.contenttypes.migrations'},
):
call_command("migrate", "contenttypes", "zero", verbosity=0)
finally:
call_command("migrate", verbosity=0)
| mit |
koichi626/hadoop-gpu | hadoop-gpu-0.20.1/build/contrib/hod/testing/testModule.py | 182 | 2187 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = ['test_MINITEST3']
# All test-case classes should have the naming convention test_.*
class test_MINITEST1(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class test_MINITEST2(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class test_MINITEST3(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class ModuleTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunModuleTests():
# modulename_suite
suite = ModuleTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunModuleTests()
| apache-2.0 |
sonnyhu/numpy | numpy/lib/info.py | 61 | 6353 | """
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaN's with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
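Example (added for illustration; assumes ``import numpy as np``)::
    >>> np.r_[1:4, 0]
    array([1, 2, 3, 0])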
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod          Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
numerix Python.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze Return a with length-one dimensions removed.
atleast_1d Force arrays to be > 1D
atleast_2d Force arrays to be > 2D
atleast_3d Force arrays to be > 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
stack Stack arrays along a new axis
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub          Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
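Example (added for illustration; ``np`` as above)::
    >>> p = np.poly1d([1, -3, 2])   # x**2 - 3*x + 2
    >>> p(0)
    2
    >>> np.roots([1, -3, 2])
    array([ 2.,  1.])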
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
1D Array Set Operations
-----------------------
Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
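Example (added for illustration; ``np`` as above)::
    >>> np.intersect1d([1, 2, 3], [2, 3, 4])
    array([2, 3])
    >>> np.union1d([1, 2], [2, 3])
    array([1, 2, 3])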
"""
from __future__ import division, absolute_import, print_function
depends = ['core', 'testing']
global_symbols = ['*']
| bsd-3-clause |
fillycheezstake/MissionPlanner | ExtLibs/Mavlink/mavgen.py | 34 | 3007 | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a python implementation
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
def mavgen(opts, args):
    """Generate MAVLink message formatters and parsers (C and Python) using options
    and args, where args are a list of XML files. This function allows Python
    scripts under Windows to control mavgen using the same interface as
    shell scripts under Unix."""
import sys, textwrap, os
import mavparse
import mavgen_python
import mavgen_c
import mavgen_csharp
xml = []
for fname in args:
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# expand includes
for x in xml[:]:
for i in x.include:
fname = os.path.join(os.path.dirname(x.filename), i)
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# include message lengths and CRCs too
for idx in range(0, 256):
if x.message_lengths[idx] == 0:
x.message_lengths[idx] = xml[-1].message_lengths[idx]
x.message_crcs[idx] = xml[-1].message_crcs[idx]
x.message_names[idx] = xml[-1].message_names[idx]
# work out max payload size across all includes
largest_payload = 0
for x in xml:
if x.largest_payload > largest_payload:
largest_payload = x.largest_payload
for x in xml:
x.largest_payload = largest_payload
if mavparse.check_duplicates(xml):
sys.exit(1)
print("Found %u MAVLink message types in %u XML files" % (
mavparse.total_msgs(xml), len(xml)))
if opts.language == 'python':
mavgen_python.generate(opts.output, xml)
elif opts.language == 'C':
mavgen_c.generate(opts.output, xml)
elif opts.language == 'csharp':
mavgen_csharp.generate(opts.output, xml)
else:
print("Unsupported language %s" % opts.language)
if __name__=="__main__":
import sys, textwrap, os
from optparse import OptionParser
# allow import from the parent directory, where mavutil.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import mavparse
import mavgen_python
import mavgen_c
parser = OptionParser("mavgen.py [options] <XML files>")
parser.add_option("-o", "--output", dest="output", default="mavlink", help="output base name")
parser.add_option("--lang", dest="language", default="python", help="language to generate")
parser.add_option("--wire-protocol", dest="wire_protocol", default=mavparse.PROTOCOL_0_9, help="wire protocol version")
(opts, args) = parser.parse_args()
if len(args) < 1:
parser.error("You must supply at least one MAVLink XML protocol definition")
mavgen(opts, args)
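# Example invocation (added for illustration; the XML filename is hypothetical,
# the flags are the ones defined by the OptionParser above):
#   python mavgen.py --lang=C --wire-protocol=0.9 -o generated/mavlink common.xml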
| gpl-3.0 |
SebastianLloret/Clever-Bot | libpasteurize/fixes/fix_kwargs.py | 61 | 6008 | u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
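# Illustrative sketch of the transformation (comment added for clarity; it
# follows the templates above). Given Python 3 source such as
#     def f(a, *, b, c=1):
#         ...
# this fixer rewrites it into Python 2 compatible code along the lines of
#     def f(a, **_3to2kwargs):
#         if 'c' in _3to2kwargs: c = _3to2kwargs['c']; del _3to2kwargs['c']
#         else: c = 1
#         b = _3to2kwargs['b']; del _3to2kwargs['b']
#         ...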
def gen_params(raw_params):
u"""
    Generator that yields tuples of (name, default_value) for each parameter in the list.
    If no default is given, default_value is None (not Leaf(token.NAME, 'None')).
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
    Removes all keyword-only args from the params list, and a bare star, if any.
    Does not add the kwargs dict even if one is needed.
    Returns True if more action is needed (i.e. there is no kwargs dict yet),
    False if not.
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
            # Return the 'foobar' from **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
            # Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template %{u'name':name, u'kwargs':new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template %{u'name':name, u'default':default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(_if_template %{u'assign':_assign_template %{u'name':name, u'kwargs':new_kwargs}, u'name':name, u'kwargs':new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
| gpl-3.0 |
jazkarta/edx-platform | common/lib/xmodule/xmodule/vertical_block.py | 56 | 5689 | """
VerticalBlock - an XBlock which renders its children in a column.
"""
import logging
from copy import copy
from lxml import etree
from xblock.core import XBlock
from xblock.fragment import Fragment
from xmodule.mako_module import MakoTemplateBlockBase
from xmodule.progress import Progress
from xmodule.seq_module import SequenceFields
from xmodule.studio_editable import StudioEditableBlock
from xmodule.x_module import STUDENT_VIEW, XModuleFields
from xmodule.xml_module import XmlParserMixin
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
CLASS_PRIORITY = ['video', 'problem']
class VerticalBlock(SequenceFields, XModuleFields, StudioEditableBlock, XmlParserMixin, MakoTemplateBlockBase, XBlock):
"""
Layout XBlock for rendering subblocks vertically.
"""
mako_template = 'widgets/sequence-edit.html'
js_module_name = "VerticalBlock"
has_children = True
show_in_read_only_mode = True
def student_view(self, context):
"""
Renders the student view of the block in the LMS.
"""
fragment = Fragment()
contents = []
child_context = {} if not context else copy(context)
child_context['child_of_vertical'] = True
# pylint: disable=no-member
for child in self.get_display_items():
rendered_child = child.render(STUDENT_VIEW, child_context)
fragment.add_frag_resources(rendered_child)
contents.append({
'id': child.location.to_deprecated_string(),
'content': rendered_child.content
})
fragment.add_content(self.system.render_template('vert_module.html', {
'items': contents,
'xblock_context': context,
}))
return fragment
def author_view(self, context):
"""
Renders the Studio preview view, which supports drag and drop.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location # pylint: disable=no-member
# For the container page we want the full drag-and-drop, but for unit pages we want
# a more concise version that appears alongside the "View =>" link-- unless it is
# the unit page and the vertical being rendered is itself the unit vertical (is_root == True).
if is_root or not context.get('is_unit_page'):
self.render_children(context, fragment, can_reorder=True, can_add=True)
return fragment
def get_progress(self):
"""
Returns the progress on this block and all children.
"""
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def get_icon_class(self):
"""
Returns the highest priority icon class.
"""
child_classes = set(child.get_icon_class() for child in self.get_children())
new_class = 'other'
for higher_class in CLASS_PRIORITY:
if higher_class in child_classes:
new_class = higher_class
return new_class
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode')) # pylint: disable=no-member
children.append(child_block.scope_ids.usage_id)
except Exception as exc: # pylint: disable=broad-except
log.exception("Unable to load child when parsing Vertical. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(exc))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('vertical') # pylint: disable=no-member
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
Gather all fields which can't be edited.
"""
non_editable_fields = super(VerticalBlock, self).non_editable_metadata_fields
non_editable_fields.extend([
self.fields['due'],
])
return non_editable_fields
def studio_view(self, context):
fragment = super(VerticalBlock, self).studio_view(context)
# This continues to use the old XModuleDescriptor javascript code to enabled studio editing.
# TODO: Remove this when studio better supports editing of pure XBlocks.
fragment.add_javascript('VerticalBlock = XModule.Descriptor;')
return fragment
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(VerticalBlock, self).index_dictionary()
index_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(index_body)
else:
xblock_body["content"] = index_body
# We use "Sequence" for sequentials and verticals
xblock_body["content_type"] = "Sequence"
return xblock_body
| agpl-3.0 |
jonathonwalz/ansible | lib/ansible/modules/storage/zfs/zpool_facts.py | 69 | 6653 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zpool_facts
short_description: Gather facts about ZFS pools.
description:
- Gather facts from ZFS pool properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS pool name.
aliases: [ "pool", "zpool" ]
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zpool(1M) man page.
aliases: [ "props" ]
default: all
required: false
'''
EXAMPLES = '''
# Gather facts about ZFS pool rpool
zpool_facts: pool=rpool
# Gather space usage about all imported ZFS pools
zpool_facts: properties='free,size'
debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
with_items: '{{ ansible_zfs_pools }}'
'''
RETURN = '''
name:
description: ZFS pool name
returned: always
type: string
sample: rpool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
zfs_pools:
description: ZFS pool facts
returned: always
type: string
sample:
{
"allocated": "3.46G",
"altroot": "-",
"autoexpand": "off",
"autoreplace": "off",
"bootfs": "rpool/ROOT/openindiana",
"cachefile": "-",
"capacity": "6%",
"comment": "-",
"dedupditto": "0",
"dedupratio": "1.00x",
"delegation": "on",
"expandsize": "-",
"failmode": "wait",
"feature@async_destroy": "enabled",
"feature@bookmarks": "enabled",
"feature@edonr": "enabled",
"feature@embedded_data": "active",
"feature@empty_bpobj": "active",
"feature@enabled_txg": "active",
"feature@extensible_dataset": "enabled",
"feature@filesystem_limits": "enabled",
"feature@hole_birth": "active",
"feature@large_blocks": "enabled",
"feature@lz4_compress": "active",
"feature@multi_vdev_crash_dump": "enabled",
"feature@sha512": "enabled",
"feature@skein": "enabled",
"feature@spacemap_histogram": "active",
"fragmentation": "3%",
"free": "46.3G",
"freeing": "0",
"guid": "15729052870819522408",
"health": "ONLINE",
"leaked": "0",
"listsnapshots": "off",
"name": "rpool",
"readonly": "off",
"size": "49.8G",
"version": "-"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
class ZPoolFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self._pools = defaultdict(dict)
self.facts = []
def pool_exists(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
        # A zero exit code means `zpool list <name>` found the pool.
        return rc == 0
def get_facts(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
if self.name:
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
                pool, prop, value = line.split('\t')
                self._pools[pool].update({prop: value})
for k, v in iteritems(self._pools):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_pools': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
stderr=err,
rc=rc)
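    # Illustrative raw `zpool get -H -o name,property,value` output that
    # get_facts() parses (tab-separated; the values are examples only):
    #   rpool\tsize\t49.8G
    #   rpool\tfree\t46.3G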
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
),
supports_check_mode=True
)
zpool_facts = ZPoolFacts(module)
result = {}
result['changed'] = False
result['name'] = zpool_facts.name
if zpool_facts.parsable:
result['parsable'] = zpool_facts.parsable
if zpool_facts.name is not None:
if zpool_facts.pool_exists():
result['ansible_facts'] = zpool_facts.get_facts()
else:
module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
else:
result['ansible_facts'] = zpool_facts.get_facts()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
zzicewind/nova | nova/tests/unit/objects/test_virt_cpu_topology.py | 94 | 1397 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.tests.unit.objects import test_objects
_top_dict = {
'sockets': 2,
'cores': 4,
'threads': 8
}
class _TestVirtCPUTopologyObject(object):
def test_object_from_dict(self):
top_obj = objects.VirtCPUTopology.from_dict(_top_dict)
self.compare_obj(top_obj, _top_dict)
def test_object_to_dict(self):
top_obj = objects.VirtCPUTopology()
top_obj.sockets = 2
top_obj.cores = 4
top_obj.threads = 8
spec = top_obj.to_dict()
self.assertEqual(_top_dict, spec)
class TestVirtCPUTopologyObject(test_objects._LocalTest,
_TestVirtCPUTopologyObject):
pass
class TestRemoteVirtCPUTopologyObject(test_objects._RemoteTest,
_TestVirtCPUTopologyObject):
pass
| apache-2.0 |
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/algorithms/tests/test_product.py | 3 | 10011 | import networkx as nx
from networkx import tensor_product,cartesian_product,lexicographic_product,strong_product
from nose.tools import assert_raises, assert_true, assert_equal
def test_tensor_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,tensor_product,G,H)
def test_tensor_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=tensor_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=tensor_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_tensor_product_size():
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
K5 = nx.complete_graph(5)
G=tensor_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=tensor_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
def test_tensor_product_classic_result():
K2 = nx.complete_graph(2)
G = nx.petersen_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.desargues_graph()))
G = nx.cycle_graph(5)
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cycle_graph(10)))
G = nx.tetrahedral_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
def test_tensor_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = tensor_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if H.has_edge(u_H,v_H) and G.has_edge(u_G,v_G):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_cartesian_product_multigraph():
G=nx.MultiGraph()
G.add_edge(1,2,key=0)
G.add_edge(1,2,key=1)
H=nx.MultiGraph()
H.add_edge(3,4,key=0)
H.add_edge(3,4,key=1)
GH=cartesian_product(G,H)
assert_equal( set(GH) , set([(1, 3), (2, 3), (2, 4), (1, 4)]))
assert_equal( set(GH.edges(keys=True)) ,
set([((1, 3), (2, 3), 0), ((1, 3), (2, 3), 1),
((1, 3), (1, 4), 0), ((1, 3), (1, 4), 1),
((2, 3), (2, 4), 0), ((2, 3), (2, 4), 1),
((2, 4), (1, 4), 0), ((2, 4), (1, 4), 1)]))
def test_cartesian_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,cartesian_product,G,H)
def test_cartesian_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=cartesian_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=cartesian_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_cartesian_product_size():
# order(GXH)=order(G)*order(H)
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=cartesian_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(P5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(P5))
G=cartesian_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(K5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(K5))
def test_cartesian_product_classic():
# test some classic product graphs
P2 = nx.path_graph(2)
P3 = nx.path_graph(3)
# cube = 2-path X 2-path
G=cartesian_product(P2,P2)
G=cartesian_product(P2,G)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
# 3x3 grid
G=cartesian_product(P3,P3)
assert_true(nx.is_isomorphic(G,nx.grid_2d_graph(3,3)))
def test_cartesian_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = cartesian_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_lexicographic_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,lexicographic_product,G,H)
def test_lexicographic_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=lexicographic_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=lexicographic_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_lexicographic_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=lexicographic_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=lexicographic_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the lexicographic product
def test_lexicographic_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = lexicographic_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if G.has_edge(u_G,v_G) or (u_G==v_G and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_strong_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,strong_product,G,H)
def test_strong_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=strong_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=strong_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_strong_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3 = nx.complete_graph(3)
G=strong_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=strong_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the strong product
def test_strong_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = strong_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)) or \
(G.has_edge(u_G,v_G) and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
| gpl-2.0 |
tafaRU/odoo | addons/l10n_multilang/__init__.py | 438 | 1082 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import l10n_multilang
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/whoosh/lang/porter2.py | 117 | 8314 | """An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
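# Example: for "beautiful" the first non-vowel that follows a vowel is the
# "t" of "b-eau-t", so R1 = "iful" and, applying the same rule inside R1,
# R2 = "ul"; hence get_r1("beautiful") == 4 and get_r2("beautiful") == 6
# (cf. the Snowball definition of the R1/R2 regions).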
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
    return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
    if (word.endswith('y') or word.endswith('Y')) and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
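if __name__ == '__main__':
    # Minimal sanity-check sketch (sample words only): "running" exercises
    # the normal suffix pipeline (-> "run"), while "skies" and "dying" hit
    # the exceptional-form table (-> "sky" and "die").
    for w in ('running', 'skies', 'dying', 'beautiful'):
        print('%s -> %s' % (w, stem(w)))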
| gpl-2.0 |
nxnfufunezn/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_static.py | 139 | 2863 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def parse(self, input_str):
return self.parser.parse(StringIO(input_str))
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
| mpl-2.0 |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/tests/test_cli_mgmt_databoxedge.py | 1 | 16519 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 49
# Methods Covered : 49
# Examples Total : 49
# Examples Tested : 49
# Coverage % : 100
# ----------------------
import unittest
import azure.mgmt.databoxedge
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtDataBoxEdgeTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDataBoxEdgeTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.databoxedge.DataBoxEdgeManagementClient
)
@unittest.skip("skip test")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_databoxedge(self, resource_group):
SERVICE_NAME = "myapimrndxyz"
DATA_BOX_EDGE_DEVICE_NAME = "mydivicename"
USER_NAME = "username"
ROLE_NAME = "rolename"
SHARE_NAME = "sharename"
ORDER_NAME = "ordername"
TRIGGER_NAME = "triggername"
STORAGE_ACCOUNT_NAME = "storageaccountname"
STORAGE_ACCOUNT_CREDENTIAL_NAME = "storageaccountcredentialname"
BANDWIDTH_SCHEDULE_NAME = "bandwidthschedulename"
CONTAINER_NAME = "containername"
OPERATIONS_STATUS_NAME = "operationsstatusname"
NETWORK_SETTING_NAME = "networksettingname"
UPDATE_SUMMARY_NAME = "updatesummaryname"
ALERT_NAME = "alertname"
JOB_NAME = "jobname"
SECURITY_SETTING_NAME = "securitysettingname"
# DataBoxEdgeDevicePut[put]
BODY = {
"location": "eastus",
"sku": {
"name": "Edge",
"tier": "Standard"
}
}
result = self.mgmt_client.devices.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
result = result.result()
"""
# UserPut[put]
BODY = {
"encrypted_password": {
"value": "Password@1",
"encryption_algorithm": "None",
"encryption_cert_thumbprint": "blah"
},
"share_access_rights": []
}
result = self.mgmt_client.users.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, USER_NAME, BODY, resource_group.name)
result = result.result()
# RolePut[put]
BODY = {
"kind": "IOT",
"host_platform": "Linux",
"io_tdevice_details": {
"device_id": "iotdevice",
"io_thost_hub": "iothub.azure-devices.net",
"authentication": {
"symmetric_key": {
"connection_string": {
"value": "Encrypted<<HostName=iothub.azure-devices.net;DeviceId=iotDevice;SharedAccessKey=2C750FscEas3JmQ8Bnui5yQWZPyml0/UiRt1bQwd8=>>",
"encryption_cert_thumbprint": "348586569999244",
"encryption_algorithm": "AES256"
}
}
}
},
"io_tedge_device_details": {
"device_id": "iotEdge",
"io_thost_hub": "iothub.azure-devices.net",
"authentication": {
"symmetric_key": {
"connection_string": {
"value": "Encrypted<<HostName=iothub.azure-devices.net;DeviceId=iotEdge;SharedAccessKey=2C750FscEas3JmQ8Bnui5yQWZPyml0/UiRt1bQwd8=>>",
"encryption_cert_thumbprint": "1245475856069999244",
"encryption_algorithm": "AES256"
}
}
}
},
"share_mappings": [],
"role_status": "Enabled"
}
result = self.mgmt_client.roles.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME, BODY, resource_group.name)
result = result.result()
# SharePut[put]
BODY = {
"description": "",
"share_status": "Online",
"monitoring_status": "Enabled",
"azure_container_info": {
"storage_account_credential_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/storageAccountCredentials/" + STORAGE_ACCOUNT_CREDENTIAL_NAME + "",
"container_name": "testContainerSMB",
"data_format": "BlockBlob"
},
"access_protocol": "SMB",
"user_access_rights": [
{
"user_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + resource_group.name + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/users/" + USER_NAME + "",
"access_type": "Change"
}
],
"data_policy": "Cloud"
}
result = self.mgmt_client.shares.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME, BODY)
result = result.result()
# OrderPut[put]
BODY = {
"contact_information": {
"contact_person": "John Mcclane",
"company_name": "Microsoft",
"phone": "(800) 426-9400",
"email_list": [
"[email protected]"
]
},
"shipping_address": {
"address_line1": "Microsoft Corporation",
"address_line2": "One Microsoft Way",
"address_line3": "Redmond",
"postal_code": "98052",
"city": "WA",
"state": "WA",
"country": "USA"
}
}
result = self.mgmt_client.orders.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
result = result.result()
# TriggerPut[put]
BODY = {
"properties": {
"custom_context_tag": "CustomContextTags-1235346475",
"source_info": {
"share_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/shares/" + SHARE_NAME + ""
},
"sink_info": {
"role_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/" + DATA_BOX_EDGE_DEVICE_NAME + "/roles/" + ROLE_NAME + ""
}
},
"kind": "FileEvent"
}
result = self.mgmt_client.triggers.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME, BODY)
result = result.result()
# BandwidthSchedulePut[put]
BODY = {
"start": "0:0:0",
"stop": "13:59:0",
"rate_in_mbps": "100",
"days": [
"Sunday",
"Monday"
]
}
result = self.mgmt_client.bandwidth_schedules.create_or_update(DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME, BODY, resource_group.name)
result = result.result()
# SACPut[put]
BODY = {
"properties": {
"alias": "sac1",
"user_name": "cisbvt",
"account_key": {
"value": "lAeZEYi6rNP1/EyNaVUYmTSZEYyaIaWmwUsGwek0+xiZj54GM9Ue9/UA2ed/ClC03wuSit2XzM/cLRU5eYiFBwks23rGwiQOr3sruEL2a74EjPD050xYjA6M1I2hu/w2yjVHhn5j+DbXS4Xzi+rHHNZK3DgfDO3PkbECjPck+PbpSBjy9+6Mrjcld5DIZhUAeMlMHrFlg+WKRKB14o/og56u5/xX6WKlrMLEQ+y6E18dUwvWs2elTNoVO8PBE8SM/CfooX4AMNvaNdSObNBPdP+F6Lzc556nFNWXrBLRt0vC7s9qTiVRO4x/qCNaK/B4y7IqXMllwQFf4Np9UQ2ECA==",
"encryption_cert_thumbprint": "2A9D8D6BE51574B5461230AEF02F162C5F01AD31",
"encryption_algorithm": "AES256"
},
"ssl_status": "Disabled",
"account_type": "BlobStorage"
}
}
result = self.mgmt_client.storage_account_credentials.create_or_update(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME, BODY)
result = result.result()
# SACGet[get]
result = self.mgmt_client.storage_account_credentials.get(DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME, resource_group.name)
# BandwidthScheduleGet[get]
result = self.mgmt_client.bandwidth_schedules.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME)
# OperationsStatusGet[get]
result = self.mgmt_client.operations_status.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, OPERATIONS_STATUS_NAME)
# NetworkSettingsGet[get]
result = self.mgmt_client.devices.get_network_settings(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, NETWORK_SETTING_NAME)
# UpdateSummaryGet[get]
result = self.mgmt_client.devices.get_update_summary(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, UPDATE_SUMMARY_NAME)
# TriggerGet[get]
result = self.mgmt_client.triggers.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME)
# SACGetAllInDevice[get]
result = self.mgmt_client.storage_account_credentials.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# AlertGet[get]
result = self.mgmt_client.alerts.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ALERT_NAME)
# ShareGet[get]
result = self.mgmt_client.shares.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
# OrderGet[get]
result = self.mgmt_client.orders.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ORDER_NAME)
# UserGet[get]
result = self.mgmt_client.users.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, USER_NAME)
# RoleGet[get]
result = self.mgmt_client.roles.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME)
# JobsGet[get]
result = self.mgmt_client.jobs.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, JOB_NAME)
# BandwidthScheduleGetAllInDevice[get]
result = self.mgmt_client.bandwidth_schedules.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# TriggerGetAllInDevice[get]
result = self.mgmt_client.triggers.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# OrderGetAllInDevice[get]
result = self.mgmt_client.orders.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# AlertGetAllInDevice[get]
result = self.mgmt_client.alerts.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# ShareGetAllInDevice[get]
result = self.mgmt_client.shares.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# NodesGetAllInDevice[get]
result = self.mgmt_client.nodes.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# RoleGetAllInDevice[get]
result = self.mgmt_client.roles.list_by_data_box_edge_device(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# DataBoxEdgeDeviceGetByName[get]
result = self.mgmt_client.devices.get(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# DataBoxEdgeDeviceGetByResourceGroup[get]
result = self.mgmt_client.devices.list_by_resource_group(resource_group.name)
# DataBoxEdgeDeviceGetBySubscription[get]
result = self.mgmt_client.devices.list_by_subscription()
# OperationsGet[get]
result = self.mgmt_client.operations.list()
# CreateOrUpdateSecuritySettings[post]
BODY = {
"properties": {
"device_admin_password": {
"value": "jJ5MvXa/AEWvwxviS92uCjatCXeyLYTy8jx/k105MjQRXT7i6Do8qpEcQ8d+OBbwmQTnwKW0CYyzzVRCc0uZcPCf6PsWtP4l6wvcKGAP66PwK68eEkTUOmp+wUHc4hk02kWmTWeAjBZkuDBP3xK1RnZo95g2RE4i1UgKNP5BEKCLd71O104DW3AWW41mh9XLWNOaxw+VjQY7wmvlE6XkvpkMhcGuha2u7lx8zi9ZkcMvJVYDYK36Fb/K3KhBAmDjjDmVq04jtBlcSTXQObt0nlj4BwGGtdrpeIpr67zqr5i3cPm6e6AleIaIhp6sI/uyGSMiT3oev2eg49u2ii7kVA==",
"encryption_algorithm": "AES256",
"encryption_cert_thumbprint": "7DCBDFC44ED968D232C9A998FC105B5C70E84BE0"
}
}
}
result = self.mgmt_client.devices.create_or_update_security_settings(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SECURITY_SETTING_NAME, BODY)
result = result.result()
# ShareRefreshPost[post]
result = self.mgmt_client.shares.refresh(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
result = result.result()
# ExtendedInfoPost[post]
result = self.mgmt_client.devices.get_extended_information(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
# UploadCertificatePost[post]
BODY = {
"properties": {
"certificate": "MIIC9DCCAdygAwIBAgIQWJae7GNjiI9Mcv/gJyrOPTANBgkqhkiG9w0BAQUFADASMRAwDgYDVQQDDAdXaW5kb3dzMB4XDTE4MTEyNzAwMTA0NVoXDTIxMTEyODAwMTA0NVowEjEQMA4GA1UEAwwHV2luZG93czCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxkRExqxf0qH1avnyORptIbRC2yQwqe3EIbJ2FPKr5jtAppGeX/dGKrFSnX+7/0HFr77aJHafdpEAtOiLyJ4zCAVs0obZCCIq4qJdmjYUTU0UXH/w/YzXfQA0d9Zh9AN+NJBX9xj05NzgsT24fkgsK2v6mWJQXT7YcWAsl5sEYPnx1e+MrupNyVSL/RUJmrS+etJSysHtFeWRhsUhVAs1DD5ExJvBLU3WH0IsojEvpXcjrutB5/MDQNrd/StGI6WovoSSPH7FyT9tgERx+q+Yg3YUGzfaIPCctlrRGehcdtzdNoKd0rsX62yCq0U6POoSfwe22NJu41oAUMd7e6R8cCAwEAAaNGMEQwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFDd0VxnS3LnMIfwc7xW4b4IZWG5GMA4GA1UdDwEB/wQEAwIFIDANBgkqhkiG9w0BAQUFAAOCAQEAPQRby2u9celvtvL/DLEb5Vt3/tPStRQC5MyTD62L5RT/q8E6EMCXVZNkXF5WlWucLJi/18tY+9PNgP9xWLJh7kpSWlWdi9KPtwMqKDlEH8L2TnQdjimt9XuiCrTnoFy/1X2BGLY/rCaUJNSd15QCkz2xeW+Z+YSk2GwAc/A/4YfNpqSIMfNuPrT76o02VdD9WmJUA3fS/HY0sU9qgQRS/3F5/0EPS+HYQ0SvXCK9tggcCd4O050ytNBMJC9qMOJ7yE0iOrFfOJSCfDAuPhn/rHFh79Kn1moF+/CE+nc0/2RPiLC8r54/rt5dYyyxJDfXg0a3VrrX39W69WZGW5OXiw=="
}
}
result = self.mgmt_client.devices.upload_certificate(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BODY)
# DownloadUpdatesPost[post]
result = self.mgmt_client.devices.download_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
# ScanForUpdatesPost[post]
result = self.mgmt_client.devices.scan_for_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
# InstallUpdatesPost[post]
result = self.mgmt_client.devices.install_updates(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME)
result = result.result()
"""
# DataBoxEdgeDevicePatch[patch]
BODY = {
"tags": {
"key1": "value1",
"key2": "value2"
}
}
result = self.mgmt_client.devices.update(DATA_BOX_EDGE_DEVICE_NAME, BODY, resource_group.name)
"""
# SACDelete[delete]
result = self.mgmt_client.storage_account_credentials.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, STORAGE_ACCOUNT_CREDENTIAL_NAME)
result = result.result()
# BandwidthScheduleDelete[delete]
result = self.mgmt_client.bandwidth_schedules.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, BANDWIDTH_SCHEDULE_NAME)
result = result.result()
# TriggerDelete[delete]
result = self.mgmt_client.triggers.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, TRIGGER_NAME)
result = result.result()
# ShareDelete[delete]
result = self.mgmt_client.shares.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, SHARE_NAME)
result = result.result()
# OrderDelete[delete]
result = self.mgmt_client.orders.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ORDER_NAME)
result = result.result()
# UserDelete[delete]
result = self.mgmt_client.users.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, USER_NAME)
result = result.result()
# RoleDelete[delete]
result = self.mgmt_client.roles.delete(resource_group.name, DATA_BOX_EDGE_DEVICE_NAME, ROLE_NAME)
result = result.result()
"""
# DataBoxEdgeDeviceDelete[delete]
result = self.mgmt_client.devices.delete(DATA_BOX_EDGE_DEVICE_NAME, resource_group.name)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit |
dmacvicar/spacewalk | client/solaris/smartpm/smart/channels/rpm_md_info.py | 6 | 1125 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import _
kind = "package"
name = _("RPM MetaData")
description = _("""
Repository created with the rpm-metadata project.
""")
fields = [("baseurl", _("Base URL"), str, None,
_("URL where repodata/ subdirectory is found"))]
| gpl-2.0 |
nicolargo/intellij-community | python/helpers/docutils/parsers/rst/languages/sv.py | 57 | 3988 | # $Id: sv.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Adam Chodorowski <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'observera': 'attention',
u'caution (translation required)': 'caution',
u'fara': 'danger',
u'fel': 'error',
u'v\u00e4gledning': 'hint',
u'viktigt': 'important',
u'notera': 'note',
u'tips': 'tip',
u'varning': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u'\u00e4mne': 'topic',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'mellanrubrik': 'rubric',
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'meta': 'meta',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
u'figur': 'figure',
u'inkludera': 'include',
u'r\u00e5': 'raw', # FIXME: Translation might be too literal.
u'ers\u00e4tt': 'replace',
u'unicode': 'unicode',
u'datum': 'date',
u'class (translation required)': 'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'inneh\u00e5ll': 'contents',
u'sektionsnumrering': 'sectnum',
u'target-notes (translation required)': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abbreviation (translation required)': 'abbreviation',
u'acronym (translation required)': 'acronym',
u'index (translation required)': 'index',
u'subscript (translation required)': 'subscript',
u'superscript (translation required)': 'superscript',
u'title-reference (translation required)': 'title-reference',
u'pep-reference (translation required)': 'pep-reference',
u'rfc-reference (translation required)': 'rfc-reference',
u'emphasis (translation required)': 'emphasis',
u'strong (translation required)': 'strong',
u'literal (translation required)': 'literal',
u'named-reference (translation required)': 'named-reference',
u'anonymous-reference (translation required)': 'anonymous-reference',
u'footnote-reference (translation required)': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitution-reference (translation required)': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference (translation required)': 'uri-reference',
u'r\u00e5': 'raw',}
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
| apache-2.0 |
MrLoick/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/integration/POP3_TLS.py | 271 | 5466 | """TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
"""This class extends L{poplib.POP3} with TLS support."""
def __init__(self, host, port = POP3_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new POP3_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
### New code below (all else copied from poplib)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
###
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
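# Usage sketch (the host and credentials below are placeholders, not part of
# the library): open a POP3-over-TLS session with SRP mutual authentication,
# then drive it through the ordinary poplib API.
#
#     from gdata.tlslite.integration.POP3_TLS import POP3_TLS
#     pop = POP3_TLS("pop.example.com", username="alice", password="secret")
#     print pop.list()
#     pop.quit()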
| apache-2.0 |
hkemmel/tal | affichage.py | 1 | 2209 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017
@author: manfred.madelaine
"""
import time
def affStart():
msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
listMsg = []
listMsg.append("")
listMsg.append(msg1)
listMsg.append("")
listMsg.append(msg2)
listMsg.append("")
print(affBox(listMsg, 1, 1, len(msg2)))
delai()
def affEnd():
msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
msg = []
msg.append(msg1)
box = affBox(msg, 1, 1, len(msg1)-1)
print(box)
def affMessage(msg):
deb = "\n\t--- "
fin = " ---\n\n"
print(deb + msg + fin)
delai()
def delai():
time.sleep(0.8)
"""
Affiche un message dans une boite
msg : message à afficher
x : décalage horizontal
y : décalage vertical
L : largeur de la boite
"""
def affBox(msg, x, y, L):
box = ""
    # vertical offset
box += multChaine("\n", y)
indiceLine = 0
    # render each line of the message
for txt in msg:
        # top border
if(indiceLine == 0):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
        # horizontal offset
box += "\n" + multChaine("\t", x)
esp = ""
mult = 1
#message
if(len(txt) < L ):
esp = " "
mult = (L - len(txt)) / 2
box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"
        # bottom border
if(indiceLine == len(msg) - 1 ):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
indiceLine += 1
box+="\n"
return(box)
def affErr():
affMessage("Votre réponse est incorrecte !")
def multChaine(chaine, mult):
i = 0
msg = ""
while i < mult:
msg += chaine
i += 1
return msg
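if __name__ == '__main__':
    # Quick demo (sample text only): draw a framed two-line message,
    # indented one tab, with one blank line above and a 30-character frame.
    print(affBox(["i-Opinion", "demo"], 1, 1, 30))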
| gpl-3.0 |
wangyikai/linux | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
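# Example (made-up addresses): with kallsyms == [(4096, "foo"), (8192, "bar")],
# get_sym("6144") bisects to the "foo" entry and returns ("foo", 2048),
# i.e. the symbol name plus the offset of the address within that symbol.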
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
LuizGsa21/p4-conference-central | models.py | 1 | 7226 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
import datetime
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty(default='')
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True)
wishList = ndb.KeyProperty(kind='Session', repeated=True)
def toForm(self):
form = ProfileForm(
displayName=self.displayName,
mainEmail=self.mainEmail,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize),
conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend]
)
form.check_initialized()
return form
def toMiniForm(self):
form = ProfileMiniForm(
displayName=self.displayName,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize)
)
form.check_initialized()
return form
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class Conference(ndb.Model):
"""Conference -- Conference object"""
required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate')
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty(required=True)
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty(required=True)
month = ndb.IntegerProperty()
endDate = ndb.DateProperty(required=True)
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
@property
def sessions(self):
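        # Sessions are stored as datastore descendants of their conference,
        # so an ancestor query returns exactly this conference's sessions.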
return Session.query(ancestor=self.key)
def toForm(self, display_name=''):
form = ConferenceForm(
websafeKey=self.key.urlsafe(),
name=self.name,
description=self.description,
organizerUserId=self.organizerUserId,
topics=self.topics,
city=self.city,
startDate=self.startDate.strftime('%Y-%m-%d'),
month=self.month,
endDate=self.endDate.strftime('%Y-%m-%d'),
maxAttendees=self.maxAttendees,
seatsAvailable=self.seatsAvailable,
organizerDisplayName=display_name
)
form.check_initialized()
return form
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class Session(ndb.Model):
"""Session -- Session object"""
required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StructuredProperty(modelclass=Speaker, required=True)
duration = ndb.IntegerProperty(required=True)
typeOfSession = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True)
startTime = ndb.TimeProperty(required=True)
def toForm(self):
form = SessionForm(
websafeKey=self.key.urlsafe(),
name=self.name,
highlights=self.highlights,
speaker=self.speaker.name,
duration=self.duration,
typeOfSession=self.typeOfSession,
date=self.date.strftime('%Y-%m-%d'),
startTime=self.startTime.strftime('%H:%M')
)
form.check_initialized()
return form
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
websafeKey = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3)
speaker = messages.StringField(4)
duration = messages.IntegerField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForm -- multiple SessionForm outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
| apache-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/axis.py | 4 | 85084 | """
Classes for the ticks and x and y axis
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
import warnings
GRIDLINE_INTERPOLATION_STEPS = 180
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
        a boolean which determines whether to draw the gridline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size=None, # points
width=None,
color=None,
tickdir=None,
pad=None,
labelsize=None,
labelcolor=None,
zorder=None,
gridOn=None, # defaults to axes.grid depending on
# axes.grid.which
tick1On=True,
tick2On=True,
label1On=True,
label2On=False,
major=True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in points
"""
artist.Artist.__init__(self)
if gridOn is None:
if major and (rcParams['axes.grid.which'] in ('both', 'major')):
gridOn = rcParams['axes.grid']
elif (not major) and (rcParams['axes.grid.which']
in ('both', 'minor')):
gridOn = rcParams['axes.grid']
else:
gridOn = False
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
self._name = name
self._loc = loc
if size is None:
if major:
size = rcParams['%s.major.size' % name]
else:
size = rcParams['%s.minor.size' % name]
self._size = size
if width is None:
if major:
width = rcParams['%s.major.width' % name]
else:
width = rcParams['%s.minor.width' % name]
self._width = width
if color is None:
color = rcParams['%s.color' % name]
self._color = color
if pad is None:
if major:
pad = rcParams['%s.major.pad' % name]
else:
pad = rcParams['%s.minor.pad' % name]
self._base_pad = pad
if labelcolor is None:
labelcolor = rcParams['%s.color' % name]
self._labelcolor = labelcolor
if labelsize is None:
labelsize = rcParams['%s.labelsize' % name]
self._labelsize = labelsize
if zorder is None:
if major:
zorder = mlines.Line2D.zorder + 0.01
else:
zorder = mlines.Line2D.zorder
self._zorder = zorder
self.apply_tickdir(tickdir)
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def apply_tickdir(self, tickdir):
"""
Calculate self._pad and self._tickmarkers
"""
pass
def get_tickdir(self):
return self._tickdir
def get_tick_padding(self):
"""
Get the length of the tick outside of the axes.
"""
padding = {
'in': 0.0,
'inout': 0.5,
'out': 1.0
}
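        # For example, a 4-point tick with tickdir 'inout' extends 2 points
        # beyond the axes, while an 'in' tick extends 0.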
return self._size * padding[self._tickdir]
def get_children(self):
children = [self.tick1line, self.tick2line,
self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
self.gridline.set_clip_path(clippath, transform)
self.stale = True
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._base_pad / 72.0
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
return False, {}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._apply_params(pad=val)
self.stale = True
def get_pad(self):
'Get the value of the tick label pad in points'
return self._base_pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
self.stale = False
return
renderer.open_group(self.__name__)
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
self.stale = False
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
self.stale = True
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
self.stale = True
def _set_artist_props(self, a):
a.set_figure(self.figure)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def _apply_params(self, **kw):
switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']
switches = [k for k in kw if k in switchkw]
for k in switches:
setattr(self, k, kw.pop(k))
newmarker = [k for k in kw if k in ['size', 'width', 'pad', 'tickdir']]
if newmarker:
self._size = kw.pop('size', self._size)
# Width could be handled outside this block, but it is
# convenient to leave it here.
self._width = kw.pop('width', self._width)
self._base_pad = kw.pop('pad', self._base_pad)
# apply_tickdir uses _size and _base_pad to make _pad,
# and also makes _tickmarkers.
self.apply_tickdir(kw.pop('tickdir', self._tickdir))
self.tick1line.set_marker(self._tickmarkers[0])
self.tick2line.set_marker(self._tickmarkers[1])
for line in (self.tick1line, self.tick2line):
line.set_markersize(self._size)
line.set_markeredgewidth(self._width)
# _get_text1_transform uses _pad from apply_tickdir.
trans = self._get_text1_transform()[0]
self.label1.set_transform(trans)
trans = self._get_text2_transform()[0]
self.label2.set_transform(trans)
tick_kw = dict([kv for kv in six.iteritems(kw)
if kv[0] in ['color', 'zorder']])
if tick_kw:
self.tick1line.set(**tick_kw)
self.tick2line.set(**tick_kw)
for k, v in six.iteritems(tick_kw):
setattr(self, '_' + k, v)
label_list = [k for k in six.iteritems(kw)
if k[0] in ['labelsize', 'labelcolor']]
if label_list:
label_kw = dict([(k[5:], v) for (k, v) in label_list])
self.label1.set(**label_kw)
self.label2.set(**label_kw)
for k, v in six.iteritems(label_kw):
                # for labelsize the text objects convert str ('small')
# -> points. grab the integer from the `Text` object
# instead of saving the string representation
v = getattr(self.label1, 'get_' + k)()
setattr(self, '_label' + k, v)
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
raise NotImplementedError('Derived must override')
def _get_text1_transform(self):
raise NotImplementedError('Derived must override')
def _get_text2_transform(self):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
elif self._tickdir == 'inout':
self._tickmarkers = ('|', '|')
else:
self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,), color=self._color,
linestyle='None', marker=self._tickmarkers[0],
markersize=self._size,
markeredgewidth=self._width, zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(1,),
color=self._color,
linestyle='None',
marker=self._tickmarkers[1],
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_xaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
elif self._tickdir == 'inout':
self._tickmarkers = ('_', '_')
else:
self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
# how far from the y axis line the right edge of the ticklabels is
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((0,), (0,),
color=self._color,
marker=self._tickmarkers[0],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((1,), (0,),
color=self._color,
marker=self._tickmarkers[1],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D(xdata=(0, 1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_yaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y(y)
if self.label2On:
self.label2.set_y(y)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
class Ticker(object):
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`axes.transData` - transform data coords to display coords
* :attr:`axes.transAxes` - transform axis coords to display coords
* :attr:`labelpad` - number of points between the axis and its label
"""
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)" % tuple(self.axes.transAxes.transform_point((0, 0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
# Keep track of setting to the default value, this allows us to know
# if any of the following values is explicitly set by the user, so as
# to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry()
self._autolabelpos = True
self._smart_bounds = False
self.label = self._get_label()
self.labelpad = rcParams['axes.labelpad']
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
# Initialize here for testing; later add API
self._major_tick_kw = dict()
self._minor_tick_kw = dict()
self.cla()
self._set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system will be
the axes coordinate system: (0, 0) is (left, bottom), (0.5, 0.5)
is the middle, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
self.stale = True
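# A hedged usage sketch: pin the y-label of several stacked axes at the
# same x position in axes coordinates so they align (assumes a standard
# pyplot Axes named `ax`):
#
#   ax.yaxis.set_label_coords(-0.1, 0.5)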
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def _set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label, self.offsetText]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
self.set_label_text('')
self._set_artist_props(self.label)
# Keep track of setting to the default value, this allows us to know
# if any of the following values is explicitly set by the user, so as
# to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry()
# whether the grids are on
self._gridOnMajor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'major'))
self._gridOnMinor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'minor'))
self.label.set_text('')
self._set_artist_props(self.label)
self.reset_ticks()
self.converter = None
self.units = None
self.set_units(None)
self.stale = True
def reset_ticks(self):
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
def set_tick_params(self, which='major', reset=False, **kw):
"""
Set appearance parameters for ticks and ticklabels.
For documentation of keyword arguments, see
:meth:`matplotlib.axes.Axes.tick_params`.
"""
dicts = []
if which == 'major' or which == 'both':
dicts.append(self._major_tick_kw)
if which == 'minor' or which == 'both':
dicts.append(self._minor_tick_kw)
kwtrans = self._translate_tick_kw(kw, to_init_kw=True)
for d in dicts:
if reset:
d.clear()
d.update(kwtrans)
if reset:
self.reset_ticks()
else:
if which == 'major' or which == 'both':
for tick in self.majorTicks:
tick._apply_params(**self._major_tick_kw)
if which == 'minor' or which == 'both':
for tick in self.minorTicks:
tick._apply_params(**self._minor_tick_kw)
if 'labelcolor' in kwtrans:
self.offsetText.set_color(kwtrans['labelcolor'])
self.stale = True
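# A minimal usage sketch (assuming a standard pyplot Axes named `ax`);
# the keyword names follow Axes.tick_params, which this method accepts:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.xaxis.set_tick_params(which='major', length=8, width=1.5,
#                            direction='out', labelsize='small')
#
# 'length' and 'direction' are translated to the Tick constructor
# names 'size' and 'tickdir' by _translate_tick_kw below.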
@staticmethod
def _translate_tick_kw(kw, to_init_kw=True):
# We may want to move the following function to
# a more visible location; or maybe there already
# is something like this.
def _bool(arg):
if cbook.is_string_like(arg):
if arg.lower() == 'on':
return True
if arg.lower() == 'off':
return False
raise ValueError('String "%s" should be "on" or "off"' % arg)
return bool(arg)
# The following lists may be moved to a more
# accessible location.
kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On']
kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop']
kwkeys = kwkeys0 + kwkeys1
kwtrans = dict()
if to_init_kw:
if 'length' in kw:
kwtrans['size'] = kw.pop('length')
if 'direction' in kw:
kwtrans['tickdir'] = kw.pop('direction')
if 'left' in kw:
kwtrans['tick1On'] = _bool(kw.pop('left'))
if 'bottom' in kw:
kwtrans['tick1On'] = _bool(kw.pop('bottom'))
if 'right' in kw:
kwtrans['tick2On'] = _bool(kw.pop('right'))
if 'top' in kw:
kwtrans['tick2On'] = _bool(kw.pop('top'))
if 'labelleft' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelleft'))
if 'labelbottom' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelbottom'))
if 'labelright' in kw:
kwtrans['label2On'] = _bool(kw.pop('labelright'))
if 'labeltop' in kw:
kwtrans['label2On'] = _bool(kw.pop('labeltop'))
if 'colors' in kw:
c = kw.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw:
if key not in kwkeys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, kwkeys))
kwtrans.update(kw)
else:
raise NotImplementedError("Inverse translation is deferred")
return kwtrans
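# For illustration, the translation above maps the public tick_params
# names onto the Tick constructor names; with hypothetical values:
#
#   Axis._translate_tick_kw({'length': 6, 'direction': 'in',
#                            'labelleft': 'on'})
#   # -> {'size': 6, 'tickdir': 'in', 'label1On': True}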
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'''set the axis data limits'''
raise NotImplementedError('Derived must override')
def set_default_intervals(self):
'''set the default limits for the axis data and view interval if they
are not mutated'''
# this is mainly in support of custom object plotting. For
# example, if someone passes in a datetime object, we do not
# know automagically how to set the default min/max of the
# data and view limits. The unit conversion AxisInfo
# interface provides a hook for custom types to register
# default limits through the AxisInfo.default_limits
# attribute, and the derived code below will check for that
# and use it if it is available (else just use 0..1)
pass
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i)
for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
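# Each yielded item is a (tick, location, label) triple, majors first;
# a sketch of typical consumption (as _update_ticks does below):
#
#   for tick, loc, label in axis.iter_ticks():
#       tick.update_position(loc)
#       tick.set_label1(label)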
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def set_smart_bounds(self, value):
"""set the axis to have smart bounds"""
self._smart_bounds = value
self.stale = True
def get_smart_bounds(self):
"""get whether the axis has smart bounds"""
return self._smart_bounds
def _update_ticks(self, renderer):
"""
Update ticks (position and labels) using the current data
interval of the axes. Returns a list of ticks that will be
drawn.
"""
interval = self.get_view_interval()
tick_tups = [t for t in self.iter_ticks()]
if self._smart_bounds:
# handle inverted limits
view_low, view_high = min(*interval), max(*interval)
data_low, data_high = self.get_data_interval()
if data_low > data_high:
data_low, data_high = data_high, data_low
locs = [ti[1] for ti in tick_tups]
locs.sort()
locs = np.array(locs)
if len(locs):
if data_low <= view_low:
# data extends beyond view, take view as limit
ilow = view_low
else:
# data stops within view, take best tick
cond = locs <= data_low
good_locs = locs[cond]
if len(good_locs) > 0:
# last tick prior or equal to first data point
ilow = good_locs[-1]
else:
# No ticks (why not?), take first tick
ilow = locs[0]
if data_high >= view_high:
# data extends beyond view, take view as limit
ihigh = view_high
else:
# data stops within view, take best tick
cond = locs >= data_high
good_locs = locs[cond]
if len(good_locs) > 0:
# first tick after or equal to last data point
ihigh = good_locs[0]
else:
# No ticks (why not?), take last tick
ihigh = locs[-1]
tick_tups = [ti for ti in tick_tups
if (ti[1] >= ilow) and (ti[1] <= ihigh)]
# so that we don't lose ticks on the end, expand out the interval ever
# so slightly. The "ever so slightly" is defined to be the width of a
# half of a pixel. We don't want to draw a tick that is even one pixel
# outside of the defined axis interval.
if interval[0] <= interval[1]:
interval_expanded = interval
else:
interval_expanded = interval[1], interval[0]
if hasattr(self, '_get_pixel_distance_along_axis'):
# normally, one does not want to catch all exceptions that
# could possibly happen, but it is not clear exactly what
# exceptions might arise from a user's projection (their
# rendition of the Axis object). So, we catch all, with
# the idea that one would rather potentially lose a tick
# from one side of the axis or another, rather than see a
# stack trace.
# We also silence numpy's invalid-value warnings here. These are
# the result of invalid numpy calculations caused by out-of-bounds
# values on axes with finite allowed intervals, such as geo
# projections, e.g. Mollweide.
with np.errstate(invalid='ignore'):
try:
ds1 = self._get_pixel_distance_along_axis(
interval_expanded[0], -0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds1 = 0.0
if np.isnan(ds1):
ds1 = 0.0
try:
ds2 = self._get_pixel_distance_along_axis(
interval_expanded[1], +0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds2 = 0.0
if np.isnan(ds2):
ds2 = 0.0
interval_expanded = (interval_expanded[0] - ds1,
interval_expanded[1] + ds2)
ticks_to_draw = []
for tick, loc, label in tick_tups:
if tick is None:
continue
if not mtransforms.interval_contains(interval_expanded, loc):
continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
ticks_to_draw.append(tick)
return ticks_to_draw
def _get_tick_bboxes(self, ticks, renderer):
"""
Given the list of ticks, return two lists of bboxes. One for
tick label1's and another for tick label2's.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
for tick in ticks:
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
return ticklabelBoxes, ticklabelBoxes2
def get_tightbbox(self, renderer):
"""
Return a bounding box that encloses the axis. It only accounts
for tick labels, the axis label, and the offsetText.
"""
if not self.get_visible():
return
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
bb = []
for a in [self.label, self.offsetText]:
if a.get_visible():
bb.append(a.get_window_extent(renderer))
bb.extend(ticklabelBoxes)
bb.extend(ticklabelBoxes2)
bb = [b for b in bb if b.width != 0 or b.height != 0]
if bb:
_bbox = mtransforms.Bbox.union(bb)
return _bbox
else:
return None
def get_tick_padding(self):
values = []
if len(self.majorTicks):
values.append(self.majorTicks[0].get_tick_padding())
if len(self.minorTicks):
values.append(self.minorTicks[0].get_tick_padding())
if len(values):
return max(values)
return 0.0
@allow_rasterization
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
if not self.get_visible():
return
renderer.open_group(__name__)
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
for tick in ticks_to_draw:
tick.draw(renderer)
# scale up the axis label box to also find the neighbors, not
# just the tick labels that actually overlap. Note we need a
# *copy* of the axis label box because we don't want to scale
# the actual bbox.
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in self.majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
self.stale = False
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline',
[tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1 + labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1 + labels2)
def get_ticklabels(self, minor=False, which=None):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
Parameters
----------
minor : bool
If True return the minor ticklabels,
else return the major ticklabels
which : None, ('minor', 'major', 'both')
Overrides `minor`.
Selects which ticklabels to return
Returns
-------
ret : list
List of :class:`~matplotlib.text.Text` instances.
"""
if which is not None:
if which == 'minor':
return self.get_minorticklabels()
elif which == 'major':
return self.get_majorticklabels()
elif which == 'both':
return self.get_majorticklabels() + self.get_minorticklabels()
else:
raise ValueError("`which` must be one of ('minor', 'major', "
"'both') not " + str(which))
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_label_text(self):
'Get the text of the label'
return self.label.get_text()
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; *b* is a boolean. Use *which* =
'major' | 'minor' | 'both' to set the grid for major or minor ticks.
If *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True.
*kwargs* are used to set the line properties of the grids, e.g.,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs):
b = True
which = which.lower()
if which in ['minor', 'both']:
if b is None:
self._gridOnMinor = not self._gridOnMinor
else:
self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMinor
if len(kwargs):
tick.gridline.update(kwargs)
self._minor_tick_kw['gridOn'] = self._gridOnMinor
if which in ['major', 'both']:
if b is None:
self._gridOnMajor = not self._gridOnMajor
else:
self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMajor
if len(kwargs):
tick.gridline.update(kwargs)
self._major_tick_kw['gridOn'] = self._gridOnMajor
self.stale = True
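# A hedged usage sketch; with a pyplot Axes `ax`, these calls toggle or
# style the vertical grid lines only:
#
#   ax.xaxis.grid(True)                      # turn the major grid on
#   ax.xaxis.grid(color='r', linestyle=':')  # kwargs imply b=True
#   ax.xaxis.grid(which='minor')             # toggle the minor grid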
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True*
if *data* is registered for unit conversion.
"""
converter = munits.registry.get_converter(data)
if converter is None:
return False
neednew = self.converter != converter
self.converter = converter
default = self.converter.default_units(data, self)
if default is not None and self.units is None:
self.set_units(default)
if neednew:
self._update_axisinfo()
self.stale = True
return True
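# Sketch of the intended flow: plotting code calls update_units(data)
# with the raw values; if the data type (e.g. datetime) is registered
# in munits.registry this installs the matching locator/formatter:
#
#   if axis.update_units(data):
#       data = axis.convert_units(data)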
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
return x
ret = self.converter.convert(x, self.units, self)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u != self.units:
self.units = u
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
self.stale = True
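# Units are normally set indirectly via update_units() when data with a
# registered converter is plotted; an explicit call might look like
# this (hypothetical units tag, assuming a converter is registered):
#
#   ax.yaxis.set_units('degrees')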
def get_units(self):
'return the units for axis'
return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
""" Sets the text value of the axis label
ACCEPTS: A string value for the label
"""
self.isDefault_label = False
self.label.set_text(label)
if fontdict is not None:
self.label.update(fontdict)
self.label.update(kwargs)
self.stale = True
return self.label
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_majfmt = False
self.major.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_minfmt = False
self.minor.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_majloc = False
self.major.locator = locator
locator.set_axis(self)
self.stale = True
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_minloc = False
self.minor.locator = locator
locator.set_axis(self)
self.stale = True
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
All other kwargs are used to update the text object properties.
As for get_ticklabels, label1 (left or bottom) is
affected for a given tick only if its label1On attribute
is True, and similarly for label2. The list of returned
label text objects consists of all such label1 objects followed
by all such label2 objects.
The input *ticklabels* is assumed to match the set of
tick locations, regardless of the state of label1On and
label2On.
ACCEPTS: sequence of strings or Text objects
"""
get_labels = []
for t in ticklabels:
# try calling get_text() to check whether it is Text object
# if it is Text, get label content
try:
get_labels.append(t.get_text())
# otherwise add the label to the list directly
except AttributeError:
get_labels.append(t)
# replace the ticklabels list with the processed one
ticklabels = get_labels
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_major_ticks()
ret = []
for tick_label, tick in zip(ticklabels, ticks):
# deal with label1
tick.label1.set_text(tick_label)
tick.label1.update(kwargs)
# deal with label2
tick.label2.set_text(tick_label)
tick.label2.update(kwargs)
# only return visible tick labels
if tick.label1On:
ret.append(tick.label1)
if tick.label2On:
ret.append(tick.label2)
self.stale = True
return ret
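# Minimal sketch: fix the tick locations first so the labels line up,
# then replace the texts (names assume a pyplot Axes `ax`):
#
#   ax.xaxis.set_ticks([0, 1, 2])
#   ax.xaxis.set_ticklabels(['low', 'mid', 'high'], fontsize='small')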
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
# XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator(mticker.FixedLocator(ticks))
return self.get_major_ticks(len(ticks))
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
def axis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a :class:`tzinfo` instance or a timezone string.
This timezone is used to create date labels.
"""
# By providing a sample datetime instance with the desired
# timezone, the registered converter can be selected,
# and the "units" attribute, which is the timezone, can
# be set.
import datetime
if isinstance(tz, six.string_types):
import pytz
tz = pytz.timezone(tz)
self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
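# Example with an assumed pytz timezone string; after this call the
# axis uses the registered date converter for ticking and labels:
#
#   ax.xaxis.axis_date(tz='Europe/Amsterdam')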
def get_tick_space(self):
"""
Return the estimated number of ticks that can fit on the axis.
"""
# Must be overridden in the subclass
raise NotImplementedError()
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
raise NotImplementedError()
def get_minpos(self):
raise NotImplementedError()
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self, mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = xaxes >= 0 and xaxes <= 1 and (
(y < b and y > b - self.pickradius) or
(y > t and y < t + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center')
label.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()))
self._set_artist_props(label)
self.label_position = 'bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'bottom'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by "where", which is also given
in data coordinates, and is an x coordinate. "perturb" is the amount
to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
# Note that this routine does not work for a polar axis, because of
# the 1e-10 below. To do things correctly, we need to use rmax
# instead of 1e-10 for a polar axis. But since we do not have that
# kind of information at this point, we just don't try to pad anything
# for the theta axis of a polar plot.
if self.axes.name == 'polar':
return 0.0
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the y point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((where, 1e-10))
# perturb the pixel
ptp = transinv.transform_point((pix[0] + perturb, pix[1]))
dx = abs(ptp[0] - where)
return dx
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
if position == 'top':
self.label.set_verticalalignment('baseline')
elif position == 'bottom':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'top' | 'bottom' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'bottom':
try:
spine = self.axes.spines['bottom']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
bottom = bbox.y0
self.label.set_position(
(x, bottom - self.labelpad * self.figure.dpi / 72.0)
)
else:
try:
spine = self.axes.spines['top']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
top = bbox.y1
self.label.set_position(
(x, top + self.labelpad * self.figure.dpi / 72.0)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position(
(x, bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none)
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
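# Usage sketch: move the x ticks and their labels to the top of the
# frame (equivalent to calling tick_top() below):
#
#   ax.xaxis.set_ticks_position('top')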
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorTop = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorTop = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorTop and minorTop:
return 'top'
MajorBottom = (majt.tick1On and (not majt.tick2On) and
majt.label1On and (not majt.label2On))
MinorBottom = (mT.tick1On and (not mT.tick2On) and
mT.label1On and (not mT.label2On))
if MajorBottom and MinorBottom:
return 'bottom'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_xlim`.
"""
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
xmin, xmax = 0., 1.
dataMutated = self.axes.dataLim.mutatedx()
viewMutated = self.axes.viewLim.mutatedx()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
xmin = self.converter.convert(valmin, self.units, self)
xmax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervalx = xmin, xmax
if not viewMutated:
self.axes.viewLim.intervalx = xmin, xmax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [1, 0]])
length = ((ends[1][0] - ends[0][0]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# There is a heuristic here that the aspect ratio of tick text
# is no more than 3:1
size = tick.label1.get_size() * 3
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
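# Worked example of the estimate above (assumed figure geometry): a
# 6.4 inch wide axes gives length = 6.4 * 72 = 460.8pt; with a 10pt
# label and the 3:1 aspect heuristic, floor(460.8 / 30) = 15 ticks fit.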
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = yaxes >= 0 and yaxes <= 1 and (
(x < l and x > l - self.pickradius) or
(x > r and x < r + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return YTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='bottom',
horizontalalignment='center',
rotation='vertical',
rotation_mode='anchor')
label.set_transform(mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes))
self._set_artist_props(label)
self.label_position = 'left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties=font_manager.FontProperties(
size=rcParams['ytick.labelsize']
),
color=rcParams['ytick.color'],
verticalalignment='baseline',
horizontalalignment='left')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'left'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by *where*, which is also given
in data coordinates, and is a y coordinate.
*perturb* is the amount to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the x point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((1e-10, where))
# perturb the pixel
ptp = transinv.transform_point((pix[0], pix[1] + perturb))
dy = abs(ptp[1] - where)
return dy
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
self.label.set_rotation_mode('anchor')
self.label.set_horizontalalignment('center')
if position == 'left':
self.label.set_verticalalignment('bottom')
elif position == 'right':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'left':
try:
spine = self.axes.spines['left']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
left = bbox.x0
self.label.set_position(
(left - self.labelpad * self.figure.dpi / 72.0, y)
)
else:
try:
spine = self.axes.spines['right']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
right = bbox.x1
self.label.set_position(
(right + self.labelpad * self.figure.dpi / 72.0, y)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position(
(x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def set_offset_position(self, position):
x, y = self.offsetText.get_position()
if position == 'left':
x = 0
elif position == 'right':
x = 1
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.offsetText.set_ha(position)
self.offsetText.set_position((x, y))
self.stale = True
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both, default or none)
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at left. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
if position == 'right':
self.set_tick_params(which='both', right=True, labelright=True,
left=False, labelleft=False)
self.set_offset_position(position)
elif position == 'left':
self.set_tick_params(which='both', right=False, labelright=False,
left=True, labelleft=True)
self.set_offset_position(position)
elif position == 'both':
self.set_tick_params(which='both', right=True,
left=True)
elif position == 'none':
self.set_tick_params(which='both', right=False,
left=False)
elif position == 'default':
self.set_tick_params(which='both', right=True, labelright=False,
left=True, labelleft=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorRight = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorRight = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorRight and minorRight:
return 'right'
majorLeft = (majt.tick1On and (not majt.tick2On) and
majt.label1On and (not majt.label2On))
minorLeft = (mT.tick1On and (not mT.tick2On) and
mT.label1On and (not mT.label2On))
if majorLeft and minorLeft:
return 'left'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_ylim`.
"""
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
self.stale = True
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
ymin, ymax = 0., 1.
dataMutated = self.axes.dataLim.mutatedy()
viewMutated = self.axes.viewLim.mutatedy()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
ymin = self.converter.convert(valmin, self.units, self)
ymax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervaly = ymin, ymax
if not viewMutated:
self.axes.viewLim.intervaly = ymin, ymax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [0, 1]])
length = ((ends[1][1] - ends[0][1]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# Having a spacing of at least 2 just looks good.
size = tick.label1.get_size() * 2.0
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
| bsd-3-clause |
brettwooldridge/buck | third-party/py/pywatchman/pywatchman/encoding.py | 29 | 2957 | # Copyright 2016-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
'''Module to deal with filename encoding on the local system, as returned by
Watchman.'''
import sys
from . import (
compat,
)
if compat.PYTHON3:
default_local_errors = 'surrogateescape'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
# On the Python 3 versions we support, sys.getfilesystemencoding never
# returns None.
return sys.getfilesystemencoding()
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
# that if they so desire.
default_local_errors = 'strict'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
fsencoding = sys.getfilesystemencoding()
if fsencoding is None:
# This is very unlikely to happen, but if it does, just use UTF-8
fsencoding = 'utf-8'
return fsencoding
def encode_local(s):
return s.encode(get_local_encoding(), default_local_errors)
def decode_local(bs):
return bs.decode(get_local_encoding(), default_local_errors)
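# A hedged usage sketch (variable names hypothetical): round-tripping a
# filename received from Watchman.
#   name = decode_local(raw_bytes)   # bytes from Watchman -> text
#   raw = encode_local(name)         # text -> bytes for filesystem calls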
| apache-2.0 |
gratteur/zim-desktop | zim/config/manager.py | 5 | 10560 | # -*- coding: utf-8 -*-
# Copyright 2013 Jaap Karssenberg <[email protected]>
from __future__ import with_statement
from weakref import WeakValueDictionary
from . import basedirs
from .dicts import INIConfigFile
from zim.fs import FileNotFoundError
from zim.signals import ConnectorMixin, SignalEmitter, SignalHandler
class ConfigManager(object):
'''This class defines an object that manages a set of config files.
The config manager abstracts the lookup of files using the XDG
search paths and ensures that there is only a single instance used
for each config file.
The config manager can switch the config file based on the config
X{profile} that is used. The profile is determined by the notebook
properties. However this object relies on it's creator to setup
the hooks to get the property from the notebook. Changes to the
profile are communicated to all users of the config by means of the
"changed" signals on L{ConfigFile} and L{ConfigDict} objects.
'''
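	# A minimal usage sketch (profile name hypothetical):
	#   manager = ConfigManager()            # falls back to XDG lookup
	#   prefs = manager.get_config_dict('preferences.conf')
	#   manager.set_profile('work')          # re-routes '<profile>/...' files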
def __init__(self, dir=None, dirs=None, profile=None):
'''Constructor
@param dir: the folder for reading and writing config files,
e.g. a C{Dir} or a C{VirtualConfigBackend} objects.
If no dir is given, the XDG basedirs are used and C{dirs} is
ignored.
@param dirs: list or generator of C{Dir} objects used as
search path when a config file does not exist on C{dir}
@param profile: initial profile name
'''
self.profile = profile
self._config_files = WeakValueDictionary()
self._config_dicts = WeakValueDictionary()
if dir is None:
assert dirs is None, "Do not provide 'dirs' without 'dir'"
self._dir = dir
self._dirs = dirs
def set_profile(self, profile):
'''Set the profile to use for the configuration
@param profile: the profile name or C{None}
'''
assert profile is None or isinstance(profile, basestring)
if profile != self.profile:
self.profile = profile
for path, conffile in self._config_files.items():
if path.startswith('<profile>/'):
file, defaults = self._get_file(path)
conffile.set_files(file, defaults)
# Updates will cascade through the dicts by the
# "changed" signals on various objects
def _get_file(self, filename):
basepath = filename.replace('<profile>/', '')
if self.profile:
path = filename.replace('<profile>/', 'profiles/%s/' % self.profile)
else:
path = basepath
if self._dir:
file = self._dir.file(path)
if self._dirs:
defaults = DefaultFileIter(self._dirs, path)
else:
defaults = DefaultFileIter([], path)
if self.profile and filename.startswith('<profile>/'):
mypath = filename.replace('<profile>/', '')
defaults.extra.insert(0, self._dir.file(mypath))
else:
file = basedirs.XDG_CONFIG_HOME.file('zim/' + path)
defaults = XDGConfigFileIter(basepath)
## Backward compatibility for profiles
if self.profile \
and filename in (
'<profile>/preferences.conf',
'<profile>/style.conf'
):
backwardfile = self._get_backward_file(filename)
defaults.extra.insert(0, backwardfile)
return file, defaults
def _get_backward_file(self, filename):
if filename == '<profile>/preferences.conf':
path = 'profiles/%s.conf' % self.profile
elif filename == '<profile>/style.conf':
path = 'styles/%s.conf' % self.profile
else:
raise AssertionError
if self._dir:
return self._dir.file(path)
else:
return basedirs.XDG_CONFIG_HOME.file('zim/' + path)
def get_config_file(self, filename):
'''Returns a C{ConfigFile} object for C{filename}'''
if filename not in self._config_files:
file, defaults = self._get_file(filename)
config_file = ConfigFile(file, defaults)
self._config_files[filename] = config_file
return self._config_files[filename]
def get_config_dict(self, filename):
'''Returns a C{SectionedConfigDict} object for C{filename}'''
if filename not in self._config_dicts:
file = self.get_config_file(filename)
config_dict = ConfigManagerINIConfigFile(file)
self._config_dicts[filename] = config_dict
return self._config_dicts[filename]
#def get_all_config_files(filename) - iterate multiple values ?
#def get_config_section(filename, section): - return section
def VirtualConfigManager(**data):
return ConfigManager(VirtualConfigBackend(**data))
class DefaultFileIter(object):
'''Generator for iterating default files
	Yields the files in C{extra} first, followed by files based on
	C{path} and C{dirs}. Yields only existing files.
'''
def __init__(self, dirs, path, extra=None):
self.path = path
self.dirs = dirs
self.extra = extra or []
def __iter__(self):
for file in self.extra:
if file.exists():
yield file
for dir in self.dirs:
file = dir.file(self.path)
if file.exists():
yield file
class XDGConfigDirsIter(object):
'''Generator for iterating XDG config dirs
	Yields the "zim" subdir of each XDG config dir.
'''
def __iter__(self):
from . import data_dirs # XXX
yield basedirs.XDG_CONFIG_HOME.subdir(('zim'))
for dir in basedirs.XDG_CONFIG_DIRS:
yield dir.subdir(('zim'))
for dir in data_dirs():
yield dir
class XDGConfigFileIter(DefaultFileIter):
'''Like C{DefaultFileIter}, but uses XDG config dirs'''
def __init__(self, path, extra=None):
self.path = path
self.dirs = XDGConfigDirsIter()
self.extra = extra or []
class ConfigManagerINIConfigFile(INIConfigFile):
'''Like L{INIConfigFile} but with autosave when the dict changes'''
def __init__(self, file):
INIConfigFile.__init__(self, file, monitor=True)
self.connect_after('changed', self.on_changed)
# autosave on changing the dict, connect after
# regular handlers to avoid getting stuck with a set
@SignalHandler
def on_changed(self, *a):
with self.on_file_changed.blocked():
self.write()
@SignalHandler
def on_file_changed(self, *a):
with self.on_changed.blocked():
INIConfigFile.on_file_changed(self, *a)
class ConfigFile(ConnectorMixin, SignalEmitter):
'''Container object for a config file
Maps to a "base" file in the home folder, used to write new values,
and an optional default file, which is used for reading only.
@ivar file: the underlying file object for the base config file
in the home folder
@ivar defaults: a generator that yields default files
	@note: this class implements a similar API to the L{File} class but
is explicitly not a sub-class of L{File} because config files should
typically not be moved, renamed, etc. It just implements the reading
and writing methods.
@signal: C{changed ()}: emitted when the
underlying file changed (based on C{gio} monitoring support)
or for file monitors or on profile switched
'''
# TODO __signals__
def __init__(self, file, defaults=None):
self.file = None
self.defaults = None
with self.blocked_signals('changed'):
self.set_files(file, defaults)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.file.path)
def __eq__(self, other):
return isinstance(other, ConfigFile) \
and other.file == self.file
def set_files(self, file, defaults=None):
if self.file:
self.disconnect_from(self.file)
self.file = file
self.defaults = defaults or []
#~ self.connectto(self.file, 'changed', self.on_file_changed)
self.emit('changed')
#~ def on_file_changed(self, file, *a):
#~ print "CONF FILE changed:", file
# TODO verify etag (we didn't write ourselves)
#~ self.emit('changed')
def check_has_changed_on_disk(self):
return True # we do not emit the signal if it is not real...
@property
def basename(self):
return self.file.basename
def touch(self):
		'''Ensure the custom file in the home folder exists, either by
		copying a default config file or by touching an empty file.
Intended to be called before trying to edit the file with an
external editor.
'''
if not self.file.exists():
for default in self.defaults:
default.copyto(self.file)
break
else:
self.file.touch() # create empty file
def read(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{''} for a non-existing file.
@returns: file content as a string
'''
try:
return self.file.read()
except FileNotFoundError:
for default in self.defaults:
return default.read()
else:
if fail:
raise
else:
return ''
def readlines(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{[]} for a non-existing file.
@returns: file content as a list of lines
'''
try:
return self.file.readlines()
except FileNotFoundError:
for default in self.defaults:
return default.readlines()
else:
if fail:
raise
else:
return []
def write(self, text):
'''Write base file, see L{File.write()}'''
self.file.write(text)
def writelines(self, lines):
'''Write base file, see L{File.writelines()}'''
self.file.writelines(lines)
def remove(self):
		'''Remove the user file, leaving default files in place'''
if self.file.exists():
return self.file.remove()
class VirtualConfigBackend(object):
'''Virtual dir, mainly used for testing'''
def __init__(self, **data):
self._data = data
def file(self, path):
return VirtualConfigBackendFile(self._data, path)
class VirtualConfigBackendFile(object):
'''Virtual file, mainly used for testing'''
def __init__(self, data, path):
self._key = path
self._data = data
@property
def path(self):
return '<virtual>/' + self._key
@property
def basename(self):
import os
return os.path.basename(self.path)
def connect(self, handler, *a):
pass
def disconnect(self, handler):
pass
def exists(self):
return self._key in self._data \
and self._data[self._key] is not None
def touch(self):
self._data.setdefault(self._key, '')
def copyto(self, other):
text = self.read()
other.write(text)
def read(self):
try:
text = self._data[self._key]
except KeyError:
raise FileNotFoundError(self)
else:
if text is None:
raise FileNotFoundError(self)
else:
return text
def readlines(self):
text = self.read()
return text.splitlines(True)
def write(self, text):
self._data[self._key] = text or ''
def writelines(self, lines):
self._data[self._key] = ''.join(lines) or ''
def remove(self):
del self._data[self._key]
| gpl-2.0 |
PyBossa/pybossa | pybossa/default_settings.py | 1 | 4813 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
DEBUG = False
# webserver host and port
HOST = '0.0.0.0'
PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PYBOSSA'
TITLE = 'PYBOSSA'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
LOGO = ''
DEFAULT_LOCALE = 'en'
LOCALES = [('en', 'English'), ('es', u'Español'),
('it', 'Italiano'), ('fr', u'Français'),
('ja', u'日本語'), ('el', u'ελληνικά')]
## Default THEME
THEME = 'default'
## Default number of apps per page
APPS_PER_PAGE = 20
## Default allowed extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
UPLOAD_METHOD = 'local'
## Default number of users shown in the leaderboard
LEADERBOARD = 20
## Default configuration for debug toolbar
ENABLE_DEBUG_TOOLBAR = False
# Cache default key prefix
REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Default cache timeouts
# Project cache
AVATAR_TIMEOUT = 30 * 24 * 60 * 60
APP_TIMEOUT = 15 * 60
REGISTERED_USERS_TIMEOUT = 15 * 60
ANON_USERS_TIMEOUT = 5 * 60 * 60
STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT
STATS_APP_TIMEOUT = 12 * 60 * 60
STATS_DRAFT_TIMEOUT = 24 * 60 * 60
N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60
BROWSE_TASKS_TIMEOUT = 3 * 60 * 60
# Category cache
CATEGORY_TIMEOUT = 24 * 60 * 60
# User cache
USER_TIMEOUT = 15 * 60
USER_TOP_TIMEOUT = 24 * 60 * 60
USER_TOTAL_TIMEOUT = 24 * 60 * 60
# Project Presenters
PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Send emails weekly update every
WEEKLY_UPDATE_STATS = 'Sunday'
# Enable Server Sent Events
SSE = False
# Pro user features. False will make the feature available to all regular users,
# while True will make it available only to pro users
PRO_FEATURES = {
'auditlog': True,
'webhooks': True,
'updated_exports': True,
'notify_blog_updates': True,
'project_weekly_report': True,
'autoimporter': True,
'better_stats': True
}
CORS_RESOURCES = {r"/api/*": {"origins": "*",
"allow_headers": ['Content-Type',
'Authorization'],
"max_age": 21600
}}
FAILED_JOBS_RETRIES = 3
FAILED_JOBS_MAILS = 7
FULLTEXTSEARCH_LANGUAGE = 'english'
STRICT_SLASHES = True
# Background jobs default time outs
MINUTE = 60
TIMEOUT = 10 * MINUTE
# OneSignal GCM Sender ID
# DO NOT MODIFY THIS
GCM_SENDER_ID = "482941778795"
# Unpublish inactive projects
UNPUBLISH_PROJECTS = True
# TTL for ZIP files of personal data
TTL_ZIP_SEC_FILES = 3
# Default cryptopan key
CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.'
# Instruct PYBOSSA to generate absolute paths or not for avatars
AVATAR_ABSOLUTE = True
# Spam accounts to avoid
SPAM = []
| agpl-3.0 |
computersalat/ansible | test/support/integration/plugins/modules/postgresql_query.py | 53 | 10477 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Felix Archambault
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_query
short_description: Run PostgreSQL queries
description:
- Runs arbitrary PostgreSQL queries.
- Can run queries from SQL script files.
- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
version_added: '2.8'
options:
query:
description:
- SQL query to run. Variables can be escaped with psycopg2 syntax
U(http://initd.org/psycopg/docs/usage.html).
type: str
positional_args:
description:
- List of values to be passed as positional arguments to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path_to_script:
description:
- Path to SQL script on the remote host.
- Returns result of the last query in the script.
- Mutually exclusive with I(query).
type: path
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
autocommit:
description:
- Execute in autocommit mode when the query can't be run inside a transaction block
(e.g., VACUUM).
- Mutually exclusive with I(check_mode).
type: bool
default: no
version_added: '2.9'
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
version_added: '2.10'
seealso:
- module: postgresql_db
author:
- Felix Archambault (@archf)
- Andrew Klychkov (@Andersson007)
- Will Rouesnel (@wrouesnel)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Simple select query to acme db
postgresql_query:
db: acme
query: SELECT version()
- name: Select query to db acme with positional arguments and non-default credentials
postgresql_query:
db: acme
login_user: django
login_password: mysecretpass
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
postgresql_query:
db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Insert query to test_table in db test_db
postgresql_query:
db: test_db
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
- name: Run queries from SQL script using UTF-8 client encoding for session
postgresql_query:
db: test_db
path_to_script: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
- name: Example of using autocommit parameter
postgresql_query:
db: test_db
query: VACUUM
autocommit: yes
- name: >
    Insert data into a column of array type using positional_args.
    Note that we use quotes here, the same as when passing JSON, etc.
postgresql_query:
query: INSERT INTO test_table (array_column) VALUES (%s)
positional_args:
- '{1,2,3}'
# Pass list and string vars as positional_args
- name: Set vars
set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Select from test table by passing positional_args as arrays
postgresql_query:
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
'''
RETURN = r'''
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description: Attribute containing the message returned by the command.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in column:value form representing returned rows.
returned: changed
type: list
sample: [{"Column": "Value1"},{"Column": "Value2"}]
rowcount:
description: Number of affected rows.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# it is needed for checking 'no result to fetch' in main(),
# psycopg2 availability will be checked by connect_to_db() into
# ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def list_to_pg_array(elem):
"""Convert the passed list to PostgreSQL array
represented as a string.
Args:
elem (list): List that needs to be converted.
Returns:
elem (str): String representation of PostgreSQL array.
"""
elem = str(elem).strip('[]')
elem = '{' + elem + '}'
return elem
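# For example (illustrative): list_to_pg_array([1, 2, 3]) returns '{1, 2, 3}',
# the literal form PostgreSQL accepts for array values.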
def convert_elements_to_pg_arrays(obj):
"""Convert list elements of the passed object
to PostgreSQL arrays represented as strings.
Args:
obj (dict or list): Object whose elements need to be converted.
Returns:
obj (dict or list): Object with converted elements.
"""
if isinstance(obj, dict):
for (key, elem) in iteritems(obj):
if isinstance(elem, list):
obj[key] = list_to_pg_array(elem)
elif isinstance(obj, list):
for i, elem in enumerate(obj):
if isinstance(elem, list):
obj[i] = list_to_pg_array(elem)
return obj
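# For example (illustrative): convert_elements_to_pg_arrays({'ids': [1, 2]})
# returns {'ids': '{1, 2}'}; non-list values are left untouched.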
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
query=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
positional_args=dict(type='list', elements='raw'),
named_args=dict(type='dict'),
session_role=dict(type='str'),
path_to_script=dict(type='path'),
autocommit=dict(type='bool', default=False),
encoding=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
query = module.params["query"]
positional_args = module.params["positional_args"]
named_args = module.params["named_args"]
path_to_script = module.params["path_to_script"]
autocommit = module.params["autocommit"]
encoding = module.params["encoding"]
if autocommit and module.check_mode:
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
if path_to_script and query:
module.fail_json(msg="path_to_script is mutually exclusive with query")
if positional_args:
positional_args = convert_elements_to_pg_arrays(positional_args)
elif named_args:
named_args = convert_elements_to_pg_arrays(named_args)
if path_to_script:
try:
with open(path_to_script, 'rb') as f:
query = to_native(f.read())
except Exception as e:
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
if encoding is not None:
db_connection.set_client_encoding(encoding)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Prepare args:
if module.params.get("positional_args"):
arguments = module.params["positional_args"]
elif module.params.get("named_args"):
arguments = module.params["named_args"]
else:
arguments = None
# Set defaults:
changed = False
# Execute query:
try:
cursor.execute(query, arguments)
except Exception as e:
if not autocommit:
db_connection.rollback()
cursor.close()
db_connection.close()
module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
statusmessage = cursor.statusmessage
rowcount = cursor.rowcount
try:
query_result = [dict(row) for row in cursor.fetchall()]
    except Psycopg2ProgrammingError as e:
        # Statements without a result set (e.g. plain INSERT) raise
        # 'no results to fetch'; anything else is a genuine failure.
        if to_native(e) == 'no results to fetch':
            query_result = {}
        else:
            module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
except Exception as e:
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
if 'SELECT' not in statusmessage:
if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
s = statusmessage.split()
if len(s) == 3:
if statusmessage.split()[2] != '0':
changed = True
elif len(s) == 2:
if statusmessage.split()[1] != '0':
changed = True
else:
changed = True
else:
changed = True
if module.check_mode:
db_connection.rollback()
else:
if not autocommit:
db_connection.commit()
kw = dict(
changed=changed,
query=cursor.query,
statusmessage=statusmessage,
query_result=query_result,
rowcount=rowcount if rowcount >= 0 else 0,
)
cursor.close()
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 |
Naoto-Imamachi/MIRAGE | scripts/module/preparation/phastcons_score_list.py | 1 | 3683 | #!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
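                # BED12 sketch: data[10] holds comma-separated exon block
                # sizes and data[11] the exon start offsets, both relative to
                # data[1] (chromStart); per-base scores are collected exon by exon.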
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
            print('ERROR: Your BED format file has fewer than three columns.')
            print('A BED format file needs at least three columns [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
| mit |
guewen/odoo | addons/project_issue/res_config.py | 441 | 1492 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class project_issue_settings(osv.osv_memory):
_name = 'project.config.settings'
_inherit = ['project.config.settings', 'fetchmail.config.settings']
_columns = {
'fetchmail_issue': fields.boolean("Create issues from an incoming email account ",
fetchmail_model='project.issue', fetchmail_name='Incoming Issues',
help="""Allows you to configure your incoming mail server, and create issues from incoming emails."""),
}
| agpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/paml/package.py | 5 | 2221 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Paml(MakefilePackage):
"""PAML is a package of programs for phylogenetic analyses of DNA or
    protein sequences using maximum likelihood."""
homepage = "http://abacus.gene.ucl.ac.uk/software/paml.html"
url = "http://abacus.gene.ucl.ac.uk/software/paml4.9e.tgz"
version('4.9e', 'ac5a062bfea1f4eaac79008434030acf')
build_directory = 'src'
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(self.build_directory):
install('baseml', prefix.bin)
install('basemlg', prefix.bin)
install('chi2', prefix.bin)
install('codeml', prefix.bin)
install('evolver', prefix.bin)
install('infinitesites', prefix.bin)
install('mcmctree', prefix.bin)
install('pamp', prefix.bin)
install('yn00', prefix.bin)
install_tree('dat', prefix.dat)
install_tree('Technical', prefix.Technical)
| lgpl-2.1 |
nyasara/azuremono-docker | IronPython-2.7.4/Lib/encodings/cp861.py | 93 | 35587 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp861',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
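### Usage sketch (illustrative; values taken from the tables below)
# u'\xe9'.encode('cp861') -> '\x82' (LATIN SMALL LETTER E WITH ACUTE)
# '\x8b'.decode('cp861') -> u'\xd0' (LATIN CAPITAL LETTER ETH)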
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
u'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f0: 0x008c, # LATIN SMALL LETTER ETH
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x0095, # LATIN SMALL LETTER THORN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
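# --- Hedged usage sketch (added; not part of the original codec table) ---
# Assuming the dictionary above is bound to the conventional gencodec.py
# name `encoding_map`, codecs.charmap_encode() can consume it directly to
# map unicode code points to single bytes:
if __name__ == '__main__':
    import codecs
    # DEGREE SIGN (U+00B0) maps to 0xf8 in this table
    data, length = codecs.charmap_encode(u'\u00b0', 'strict', encoding_map)
    assert data == '\xf8' and length == 1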
| mit |
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/templates/unicode.py | 39 | 1290 | # -*- coding: utf-8 -*-
from unittest import TestCase
from django.template import Template, TemplateEncodingError, Context
from django.utils.safestring import SafeData
class UnicodeTests(TestCase):
def test_template(self):
# Templates can be created from unicode strings.
t1 = Template(u'ŠĐĆŽćžšđ {{ var }}')
# Templates can also be created from bytestrings. These are assumed to
# be encoded using UTF-8.
s = '\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
t2 = Template(s)
s = '\x80\xc5\xc0'
self.assertRaises(TemplateEncodingError, Template, s)
# Contexts can be constructed from unicode or UTF-8 bytestrings.
c1 = Context({"var": "foo"})
c2 = Context({u"var": "foo"})
c3 = Context({"var": u"Đđ"})
c4 = Context({u"var": "\xc4\x90\xc4\x91"})
# Since both templates and all four contexts represent the same thing,
# they all render the same (and are returned as unicode objects and
# "safe" objects as well, for auto-escaping purposes).
self.assertEqual(t1.render(c3), t2.render(c3))
self.assertTrue(isinstance(t1.render(c3), unicode))
self.assertTrue(isinstance(t1.render(c3), SafeData))
| gpl-3.0 |
HarborYuan/cashier | env/Lib/site-packages/wheel/signatures/__init__.py | 70 | 3766 | """
Create and verify jws-js format Ed25519 signatures.
"""
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
__all__ = ['sign', 'verify']
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
"""Lazy import-and-test of ed25519 module"""
global ed25519ll
if not ed25519ll:
try:
import ed25519ll # fast (thousands / s)
except (ImportError, OSError): # pragma nocover
from . import ed25519py as ed25519ll # pure Python (hundreds / s)
test()
return ed25519ll
def sign(payload, keypair):
"""Return a JWS-JS format signature given a JSON-serializable payload and
an Ed25519 keypair."""
get_ed25519ll()
#
header = {
"alg": ALG,
"jwk": {
"kty": ALG, # alg -> kty in jwk-08.
"vk": native(urlsafe_b64encode(keypair.vk))
}
}
encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
secured_input = b".".join((encoded_header, encoded_payload))
sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
encoded_signature = urlsafe_b64encode(signature)
return {"recipients":
[{"header": native(encoded_header),
"signature": native(encoded_signature)}],
"payload": native(encoded_payload)}
def assertTrue(condition, message=""):
if not condition:
raise ValueError(message)
def verify(jwsjs):
"""Return (decoded headers, payload) if all signatures in jwsjs are
consistent, else raise ValueError.
Caller must decide whether the keys are actually trusted."""
get_ed25519ll()
# XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
recipients = jwsjs["recipients"]
encoded_payload = binary(jwsjs["payload"])
headers = []
for recipient in recipients:
assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
h = binary(recipient["header"])
s = binary(recipient["signature"])
header = json.loads(native(urlsafe_b64decode(h)))
assertTrue(header["alg"] == ALG,
"Unexpected algorithm {0}".format(header["alg"]))
if "alg" in header["jwk"] and "kty" not in header["jwk"]:
header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
"Unexpected key type {0}".format(header["jwk"]["kty"]))
vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
secured_input = b".".join((h, encoded_payload))
sig = urlsafe_b64decode(s)
sig_msg = sig+secured_input
verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
verified_header, verified_payload = verified_input.split('.')
verified_header = binary(verified_header)
decoded_header = native(urlsafe_b64decode(verified_header))
headers.append(json.loads(decoded_header))
verified_payload = binary(verified_payload)
# only return header, payload that have passed through the crypto library.
payload = json.loads(native(urlsafe_b64decode(verified_payload)))
return headers, payload
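# Hedged usage sketch (mirrors test() below; assumes one of the optional
# ed25519 backends is importable):
#
#   kp = get_ed25519ll().crypto_sign_keypair()
#   jwsjs = sign({'hello': 'world'}, kp)
#   headers, payload = verify(jwsjs)   # payload == {'hello': 'world'}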
def test():
kp = ed25519ll.crypto_sign_keypair()
payload = {'test': 'onstartup'}
jwsjs = json.loads(json.dumps(sign(payload, kp)))
verify(jwsjs)
jwsjs['payload'] += 'x'
try:
verify(jwsjs)
except ValueError:
pass
else: # pragma no cover
raise RuntimeError("No error from bad wheel.signatures payload.")
| mit |
DerekK88/PICwriter | picwriter/components/stripslotconverter.py | 1 | 9317 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class StripSlotConverter(tk.Component):
"""Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths.
Args:
* **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`).
* **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type).
* **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides.
* **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths.
* **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide.
* **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2`.
* **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail.
Keyword Args:
* **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose.
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default.
"""
def __init__(
self,
wgt_input,
wgt_output,
length1,
length2,
start_rail_width,
end_strip_width,
d,
input_strip=None,
port=(0, 0),
direction="EAST",
):
tk.Component.__init__(self, "StripSlotConverter", locals())
self.portlist = {}
if (not isinstance(input_strip, bool)) and (input_strip is not None):
raise ValueError(
"Invalid input provided for `input_strip`. Please specify a boolean."
)
if input_strip is None:
# Auto-detect based on wgt_input
self.input_strip = (
wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg"
)
else:
# User-override
self.input_strip = input_strip
if self.input_strip:
self.wgt_strip = wgt_input
self.wgt_slot = wgt_output
else:
self.wgt_strip = wgt_output
self.wgt_slot = wgt_input
self.wg_spec = {
"layer": wgt_output.wg_layer,
"datatype": wgt_output.wg_datatype,
}
self.clad_spec = {
"layer": wgt_output.clad_layer,
"datatype": wgt_output.clad_datatype,
}
self.length1 = length1
self.length2 = length2
self.d = d
self.start_rail_width = start_rail_width
self.end_strip_width = end_strip_width
self.port = port
self.direction = direction
self.__build_cell()
self.__build_ports()
""" Translate & rotate the ports corresponding to this specific component object
"""
self._auto_transform_()
def __build_cell(self):
# Sequentially build all the geometric shapes using polygons
# Add strip waveguide taper for region 1
x0, y0 = (0, 0)
pts = [
(x0, y0 - self.wgt_strip.wg_width / 2.0),
(x0, y0 + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
]
strip1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the thin side waveguide for region 1
pts = [
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d),
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
]
thin_strip = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the bottom rail for region 2
pts = [
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
(x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail,
),
]
rail1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the top rail for region 2
pts = [
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail,
),
(x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0),
]
rail2 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add a cladding polygon
pts = [
(x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0,
),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0,
),
(x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0),
]
clad = gdspy.Polygon(
pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype
)
self.add(strip1)
self.add(thin_strip)
self.add(rail1)
self.add(rail2)
self.add(clad)
def __build_ports(self):
# Portlist format:
# example: {'port': (x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {"port": (0, 0), "direction": "WEST"}
self.portlist["output"] = {
"port": (self.length1 + self.length2, 0),
"direction": "EAST",
}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7)
wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2)
wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip)
tk.add(top, wg1)
ssc = StripSlotConverter(
wgt_strip,
wgt_slot,
length1=15.0,
length2=15.0,
start_rail_width=0.1,
end_strip_width=0.4,
d=1.0,
**wg1.portlist["output"]
)
tk.add(top, ssc)
(x1, y1) = ssc.portlist["output"]["port"]
wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot)
tk.add(top, wg2)
gdspy.LayoutViewer(cells=top)
# gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
| mit |
EricForgy/JuliaBox | container/interactive/IJulia/tornado/src/gdrivesync.py | 4 | 9487 | import base64
import shutil
import os
import hashlib
import time
import datetime
import pytz
import isodate
from oauth2client.client import OAuth2Credentials
from oauth2client import GOOGLE_REVOKE_URI, GOOGLE_TOKEN_URI, GOOGLE_AUTH_URI
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
class GDriveSync:
"""Synchronizes folders from Google Drive.
Requires credentials to be provided as base64 encoded JSON representation of OAuth2Credentials, in form field gauth.
If credentials are not found, the Google authentication plugin is invoked
with state as ask_gdrive (/jboxauth/google?state=ask_gdrive). On successful
authentication and authorization, the plugin must call JuliaBox.init_gauth_tok
on the browser with appropriately formatted credentials.
"""
CREDSB64 = None
CREDS = None
GAUTH = None
DRIVE = None
LOCAL_TZ_OFFSET = 0
def __init__(self, loc):
self.loc = loc
with open(os.path.join(loc, '.gdrive')) as f:
self.gfolder = f.read().strip()
def repo_hash(self):
return hashlib.sha1('_'.join([self.loc, self.gfolder])).hexdigest()
def repo_name(self):
return os.path.basename(self.loc) + ' (' + self.gfolder + ')'
def sync(self):
self._sync_folder(self.loc, GDriveSync.folder_id(self.gfolder))
def _sync_folder(self, loc, gfolder):
# list local folder
loc_flist = {}
for f in os.listdir(loc):
if f.startswith('.'):
continue
full_path = os.path.join(loc, f)
is_dir = os.path.isdir(full_path)
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(full_path), pytz.utc)
# + datetime.timedelta(seconds=GDriveSync.LOCAL_TZ_OFFSET)
loc_flist[f] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime}
# list remote folder
gdrive_flist = {}
for f in GDriveSync.DRIVE.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fname = f['title']
full_path = os.path.join(loc, fname)
is_dir = ('application/vnd.google-apps.folder' in f['mimeType'])
mtime = GDriveSync.parse_gdrive_time(f['modifiedDate'])
gdrive_flist[fname] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime, 'id': f['id']}
parent_spec = [{"kind": "drive#fileLink", "id": gfolder}]
# for all files in local folder
for f, attrs in loc_flist.items():
# if it is a folder
if attrs['is_dir']:
# if file not on remote create remote folder, remove file from local list, add to remote list
if f not in gdrive_flist:
gdrive_file = GDriveSync.DRIVE.CreateFile({
'title': f,
'mimeType': 'application/vnd.google-apps.folder',
'parents': parent_spec,
'modifiedDate': attrs['mtime']
})
gdrive_file.Upload()
gdrive_flist[f] = {
'fullpath': attrs['fullpath'],
'is_dir': attrs['is_dir'],
'mtime': attrs['mtime'],
'id': gdrive_file['id']
}
del loc_flist[f]
else: # it is a file
# if file not on remote, upload local file, remove file from local list
if f not in gdrive_flist:
GDriveSync._upload(attrs['fullpath'], parents=parent_spec)
del loc_flist[f]
else:
gf_attrs = gdrive_flist[f]
# if file in remote is older, upload local file
tdiff = (attrs['mtime'] - gf_attrs['mtime']).total_seconds()
# print("existing file tdiff: " + str(tdiff))
if tdiff >= 1:
GDriveSync._upload(attrs['fullpath'], parents=None, remid=gf_attrs['id'])
# if file on remote is newer, download remote file
elif tdiff <= -1:
GDriveSync._download(attrs['fullpath'], gf_attrs['id'])
#else:
# print("already in sync " + attrs['fullpath'])
# remove file from both lists
del loc_flist[f]
del gdrive_flist[f]
# for files remaining in remote list
for f, gf_attrs in gdrive_flist.items():
# create local folder if it does not exist
fullpath = gf_attrs['fullpath']
if gf_attrs['is_dir']:
if not os.path.exists(fullpath):
os.makedirs(fullpath)
# download remote file, remove from remote list
else:
GDriveSync._download(fullpath, gf_attrs['id'])
del gdrive_flist[f]
# gdrive_flist should only have folders if any
# for folders remaining in remote list call _sync_folder recursively on them
for f, gf_attrs in gdrive_flist.items():
self._sync_folder(gf_attrs['fullpath'], gf_attrs['id'])
@staticmethod
def _upload(locpath, parents=None, remid=None):
fname = os.path.basename(locpath)
# print("uploading " + fname + " to " + locpath + ", parents: " + str(parents) + ", remid: " + str(remid))
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid}) if (remid is not None) else \
GDriveSync.DRIVE.CreateFile({'title': fname, 'parents': parents})
gdrive_file.SetContentFile(locpath)
gdrive_file.Upload()
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _download(locpath, remid):
# print("downloading " + locpath + " from " + remid)
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid})
gdrive_file.GetContentFile(locpath)
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _sync_file_time(locpath, gdrive_file):
gdrive_file.FetchMetadata()
mtime = GDriveSync.parse_gdrive_time(gdrive_file['modifiedDate'])
timestamp = (mtime - datetime.datetime.fromtimestamp(0, pytz.utc)).total_seconds()
# print("setting file time to " + str(mtime) + " timestamp: " + str(timestamp))
os.utime(locpath, (timestamp, timestamp))
@staticmethod
def parse_gdrive_time(tm):
if tm is not None:
tm = isodate.parse_datetime(tm)
return tm
@staticmethod
def local_time_offset():
"""Return offset of local zone from GMT"""
if time.localtime().tm_isdst and time.daylight:
return time.altzone
else:
return time.timezone
@staticmethod
def init_creds(credsb64):
GDriveSync.LOCAL_TZ_OFFSET = GDriveSync.local_time_offset()
if GDriveSync.CREDSB64 == credsb64:
return
creds_json = base64.b64decode(credsb64)
creds = OAuth2Credentials.from_json(creds_json)
GDriveSync.CREDS = creds
GDriveSync.CREDSB64 = credsb64
gauth = GoogleAuth()
gauth.settings = {
'client_config_backend': 'settings',
'client_config_file': 'client_secrets.json',
'save_credentials': False,
'oauth_scope': ['https://www.googleapis.com/auth/drive'],
'client_config': {
'client_id': creds.client_id,
'client_secret': creds.client_secret,
'auth_uri': GOOGLE_AUTH_URI,
'token_uri': GOOGLE_TOKEN_URI,
'revoke_uri': GOOGLE_REVOKE_URI,
'redirect_uri': 'http://juliabox.org/jboxauth/google/'
}
}
gauth.LoadClientConfigSettings()
gauth.credentials = creds
GDriveSync.GAUTH = gauth
GDriveSync.DRIVE = GoogleDrive(gauth)
@staticmethod
def folder_name(gfolder):
return gfolder.split('/')[-2]
@staticmethod
def folder_id(gfolder):
return gfolder.split('/')[-1]
@staticmethod
def clone(gfolder, loc, overwrite=False):
if overwrite and os.path.exists(loc):
shutil.rmtree(loc)
# create the folder and .gdrive file
os.mkdir(loc)
with open(os.path.join(loc, '.gdrive'), 'w') as f:
f.write(gfolder)
GDriveSync._clone_gfolder(GDriveSync.folder_id(gfolder), loc)
return GDriveSync(loc)
@staticmethod
def _clone_gfolder(gfolder, loc):
drive = GDriveSync.DRIVE
for f in drive.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fpath = os.path.join(loc, f['title'])
if 'application/vnd.google-apps.folder' in f['mimeType']:
os.mkdir(fpath)
GDriveSync._clone_gfolder(f['id'], fpath)
else:
GDriveSync._download(fpath, f['id'])
@staticmethod
def scan_repo_paths(dirs):
repos = []
for d in dirs:
for pth in os.listdir(d):
if pth.startswith('.'):
continue
fpth = os.path.join(d, pth)
if os.path.isdir(fpth):
gdrive_pth = os.path.join(fpth, '.gdrive')
if os.path.isfile(gdrive_pth):
repos.append(fpth)
return repos
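# Hedged usage sketch (assumes `creds_b64` holds the base64-encoded
# OAuth2Credentials JSON described in the class docstring, and that the
# folder spec has the '<folder name>/<folder id>' form expected by
# folder_name()/folder_id(); the id below is hypothetical):
#
#   GDriveSync.init_creds(creds_b64)
#   repo = GDriveSync.clone('MyNotebooks/0Bhypotheticalid', '/home/juser/MyNotebooks')
#   repo.sync()   # later calls re-synchronize both directions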
| mit |
dzamie/weasyl | weasyl/blocktag.py | 1 | 4024 | # blocktag.py
from error import PostgresError
import define as d
import profile
import searchtag
from libweasyl import ratings
from weasyl.cache import region
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)
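# Hedged example: with blocked_tags == [{'title': 'gore', 'rating': 10}],
# check_list(30, {'gore'}, blocked_tags) is True because the content rating
# (30) is at or above the rating the tag is blocked from (10).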
def suggest(userid, target):
if not target:
return []
return d.execute("SELECT title FROM searchtag"
" WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)"
" ORDER BY title LIMIT 10", [target, userid], options="within")
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def cached_select(userid):
return select(userid)
def insert(userid, tagid=None, title=None, rating=None):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
if tagid:
tag = int(tagid)
try:
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
except PostgresError:
return
elif title:
tag_name = d.get_search_tag(title)
try:
d.engine.execute("""
INSERT INTO blocktag (userid, tagid, rating)
VALUES (
%(user)s,
(SELECT tagid FROM searchtag WHERE title = %(tag_name)s),
%(rating)s
)
""", user=userid, tag_name=tag_name, rating=rating)
except PostgresError:
try:
tag = searchtag.create(title)
except PostgresError:
return
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
cached_select.invalidate(userid)
def remove(userid, tagid=None, title=None):
if tagid:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid])
elif title:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))",
[userid, d.get_search_tag(title)])
cached_select.invalidate(userid)
| apache-2.0 |
phihag/youtube-dl | youtube_dl/extractor/primesharetv.py | 73 | 1853 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
urlencode_postdata,
)
class PrimeShareTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?primeshare\.tv/download/(?P<id>[\da-zA-Z]+)'
_TEST = {
'url': 'http://primeshare.tv/download/238790B611',
'md5': 'b92d9bf5461137c36228009f31533fbc',
'info_dict': {
'id': '238790B611',
'ext': 'mp4',
'title': 'Public Domain - 1960s Commercial - Crest Toothpaste-YKsuFona',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>File not exist<' in webpage:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
fields = self._hidden_inputs(webpage)
headers = {
'Referer': url,
'Content-Type': 'application/x-www-form-urlencoded',
}
wait_time = int(self._search_regex(
r'var\s+cWaitTime\s*=\s*(\d+)',
webpage, 'wait time', default=7)) + 1
self._sleep(wait_time, video_id)
req = sanitized_Request(
url, urlencode_postdata(fields), headers)
video_page = self._download_webpage(
req, video_id, 'Downloading video page')
video_url = self._search_regex(
r"url\s*:\s*'([^']+\.primeshare\.tv(?::443)?/file/[^']+)'",
video_page, 'video url')
title = self._html_search_regex(
r'<h1>Watch\s*(?: )?\s*\((.+?)(?:\s*\[\.\.\.\])?\)\s*(?: )?\s*<strong>',
video_page, 'title')
return {
'id': video_id,
'url': video_url,
'title': title,
'ext': 'mp4',
}
| unlicense |
atruberg/django-custom | django/contrib/admin/templatetags/log.py | 114 | 2125 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit]
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
| bsd-3-clause |
henrytao-me/openerp.positionq | openerp/addons/pad_project/__openerp__.py | 119 | 1478 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Pad on tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['project', 'pad'],
'data': ['project_task.xml'],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
guewen/OpenUpgrade | openerp/service/server.py | 32 | 35650 | #-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class BaseWSGIServerNoBind(werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
# we don't bind because we use the listen socket of PreforkServer#socket
# instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
# don't listen as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
# should we os.close(int(envfd))? It seems Python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
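# Hedged illustration of the (untested) socket-handover path above: a
# supervisor that already holds the listen socket would exec the server with
# something like
#   LISTEN_FDS=<fd number> LISTEN_PID=<server pid>
# so that server_bind() adopts the existing descriptor instead of binding.
# Note that socket.fromfd() receives the env value itself, so LISTEN_FDS is
# treated here as a file descriptor number rather than a count.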
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.modules.modules.ad_paths:
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
for i in xml_files:
for path in openerp.modules.modules.ad_paths:
if i.startswith(path):
# find out which addons path the file belongs to
# and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module] = 1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s', i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the database used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, shutting down a socket whose other side has already
# closed raises error 57 'Socket is not connected', see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``run()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
t.setDaemon(True)
t.start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self, stop=False):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if not stop or test_mode:
# some tests need the http daemon to be available...
self.http_spawn()
if not stop:
# only relevant if we are not in "--stop-after-init" mode
self.cron_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
if self.httpd:
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one, if any, will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
method between workers, but we plan to replace it with a more intelligent
dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen(nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
dumpstacks(sig, None)
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
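# Hedged examples of driving the master process from a shell (assuming
# <pid> is the PreforkServer pid):
#   kill -TTIN <pid>   # spawn one extra HTTP worker
#   kill -TTOU <pid>   # retire one HTTP worker
#   kill -HUP  <pid>   # graceful phoenix restart
#   kill -QUIT <pid>   # dump thread stacks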
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
# wakeup pipe; python doesn't throw EINTR when a syscall is interrupted
# by a signal, simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Empty the cursor pool, we don't want them to be shared among forked workers.
openerp.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
if resource is None:
return
# If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
_logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
# We don't commit suicide in that case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
# Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
# process_work() below processes a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
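# Hedged example: with config['db_name'] == 'db_a,db_b' only those two
# databases are polled for cron jobs; otherwise every database listed by
# openerp.service.db.exp_list(True) is processed in turn.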
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
# don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
vms_diff = (end_vms - start_vms) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice to others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
with registry.cursor() as cr:
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
if config['test_commit']:
_logger.info('test %s has been committed', test_file)
cr.commit()
else:
_logger.info('test %s has been rolled back', test_file)
cr.rollback()
def load_test_file_py(registry, test_file):
# Locate python module based on its filename and run the tests
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod_name, mod_mod in sys.modules.items():
if mod_mod:
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest2.TestSuite()
for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
stream = openerp.modules.module.TestStream()
result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
success = result.wasSuccessful()
if hasattr(registry._assertion_report,'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
    # TODO: move all config checks to args; don't check tools.config here
config = openerp.tools.config
test_file = config['test_file']
dbnames = dbnames or []
rc = 0
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = RegistryManager.new(dbname, update_module=update_module)
# run test_file if provided
if test_file:
_logger.info('loading test file %s', test_file)
if test_file.endswith('yml'):
load_test_file_yml(registry, test_file)
elif test_file.endswith('py'):
load_test_file_py(registry, test_file)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
elif config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
rc = server.run(preload, stop)
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
return rc if rc else 0
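# Illustrative call pattern (hypothetical; the actual caller is the server
# command-line entry point):
#   rc = start(preload=['mydb'], stop=config['stop_after_init'])
#   sys.exit(rc)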
def restart():
""" Restart the server
"""
if os.name == 'nt':
        # run in a thread to let the current thread return a response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/ops/spectral_ops.py | 38 | 4414 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operators (e.g. FFT, RFFT).
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@rfft
@@irfft
@@rfft2d
@@irfft2d
@@rfft3d
@@irfft3d
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.all_util import remove_undocumented
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
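# Worked example (illustrative): an IRFFT input whose innermost dimension is
# 5 -- e.g. the output of a length-8 RFFT -- yields an inferred fft_length of
# 2 * (5 - 1) == 8.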
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
return fft_fn(input_tensor, fft_length, name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
def _irfft(input_tensor, fft_length=None, name=None):
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
return ifft_fn(input_tensor, fft_length, name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
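# Usage sketch (illustrative; assumes the usual TF 1.x graph/session setup):
#   x = _array_ops.ones([8])   # real input, shape (8,)
#   y = rfft(x)                # fft_length inferred as [8]; y has shape (5,)
#   z = irfft(y)               # fft_length inferred as [8]; z has shape (8,)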
remove_undocumented(__name__)
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/stats/_binned_statistic.py | 10 | 25912 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
        be (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
        If the bin edges are specified, the number of bins will be
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
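    Examples
    --------
    A minimal illustration (added for clarity; values are hypothetical):
    >>> from scipy import stats
    >>> x = [1.0, 1.0, 4.0]
    >>> y = [1.0, 2.0, 1.0]
    >>> ret = stats.binned_statistic_dd([x, y], [10.0, 20.0, 30.0],
    ...                                 statistic='mean', bins=[2, 2])
    >>> ret.statistic
    array([[ 10.,  20.],
           [ 30.,  nan]])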
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
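        # e.g. with edges [1., 2., 3.], a sample value of exactly 3.0 is
        # moved out of the overflow bin and into the last interior bin.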
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
            except Exception:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/idlelib/IdleHistory.py | 122 | 4052 | "Implement Idle Shell history mechanism with History class"
from idlelib.configHandler import idleConf
class History:
''' Implement Idle Shell history mechanism.
store - Store source statement (called from PyShell.resetoutput).
fetch - Fetch stored statement matching prefix already entered.
history_next - Bound to <<history-next>> event (default Alt-N).
history_prev - Bound to <<history-prev>> event (default Alt-P).
'''
def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break"
def history_prev(self, event):
"Fetch earlier statement; start with most recent."
self.fetch(reverse=True)
return "break"
def fetch(self, reverse):
        '''Fetch statement and replace current line in text widget.
Set prefix and pointer as needed for successive fetches.
Reset them to None, None when returning to the start line.
Sound bell when return to start line or cannot leave a line
because cyclic is False.
'''
nhist = len(self.history)
pointer = self.pointer
prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
prefix = self.text.get("iomark", "end-1c")
if reverse:
pointer = nhist # will be decremented
else:
if self.cyclic:
pointer = -1 # will be incremented
else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if not self.cyclic and pointer < 0: # abort history_prev
return
else:
if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", item)
break
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.pointer = pointer
self.prefix = prefix
def store(self, source):
"Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.pointer = None
self.prefix = None
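# Usage sketch (illustrative; `text` is the Text wrapper PyShell provides):
#   history = History(text)
#   history.store("print('hello')")   # called from PyShell.resetoutput
#   # <<history-previous>>/<<history-next>> then cycle stored statements
#   # matching the prefix already typed after the iomark.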
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False)
| gpl-3.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/flask/blueprints.py | 169 | 16872 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, ``None``
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprints.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None,
root_path=None):
_PackageBoundObject.__init__(self, import_name, template_folder,
root_path=root_path)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
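        A hypothetical sketch (names are illustrative only)::
            bp = Blueprint('admin', __name__, url_prefix='/admin')
            @bp.route('/ping')
            def ping():
                return 'pong'   # exposed as endpoint 'admin.ping'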
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
        only. Please be aware that routing is not local to a blueprint,
        so an error handler for 404 is usually not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Non-decorator version of the :meth:`errorhandler` error attach
function, akin to the :meth:`~flask.Flask.register_error_handler`
application-wide function of the :class:`~flask.Flask` object but
for error handlers limited to this blueprint.
.. versionadded:: 0.11
"""
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
| apache-2.0 |
cloudbase/nova-virtualbox | nova/virt/ironic/patcher.py | 7 | 7408 | # coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Helper classes for Ironic HTTP PATCH creation.
"""
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
def create(node):
"""Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node.
"""
if 'pxe' in node.driver:
return PXEDriverFields(node)
else:
return GenericDriverFields(node)
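# For example (illustrative): a node using the classic 'pxe_ipmitool' driver
# gets PXEDriverFields, while 'agent_ipmitool' falls through to
# GenericDriverFields.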
class GenericDriverFields(object):
def __init__(self, node):
self.node = node
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
        :returns: a json-patch with the fields that need to be updated.
"""
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb',
'op': 'add',
'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format',
'op': 'add',
'value': CONF.default_ephemeral_format})
if preserve_ephemeral is not None:
patch.append({'path': '/instance_info/preserve_ephemeral',
'op': 'add', 'value': str(preserve_ephemeral)})
capabilities = {}
# read the flavor and get the extra_specs value.
extra_specs = flavor.get('extra_specs')
# scan through the extra_specs values and ignore the keys
# not starting with keyword 'capabilities'.
for key, val in six.iteritems(extra_specs):
if not key.startswith('capabilities:'):
continue
# split the extra_spec key to remove the keyword
# 'capabilities' and get the actual key.
capabilities_string, capabilities_key = key.split(':', 1)
if capabilities_key:
capabilities[capabilities_key] = val
if capabilities:
patch.append({'path': '/instance_info/capabilities',
'op': 'add', 'value': jsonutils.dumps(capabilities)})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
        :returns: a json-patch with the fields that need to be updated.
"""
return []
class PXEDriverFields(GenericDriverFields):
def _get_kernel_ramdisk_dict(self, flavor):
"""Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned.
"""
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if deploy_kernel and deploy_ramdisk:
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
        :returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_deploy_patch(
instance, image_meta, flavor, preserve_ephemeral)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains both ramdisk
# and kernel ids, use them.
for key, value in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': '/driver_info/%s' % key,
'op': 'add', 'value': value})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
        :returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_cleanup_patch(
instance, network_info, flavor)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains a ramdisk and
# kernel id remove it from nodes as part of the tear down process
for key in self._get_kernel_ramdisk_dict(flavor):
if key in self.node.driver_info:
patch.append({'op': 'remove',
'path': '/driver_info/%s' % key})
return patch
| apache-2.0 |
KyleJamesWalker/ansible | lib/ansible/modules/cloud/openstack/_quantum_subnet.py | 12 | 10250 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: quantum_subnet
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use M(os_subnet) instead.
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: True
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: True
auth_url:
description:
- The keystone URL for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true
default: None
name:
description:
- The name of the subnet that should be created
required: true
default: None
cidr:
description:
- The CIDR representation of the subnet that should be assigned to the subnet
required: true
default: None
tenant_name:
description:
- The name of the tenant for whom the subnet should be created
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
version_added: "1.4"
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should be allocated
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the virtual machines
required: false
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Create a subnet for a tenant with the specified subnet
quantum_subnet:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
tenant_name: tenant1
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
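# An additional illustrative example (values are hypothetical)
- name: Create a subnet with an explicit DHCP allocation pool
  quantum_subnet:
    state: present
    login_username: admin
    login_password: admin
    login_tenant_name: admin
    tenant_name: tenant1
    network_name: network1
    name: net1subnet2
    cidr: 192.168.1.0/24
    allocation_pool_start: 192.168.1.100
    allocation_pool_end: 192.168.1.200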
'''
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
_os_keystone = None
_os_tenant_id = None
_os_network_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception as e:
module.fail_json(msg="Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_subnet_id(module, neutron):
global _os_network_id
subnet_id = None
_os_network_id = _get_net_id(neutron, module)
if not _os_network_id:
module.fail_json(msg = "network id of network not found.")
else:
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception as e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _create_subnet(module, neutron):
neutron.format = 'json'
subnet = {
'name': module.params['name'],
'ip_version': module.params['ip_version'],
'enable_dhcp': module.params['enable_dhcp'],
'tenant_id': _os_tenant_id,
'gateway_ip': module.params['gateway_ip'],
'dns_nameservers': module.params['dns_nameservers'],
'network_id': _os_network_id,
'cidr': module.params['cidr'],
}
if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
allocation_pools = [
{
'start' : module.params['allocation_pool_start'],
'end' : module.params['allocation_pool_end']
}
]
subnet.update({'allocation_pools': allocation_pools})
if not module.params['gateway_ip']:
subnet.pop('gateway_ip')
if module.params['dns_nameservers']:
subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
else:
subnet.pop('dns_nameservers')
try:
new_subnet = neutron.create_subnet(dict(subnet=subnet))
except Exception as e:
module.fail_json(msg = "Failure in creating subnet: %s" % e.message)
return new_subnet['subnet']['id']
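# For reference, a sketch of the request body assembled above (illustrative
# values only): 'gateway_ip' is dropped when unset and 'dns_nameservers'
# becomes a list, e.g.
#   {'subnet': {'name': 'net1subnet', 'cidr': '192.168.0.0/24',
#               'ip_version': '4', 'enable_dhcp': True,
#               'tenant_id': '<tenant-id>', 'network_id': '<network-id>',
#               'dns_nameservers': ['8.8.8.8']}}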
def _delete_subnet(module, neutron, subnet_id):
try:
neutron.delete_subnet(subnet_id)
except Exception as e:
module.fail_json( msg = "Error in deleting subnet: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
network_name = dict(required=True),
cidr = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
subnet_id = _create_subnet(module, neutron)
module.exit_json(changed = True, result = "Created" , id = subnet_id)
else:
module.exit_json(changed = False, result = "success" , id = subnet_id)
else:
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.exit_json(changed = False, result = "success")
else:
_delete_subnet(module, neutron, subnet_id)
module.exit_json(changed = True, result = "deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/email/encoders.py | 146 | 1786 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
from base64 import encodebytes as _bencode
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(b' ', b'=20')
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = str(_bencode(orig), 'ascii')
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload(decode=True)
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg['Content-Transfer-Encoding'] = '7bit'
return
# We play a trick to make this go fast. If decoding from ASCII succeeds,
# we know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.decode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
| apache-2.0 |
YehudaItkin/virt-test | virttest/staging/backports/__init__.py | 23 | 3497 | """
This module contains backported functions that are not present in Python 2.4
but are standard in more recent versions.
"""
import re
import sys
# Import backported modules
import simplejson
import collections
import itertools
if not hasattr(itertools, 'product'):
import _itertools
itertools.product = _itertools.product
# pylint: disable=I0011,W0622
# noinspection PyShadowingBuiltins
def _next(*args):
"""
Retrieve the next item from the iterator by calling its next() method.
If default is given, it is returned if the iterator is exhausted,
otherwise StopIteration is raised.
New in version 2.6.
:param iterator: the iterator
:type iterator: iterator
:param default: the value to return if the iterator raises StopIteration
:type default: object
:return: The object returned by iterator.next()
:rtype: object
"""
if len(args) == 2:
try:
return args[0].next()
except StopIteration:
return args[1]
elif len(args) > 2:
raise TypeError("next expected at most 2 arguments, %s" % len(args))
else:
return args[0].next()
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _any(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if element:
return True
return False
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _all(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if not element:
return False
return True
# Adapted from http://code.activestate.com/recipes/576847/
# :codeauthor: Vishal Sapre
# :license: MIT
BIN_HEX_DICT = {
'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100',
'5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001',
'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110',
'f': '1111', 'L': ''}
# match left leading zeroes, but don't match a single 0 for the case of
# bin(0) == '0b0'
BIN_ZSTRIP = re.compile(r'^0*(?=[01])')
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _bin(number):
"""
Adapted from http://code.activestate.com/recipes/576847/
:codeauthor: Vishal Sapre
:license: MIT
A foolishly simple look-up method of getting binary string from an integer
This happens to be faster than all other ways!!!
"""
# =========================================================
# create hex of int, remove '0x'. now for each hex char,
# look up binary string, append in list and join at the end.
# =========================================================
# replace leading left zeroes with '0b'
tmp = [BIN_HEX_DICT[hstr] for hstr in hex(number)[2:]]
return BIN_ZSTRIP.sub('0b', ''.join(tmp))
if not hasattr(__builtins__, 'next'):
next = _next
else:
next = next
if not hasattr(__builtins__, 'any'):
any = _any
else:
any = any
if not hasattr(__builtins__, 'all'):
all = _all
else:
all = all
if not hasattr(__builtins__, 'bin'):
bin = _bin
else:
bin = bin
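if __name__ == '__main__':
    # Illustrative self-checks (not part of the original module): each
    # backport should agree with the builtin it stands in for.
    print(next(iter([1, 2]), 'default'))  # -> 1
    print(next(iter([]), 'default'))      # -> 'default'
    print(any([0, '', 3]))                # -> True
    print(all([1, 0, 3]))                 # -> False
    print(bin(10))                        # -> 0b1010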
| gpl-2.0 |
EndyKaufman/django-postgres-angularjs-blog | app/manager/migrations/0006_properties.py | 1 | 1170 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0005_add_fields_and_set_defaults'),
]
operations = [
migrations.CreateModel(
name='Properties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=512, unique=True)),
('value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit |
interfect/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/input.py | 292 | 114315 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
# Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
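# Examples (illustrative, exercising only the suffix heuristic above):
#   IsPathSection('foo_dir')    -> True
#   IsPathSection('foo_files')  -> True   (the trailing 's' is stripped)
#   IsPathSection('sources!')   -> the '!' is stripped before the lookup
#   IsPathSection('cflags')     -> False  (no '_dir'/'_file'/'_path' suffix)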
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included is None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
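# Example (illustrative): CheckedEval accepts GYP-style dict/list literals
# and rejects repeated keys:
#   CheckedEval("{'targets': [{'target_name': 'a'}]}")  -> parsed dict
#   CheckedEval("{'a': 1, 'a': 2}")                     -> raises GypError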
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes is not None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
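# Examples (illustrative):
#   FindEnclosingBracketGroup('<(foo <(bar)) blah')  -> (1, 13)
#   FindEnclosingBracketGroup('no brackets here')    -> (-1, -1)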
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
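# Examples (illustrative): IsStrCanonicalInt('10') -> True,
# IsStrCanonicalInt('-5') -> True, IsStrCanonicalInt('010') -> False
# (leading zero), IsStrCanonicalInt('-0') -> False; non-string input
# falls through and yields a falsy value.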
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
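# Example (illustrative): on win32 a leading 'cat ' is rewritten, so
# 'cat foo.txt' becomes 'type foo.txt'; on other platforms the command
# is returned unchanged.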
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is expected to receive a list in return; not all callers do,
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement is not None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
# an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
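# Illustrative expansions (assuming 'foo' is defined in |variables|):
#   '<(foo)'           -> the value of variables['foo'] (early phase)
#   '<!(echo hi)'      -> 'hi', the command's output, cached per directory
#   '<@(foo)'          -> list expansion when variables['foo'] is a list
#   '<|(list.txt a b)' -> writes a file list and expands to its path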
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result is None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
# Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
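# Example (illustrative): with variables = {'OS': 'linux'},
#   EvalSingleCondition('OS=="linux"', {'defines': ['LINUX']}, None,
#                       PHASE_EARLY, {'OS': 'linux'}, 'foo.gyp')
# returns {'defines': ['LINUX']}; a false condition would return None here.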
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict is not None:
# Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
# variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
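# Illustrative sketch (hypothetical targets): 'dependencies': ['other.gyp:*']
# expands to one qualified entry per target defined in other.gyp, e.g.
# ['other.gyp:lib1#target', 'other.gyp:lib2#target'], skipping any target
# whose dict sets 'suppress_wildcard' to a true value.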
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
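# e.g. Unify(['a', 'b', 'a', 'c']) -> ['a', 'b', 'c']: the membership test
# drops later duplicates while setdefault records each first occurrence.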
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
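  # Illustrative sketch: for a graph where A depends on B and B depends on C,
  # calling FlattenToList() on the root node yields [C, B, A] -- every ref
  # appears after all of its dependencies. This is Kahn's algorithm:
  # repeatedly emit a node whose dependencies have all been emitted already.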
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
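  # Illustrative sketch: for a cycle A -> B -> C -> A, FindCycles() reports
  # one list whose first and last elements are the same node, tracing the
  # path that closes the cycle.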
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
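  # Illustrative sketch (hypothetical targets): if A depends on B, and B
  # lists C in 'export_dependent_settings', A's direct-and-imported list
  # becomes [B, C] -- C is inserted right after the dependency that
  # exported it.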
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
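# Illustrative sketch (hypothetical files): moving 'foo.c' from src/src.gyp
# into dst/dst.gyp gives
#   MakePathRelative('dst/dst.gyp', 'src/src.gyp', 'foo.c') -> '../src/foo.c'
# while items such as '$(VAR)', '-lm' or '/abs/path' pass through untouched.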
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
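# Illustrative sketch: prepending (append=False) with to == ['a', 'b'] and
# fro == ['b', 'c'] first removes the existing 'b' and then inserts, giving
# ['b', 'c', 'a'] -- singletons migrate to the earliest possible position.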
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
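# Illustrative sketch of the list-merge policies (hypothetical keys):
#   to = {'sources': ['a.c']}
#   MergeDicts(to, {'sources': ['b.c']}, 'x.gyp', 'x.gyp')   # append
#     -> to['sources'] == ['a.c', 'b.c']
#   MergeDicts(to, {'sources=': ['c.c']}, 'x.gyp', 'x.gyp')  # replace
#     -> to['sources'] == ['c.c']
# 'sources+' would prepend, and 'sources?' only sets the list if absent.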
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
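# Illustrative sketch: a 'Debug' configuration with 'inherit_from':
# ['Common'] receives Common's settings first (recursively, parents before
# children), then its own, and any inherited 'abstract' flag is stripped
# from the merged result.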
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
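# Illustrative sketch (hypothetical dict): given
#   {'sources': ['a.cc', 'a_win.cc'], 'sources!': ['a_win.cc']}
# ProcessListFiltersInDict leaves 'sources' == ['a.cc'] and adds
# 'sources_excluded' == ['a_win.cc']; a 'sources/' regex list drives the
# same machinery through include/exclude patterns.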
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
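# e.g. TurnIntIntoStrInDict({1: 2, 'a': [3, {'b': 4}]}) rewrites the dict in
# place to {'1': '2', 'a': ['3', {'b': '4'}]}, so generators only ever see
# string keys and values.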
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
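# Illustrative sketch: 'foo/a.gyp:base' and 'foo/b.gyp:base' collide because
# both map to the key 'foo:base', and generators would emit build output for
# "base" into the same directory.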
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| gpl-3.0 |
Andy-Amoy/shadowsocks | shadowsocks/asyncdns.py | 655 | 17416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
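# Illustrative sketch: build_request(b'www.google.com', QTYPE_A) yields a
# 32-byte query: 2 random ID bytes, the fixed 10-byte header (RD flag set,
# QDCOUNT=1), the QNAME b'\x03www\x06google\x03com\x00', then QTYPE/QCLASS.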
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
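# Illustrative sketch: a length byte with the top two bits set (0xC0) marks
# a compression pointer; e.g. the two bytes b'\xc0\x0c' mean "the name
# continues at offset 12", which is where the question name sits in a
# typical response.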
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
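# Illustrative sketch (hypothetical response): for an A record the answer
# tuple is (name, '93.184.216.34', QTYPE_A, QCLASS_IN, ttl), and the first
# return value is the number of bytes the record consumed.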
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
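# Illustrative note (not part of the original module): for a typical recursive
# answer the two flag bytes are 0x81 0x80, so parse_header reports
# res_qr == 128 (a response), res_tc == 0 (not truncated), res_ra == 128
# (recursion available) and res_rcode == 0 (NOERROR).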
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    if hostname[-1:] == b'.':  # slice, not index, so this also works on Python 3 bytes
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
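# Illustrative note (assumption: VALID_HOSTNAME is the usual RFC 1123 label
# regex defined near the top of this module, matching 1-63 alphanumeric/hyphen
# characters that neither start nor end with a hyphen):
#   is_valid_hostname(b'example.com')  -> True
#   is_valid_hostname(b'-bad-.com')    -> False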
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self, server_list=None):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
            raise Exception('already added to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
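            # No usable A record came back on the first (IPv4) attempt: fall
            # back to an AAAA query once before reporting failure below.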
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
                logging.warning('received a packet from an address other than our dns servers')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
    dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
| apache-2.0 |
robinro/ansible-modules-extras | network/f5/bigip_sys_db.py | 23 | 5861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
- Manage BIG-IP system database variables
version_added: "2.2"
options:
key:
description:
- The database variable to manipulate.
required: true
state:
description:
      - The state of the variable on the system. When C(present), guarantees
        that an existing variable is set to C(value). When C(reset), sets the
        variable back to its default value. At least one of C(value) and state
        C(reset) is required.
required: false
default: present
choices:
- present
- reset
value:
description:
      - The value to set the key to. At least one of C(value) and state
        C(reset) is required.
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "boot.quiet"
value: "disable"
delegate_to: localhost
- name: Disable the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
value: "false"
delegate_to: localhost
- name: Reset the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
state: "reset"
delegate_to: localhost
'''
RETURN = '''
name:
description: The key in the system database that was specified
returned: changed and success
type: string
sample: "setup.run"
default_value:
description: The default value of the key
returned: changed and success
type: string
sample: "true"
value:
description: The value that you set the key to
returned: changed and success
type: string
sample: "false"
'''
try:
from f5.bigip import ManagementRoot
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpSysDb(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
state = self.params['state']
value = self.params['value']
if not state == 'reset' and not value:
raise F5ModuleError(
"When setting a key, a value must be supplied"
)
current = self.read()
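        # In check mode only report whether a change would occur; otherwise
        # apply the requested state and re-read the variable afterwards.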
if self.params['check_mode']:
if value == current:
changed = False
else:
changed = True
else:
if state == "present":
changed = self.present()
elif state == "reset":
changed = self.reset()
current = self.read()
result.update(
name=current.name,
default_value=current.defaultValue,
value=current.value
)
result.update(dict(changed=changed))
return result
def read(self):
dbs = self.api.tm.sys.dbs.db.load(
name=self.params['key']
)
return dbs
def present(self):
current = self.read()
if current.value == self.params['value']:
return False
current.update(value=self.params['value'])
current.refresh()
if current.value != self.params['value']:
raise F5ModuleError(
"Failed to set the DB variable"
)
return True
def reset(self):
current = self.read()
default = current.defaultValue
if current.value == default:
return False
current.update(value=default)
current.refresh()
if current.value != current.defaultValue:
raise F5ModuleError(
"Failed to reset the DB variable"
)
return True
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
key=dict(required=True),
state=dict(default='present', choices=['present', 'reset']),
value=dict(required=False, default=None)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
bert9bert/statsmodels | statsmodels/tsa/statespace/kalman_filter.py | 2 | 86079 | """
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (validate_vector_shape, validate_matrix_shape,
reorder_missing_matrix, reorder_missing_vector)
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_NO_GAIN = 0x10
MEMORY_NO_SMOOTHING = 0x20
MEMORY_NO_STD_FORECAST = 0x40
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |
MEMORY_NO_STD_FORECAST
)
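# Illustrative note (not part of the original source): the memory options
# compose bitwise, e.g. MEMORY_NO_FORECAST | MEMORY_NO_FILTERED == 0x05, and
# MEMORY_CONSERVE above is simply the OR of all individual flags.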
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
    (bool) Flag to prevent storing temporary variables related to smoothing.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
FILTER_UNIVARIATE = 0x10
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED = 0x20
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
if self._compatibility_mode and not self.filter_method == 1:
raise NotImplementedError('Only conventional Kalman filtering'
' is available. Consider updating'
' dependencies for more options.')
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
            Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
        9
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN = 0x10
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING = 0x20
            Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
        MEMORY_NO_STD_FORECAST = 0x40
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
Note that if using a Scipy version less than 0.16, the options
MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
have no effect.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : integer, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
if (self._compatibility_mode and
self.filter_timing == TIMING_INIT_FILTERED):
raise NotImplementedError('Only "predicted" Kalman filter'
' timing is available. Consider'
' updating dependencies for more'
' options.')
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
"""
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
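        # Conserve all memory except the per-observation loglikelihood values,
        # which are needed for the sum below (XOR clears the
        # MEMORY_NO_LIKELIHOOD bit from the MEMORY_CONSERVE mask).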
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
return np.sum(kfilter.loglikelihood[loglikelihood_burn:])
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
# Set any burned observations to have zero likelihood
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
if not measurement_shocks.shape == (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement'
' shocks. Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
if not state_shocks.shape == (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = np.random.multivariate_normal(
self._initial_state, self._initial_state_cov)
elif self.initialization == 'stationary':
from scipy.linalg import solve_discrete_lyapunov
# (I - T)^{-1} c = x => (I - T) x = c
initial_state_mean = np.linalg.solve(
np.eye(self.k_states) - self['transition', :, :, 0],
self['state_intercept', :, 0])
R = self['selection', :, :, 0]
Q = self['state_cov', :, :, 0]
selected_state_cov = R.dot(Q).dot(R.T)
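            # The stationary covariance solves P = T P T' + R Q R', a
            # discrete Lyapunov equation in the transition matrix T.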
initial_state_cov = solve_discrete_lyapunov(
self['transition', :, :, 0], selected_state_cov)
initial_state = np.random.multivariate_normal(
initial_state_mean, initial_state_cov)
elif self.initialization == 'approximate_diffuse':
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t]) +
measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
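            # For a time-invariant model this recursion yields
            # irf[t] = Z T^t R * impulse.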
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
exception = ('Impulse response functions for models with'
' time-varying %s matrix requires an updated'
' time-varying matrix for any periods beyond those in'
' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], steps)
if mat.ndim < 3 or not mat.shape[2] == steps:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
self.k_posdef, **model_kwargs)
model.initialize_approximate_diffuse()
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
# Since simulate returns the zero-th period, we need to simulate
# steps + 1 periods and exclude the zero-th observation.
steps += 1
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
irf = irf[1:]
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
        transition equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
kalman_gain : array
The Kalman gain at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov',
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not self._compatibility_mode and not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
if not self._compatibility_mode:
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
else:
self._kalman_gain = None
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
# Copy the provided arrays (which are as the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
self.forecasts = np.zeros((self.k_endog, self.nobs))
self.forecasts_error = np.zeros((self.k_endog, self.nobs))
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs))
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
                    # (i.e. the non-missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
"""
if self._standardized_forecasts_error is None:
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t], trans=1))
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end].
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
# Construct the design and observation intercept and covariance
# matrices for start-npadded:end. If not time-varying in the original
# model, then they will be copied over if none are provided in
# `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'filter_timing': self.filter_timing,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
# Note: this doesn't use self, and can either be a static method or
# moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_representation(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
        predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
The provided ranges must be conformable, meaning that it must be that
`end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
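# Editor's note: the sketch below is illustrative only and is not part of
# statsmodels (the relative imports above require package context, so run it
# as a module if at all).  With hypothetical numbers, it demonstrates the
# standardization described in `standardized_forecasts_error`:
# given F_t = L_t L_t', solve L_t v_t^s = v_t with a triangular solver.
if __name__ == "__main__":
    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    F = np.array([[2.0, 0.5], [0.5, 1.0]])  # forecast error covariance F_t
    v = rng.multivariate_normal([0.0, 0.0], F, size=2000).T  # errors v_t

    # cho_factor returns the upper Cholesky factor U with F = U' U, so
    # trans=1 solves U' x = v, i.e. L x = v with L = U' lower triangular
    # (this mirrors the trans=1 call in the property above).
    upper, _ = linalg.cho_factor(F)
    v_std = linalg.solve_triangular(upper, v, trans=1)

    # The standardized errors should have near-identity sample covariance.
    print(np.cov(v_std))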
| bsd-3-clause |
rjw57/cubbie | migrations/versions/316bb58e84f_add_user_identities.py | 1 | 1110 | """add user_identities
Revision ID: 316bb58e84f
Revises: 38c8ec357e0
Create Date: 2015-03-11 01:40:12.157458
"""
# revision identifiers, used by Alembic.
revision = '316bb58e84f'
down_revision = '38c8ec357e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_identities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', sa.Text(), nullable=False),
sa.Column('provider_user_id', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities')
op.drop_table('user_identities')
### end Alembic commands ###
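# With a configured alembic.ini, this revision would typically be applied or
# reverted via the Alembic CLI, e.g.:
#
#     alembic upgrade 316bb58e84f
#     alembic downgrade 38c8ec357e0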
| mit |
njl/pycon | symposion/schedule/tests/factories.py | 3 | 1750 | import datetime
import factory
import factory.fuzzy
from pycon.tests.factories import PyConTutorialProposalFactory
from symposion.conference.models import Section
from symposion.conference.tests.factories import SectionFactory
from symposion.speakers.tests.factories import SpeakerFactory
from ..models import Presentation, Slot, SlotKind, Day, Schedule
class ScheduleFactory(factory.DjangoModelFactory):
class Meta:
model = Schedule
section = factory.SubFactory(SectionFactory)
class DayFactory(factory.DjangoModelFactory):
class Meta:
model = Day
schedule = factory.SubFactory(ScheduleFactory)
date = factory.fuzzy.FuzzyDate(start_date=datetime.date(1900, 1, 1))
class SlotKindFactory(factory.DjangoModelFactory):
class Meta:
model = SlotKind
schedule = factory.SubFactory(ScheduleFactory)
label = factory.fuzzy.FuzzyText()
class SlotFactory(factory.DjangoModelFactory):
class Meta:
model = Slot
day = factory.SubFactory(DayFactory)
# .kind and .day both need to point at the same schedule
kind = factory.SubFactory(
SlotKindFactory,
schedule=factory.LazyAttribute(lambda kind: kind.factory_parent.day.schedule)
)
start = factory.LazyAttribute(lambda n: datetime.time())
end = factory.LazyAttribute(lambda n: datetime.time())
class PresentationFactory(factory.DjangoModelFactory):
class Meta:
model = Presentation
title = 'Presentation'
description = 'Description'
abstract = 'Abstract'
speaker = factory.SubFactory(SpeakerFactory)
proposal_base = factory.SubFactory(PyConTutorialProposalFactory)
section = Section.objects.get(slug='tutorials')
slot = factory.SubFactory(SlotFactory)
| bsd-3-clause |
cwgreene/Nanostructure-Simulator | utils/plot_trajectories.py | 1 | 1140 | import os
import sys
import re
import pylab
def parse_trajectory_line(line):
trajectory = []
	for x, y in re.findall(r"\(([0-9.]+), ([0-9.]+)\)", line):
trajectory.append((float(x),float(y)))
return trajectory
def generate_trajectories(file):
#get rid fo two first lines
file.readline()
file.readline()
#parse each line
for line in file:
yield parse_trajectory_line(line)
def open_trajectory_file(n):
for filename in os.listdir("results"):
if re.match(str(n)+"traj",filename):
return open("results/"+filename)
raise "File not found"
def display_trajectories(n):
input =""
file = open_trajectory_file(n)
trajectory_gen = generate_trajectories(file)
trajectory = trajectory_gen.next()
interactive = True
i = 0
while input != 'q':
first = map(lambda x: x[0],trajectory)
second = map(lambda x: x[1],trajectory)
pylab.plot(first,second)
if interactive:
input = raw_input()
if input == "go":
i += 1
interactive=False
if i %100 == 0:
print i
raw_input()
		try:
			trajectory = trajectory_gen.next()
		except StopIteration:
			print "Done"
			break
if __name__=="__main__":
display_trajectories(sys.argv[1])
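# Example usage (assuming the simulator wrote a file whose name starts with
# "<n>traj" into results/, e.g. results/5traj.dat):
#
#     python plot_trajectories.py 5
#
# Press Enter to step through trajectories, type "go" to draw the rest
# without pausing, or "q" to quit.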
| mit |
guymakam/Kodi-Israel | plugin.video.israelive/resources/lib/livestreamer/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| gpl-2.0 |
danieljaouen/ansible | lib/ansible/plugins/inventory/auto.py | 25 | 2196 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: auto
plugin_type: inventory
author:
- Matt Davis <@nitzmahone>
short_description: Loads and executes an inventory plugin specified in a YAML config
description:
- By whitelisting C(auto) as the final inventory plugin, any YAML inventory config file with a
C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
- To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
'''
EXAMPLES = '''
# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
# all installed inventory plugins.
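# For illustration (hypothetical file): any YAML inventory source whose root
# contains a `plugin` key is dispatched to the named plugin, e.g. an
# inventory.yml containing:
#
#   plugin: constructed
#   strict: False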
'''
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader
class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=True):
config_data = loader.load_from_file(path, cache=False)
plugin_name = config_data.get('plugin')
if not plugin_name:
raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
plugin = inventory_loader.get(plugin_name)
if not plugin:
raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
if not plugin.verify_file(path):
raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))
plugin.parse(inventory, loader, path, cache=cache)
| gpl-3.0 |
jonathansick/androcmd | scripts/phat_baseline_test.py | 1 | 3612 | #!/usr/bin/env python
# encoding: utf-8
"""
Grid computation of dust attenuation for old vs. young stellar populations.
2015-05-12 - Created by Jonathan Sick
"""
import argparse
from androcmd.phatpipeline import PhatCatalog
from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline
def main():
args = parse_args()
if args.pipeline == 'solarz':
# Use the single-Z solar pipeline
Pipeline = SolarZPipeline
elif args.pipeline == 'threez':
# Use the three-metallicity track pipeline
Pipeline = ThreeZPipeline
isoc = dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang')
pipeline = Pipeline(brick=23,
root_dir=args.model_name,
isoc_args=isoc)
if args.fit is not None:
dataset = PhatCatalog(args.brick)
pipeline.fit(args.fit, [args.fit], dataset)
if args.plot_hess is not None:
from androcmd.baselineexp import plot_fit_hess_grid
dataset = PhatCatalog(args.brick)
plot_fit_hess_grid(args.plot_hess, pipeline, dataset)
if args.plot_diff is not None:
from androcmd.baselineexp import plot_diff_hess_grid
dataset = PhatCatalog(args.brick)
plot_diff_hess_grid(args.plot_diff, pipeline, dataset)
if args.plot_sfh is not None:
from androcmd.baselineexp import sfh_comparison_plot
dataset = PhatCatalog(args.brick)
sfh_comparison_plot(args.plot_sfh, pipeline, dataset)
if args.plot_zsfh is not None:
from androcmd.baselineexp import plot_sfh_metallicity_trends
dataset = PhatCatalog(args.brick)
for fit_key in args.plot_zsfh:
plot_path = "{model}_b{brick:d}_zsfh_{key}".format(
model=args.model_name, brick=args.brick, key=fit_key)
plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key)
if args.chi_table is not None:
from androcmd.baselineexp import tabulate_fit_chi
dataset = PhatCatalog(args.brick)
tabulate_fit_chi(args.chi_table, pipeline, dataset)
if args.plot_isoc is not None:
from androcmd.baselineexp import plot_isocs, plot_isocs_lewis
dataset = PhatCatalog(args.brick)
plot_isocs(args.plot_isoc, pipeline, dataset)
plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset)
if args.plot_lock is not None:
from androcmd.baselineexp import plot_lockfile
plot_lockfile(args.plot_lock, pipeline)
def parse_args():
parser = argparse.ArgumentParser(
description="Model a brick with differential old/young dust.")
parser.add_argument('model_name')
parser.add_argument('brick', type=int)
parser.add_argument('--fit',
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'],
default=None)
parser.add_argument('--pipeline',
choices=['solarz', 'threez'],
default='solarz')
parser.add_argument('--plot-hess', default=None)
parser.add_argument('--plot-diff', default=None)
parser.add_argument('--plot-sfh', default=None)
parser.add_argument('--chi-table', default=None)
parser.add_argument('--plot-zsfh', nargs='*', default=None,
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'])
parser.add_argument('--plot-isoc', default=None)
parser.add_argument('--plot-lock', default=None)
return parser.parse_args()
if __name__ == '__main__':
main()
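# Example invocations (hypothetical model name/brick; requires the androcmd
# package and the PHAT catalogs to be available):
#
#     python phat_baseline_test.py mymodel 23 --fit lewis --pipeline threez
#     python phat_baseline_test.py mymodel 23 --plot-hess hess_grid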
| mit |
vlinhd11/vlinhd11-android-scripting | python/gdata/tests/gdata_tests/codesearch_test.py | 133 | 1930 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import gdata.codesearch
import gdata.test_data
class CodeSearchDataTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.codesearch.CodesearchFeedFromString(
gdata.test_data.CODE_SEARCH_FEED)
def testCorrectXmlConversion(self):
self.assert_(self.feed.id.text ==
'http://www.google.com/codesearch/feeds/search?q=malloc')
self.assert_(len(self.feed.entry) == 10)
for entry in self.feed.entry:
if entry.id.text == ('http://www.google.com/codesearch?hl=en&q=+ma'
'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1'
'&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco'
'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa'
're/autoconf/manual/autoconf-2.60/autoconf.html-002#first'):
self.assert_(len(entry.match) == 4)
for match in entry.match:
if match.line_number == '4':
self.assert_(match.type == 'text/html')
self.assert_(entry.file.name ==
'software/autoconf/manual/autoconf-2.60/autoconf.html-002')
self.assert_(entry.package.name == 'http://www.gnu.org')
self.assert_(entry.package.uri == 'http://www.gnu.org')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
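# Editor's sketch (not part of scipy; hypothetical numbers): a quick
# comparison of `linregress` and `theilslopes` on data with a few outliers,
# illustrating the robustness discussed in the docstrings above.  Note the
# relative import at the top means this runs only in package context.
if __name__ == "__main__":
    rng = np.random.RandomState(12345)
    x = np.linspace(-5, 5, num=100)
    y = 2.0 * x + rng.normal(size=x.size)
    y[:5] += 25.0  # contaminate the first few points

    slope, intercept, r, p, stderr = linregress(x, y)
    medslope, medinter, lo_slope, up_slope = theilslopes(y, x, alpha=0.90)

    # The Theil-Sen slope stays near 2.0 despite the outliers, while the
    # least-squares slope is pulled toward them.
    print("least-squares slope:", slope)
    print("Theil-Sen slope:", medslope, "90% CI:", (lo_slope, up_slope))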
| mit |
lhellebr/spacewalk | backend/server/rhnLib.py | 1 | 8211 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import hashlib
import string
import base64
import posixpath
from spacewalk.common.rhnLib import parseRPMName
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
# architecture work
from rhnMapping import check_package_arch
def computeSignature(*fields):
# Init the hash
m = hashlib.new('sha256')
for i in fields:
# use str(i) since some of the fields may be non-string
m.update(str(i))
return base64.encodestring(m.digest()).rstrip()
# 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a]
def parseRPMFilename(pkgFilename):
"""
IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string)
Understood rules:
      o Name can have nearly any char, but ends in a - (well separated by).
Any character; may include - as well.
o Version cannot have a -, but ends in one.
o Release should be an actual number, and can't have any -'s.
o Release can include the Epoch, e.g.: 2:4 (4 is the epoch)
o Epoch: Can include anything except a - and the : seperator???
XXX: Is epoch info above correct?
OUT: [n,e,v,r, arch].
"""
if type(pkgFilename) != type(''):
raise rhnFault(21, str(pkgFilename)) # Invalid arg.
pkgFilename = os.path.basename(pkgFilename)
# Check that this is a package NAME (with arch.rpm) and strip
# that crap off.
pkg = string.split(pkgFilename, '.')
# 'rpm' at end?
if string.lower(pkg[-1]) not in ['rpm', 'deb']:
raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename)
# Valid architecture next?
if check_package_arch(pkg[-2]) is None:
raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2])
_arch = pkg[-2]
# Nuke that arch.rpm.
pkg = string.join(pkg[:-2], '.')
ret = list(parseRPMName(pkg))
if ret:
ret.append(_arch)
return ret
# XXX TBD where to place this function - it has to be accessible from several
# places
def normalize_server_arch(arch):
log_debug(4, 'server arch', arch)
if arch is None:
return ''
arch = str(arch)
if '-' in arch:
# Already normalized
return arch
# Fix the arch if need be
suffix = '-redhat-linux'
arch = arch + suffix
return arch
class InvalidAction(Exception):
""" An error class to signal when we can not handle an action """
pass
class EmptyAction(Exception):
""" An error class that signals that we encountered an internal error
trying to handle an action through no fault of the client
"""
pass
class ShadowAction(Exception):
""" An error class for actions that should not get to the client """
pass
def transpose_to_hash(arr, column_names):
""" Handy function to transpose an array from row-based to column-based,
with named columns.
"""
result = []
for c in column_names:
result.append([])
colnum = len(column_names)
for r in arr:
if len(r) != colnum:
raise Exception(
"Mismatching number of columns: expected %s, got %s; %s" % (
colnum, len(r), r))
for i in range(len(r)):
result[i].append(r[i])
# Now build the hash labeled with the column names
rh = {}
for i in range(len(column_names)):
rh[column_names[i]] = result[i]
return rh
def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None,
package_type='rpm', checksum_type=None, checksum=None):
""" Computes a package path, optionally prepending a prefix
The path will look like
<prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch
<prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch
"""
name, epoch, version, release, pkgarch = nevra
# dirarch and pkgarch are special-cased for source rpms
if source:
dirarch = 'SRPMS'
else:
dirarch = pkgarch
if org_id in ['', None]:
org = "NULL"
else:
org = org_id
if not omit_epoch and epoch not in [None, '']:
version = str(epoch) + ':' + version
# normpath sanitizes the path (removing duplicated / and such)
template = os.path.normpath(prepend +
"/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
return template % (org, checksum[:3], name, version, release, dirarch, checksum,
name, nevra[2], release, pkgarch, package_type)
# bug #161989
# It seems that our software was written specifically for rpms in far too many
# ways. Here's a little bit of a hack function that will return the package path
# (as in from get_package_path) but without the filename appended.
# This enables us to append an arbitrary file name that is not restricted to the
# form: name-version-release.arch.type
def get_package_path_without_package_name(nevra, org_id, prepend="",
checksum_type=None, checksum=None):
"""return a package path without the package name appended"""
return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend,
checksum_type=checksum_type, checksum=checksum))
class CallableObj:
""" Generic callable object """
def __init__(self, name, func):
self.func = func
self.name = name
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
def make_evr(nvre, source=False):
""" IN: 'e:name-version-release' or 'name-version-release:e'
OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch }
"""
if ":" in nvre:
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
nvr, epoch = epoch, nvr
else:
nvr, epoch = nvre, ""
nvr_parts = nvr.rsplit("-", 2)
if len(nvr_parts) != 3:
raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.")
result = dict(zip(["name", "version", "release"], nvr_parts))
result["epoch"] = epoch
if source and result["release"].endswith(".src"):
result["release"] = result["release"][:-4]
return result
def _is_secure_path(path):
path = posixpath.normpath(path)
return not (path.startswith('/') or path.startswith('../'))
def get_crash_path(org_id, system_id, crash):
"""For a given org_id, system_id and crash, return relative path to a crash directory."""
path = os.path.join('systems', org_id, system_id, 'crashes', crash)
if _is_secure_path(path):
return path
else:
return None
def get_crashfile_path(org_id, system_id, crash, filename):
"""For a given org_id, system_id, crash and filename, return relative path to a crash file."""
path = os.path.join(get_crash_path(org_id, system_id, crash), filename)
if _is_secure_path(path):
return path
else:
return None
def get_action_path(org_id, system_id, action_id):
"""For a given org_id, system_id, and action_id, return relative path to a store directory."""
path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id))
if _is_secure_path(path):
return path
def get_actionfile_path(org_id, system_id, action_id, filename):
"""For a given org_id, system_id, action_id, and file, return relative path to a file."""
path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename))
if _is_secure_path(path):
return path
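# Editor's sketch (hypothetical values, not part of Spacewalk): exercising a
# few of the pure helpers above.  The spacewalk imports at the top of this
# module must resolve for the module to load at all.
if __name__ == '__main__':
    # 'name-version-release:epoch' -> component dict
    print make_evr('bash-4.2.46-31.el7:0')
    # row-oriented data -> hash of named columns
    print transpose_to_hash([(1, 'a'), (2, 'b')], ['id', 'label'])
    # path traversal attempts are rejected
    print _is_secure_path('systems/1/2/crashes/c1'), _is_secure_path('../etc')
    # package path layout described in get_package_path()
    print get_package_path(('bash', '0', '4.2.46', '31.el7', 'x86_64'), 1,
                           prepend='/var/satellite', checksum_type='sha256',
                           checksum='abcdef0123456789')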
| gpl-2.0 |
h4ck3rm1k3/MapNickAutotools | scons/scons-local-1.2.0/SCons/Tool/suncc.py | 12 | 1857 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 3842 2008/12/20 22:59:52 scons"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
| lgpl-2.1 |
SabunMacTavish/CTF-Platform | api/autogenerators/rtfm.py | 2 | 1943 | __author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman"]
__email__ = ["[email protected]", "[email protected]"]
__status__ = "Production"
import tempfile
import os
import random
import string
template_file = "rtfm.txt"
templates = "autogenerators/templates/"
def validate_dependencies():
print "DEPENDENCY CHECK - rtfm.py (autogen)"
if not os.path.exists(_template_path()):
print "ERROR - Read the Manual - Could not find the template file (%s)" % template_file
return False
return True
def generate():
template = open(_template_path(), 'r').read()
key = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
template = template.replace('###KEY###', key)
shift = random.randint(1, 26)
out_text = _caesar(template, shift)
output = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
output.write(out_text)
output.close()
return [os.path.abspath(output.name)], key, """<p>On the back of the broken panel you see a recovery\
<a href='###file_1_url###' target='_blank'>manual</a>. You need to find the emergency repair key in\
order to put the robot into <code>autoboot</code> mode, but it appears to be ciphered using a Caesar cipher.</p>"""
def _template_path():
return templates + template_file
def _caesar(text, shift):
ret = list()
for t in text:
t = ord(t)
if t in range(ord('a'), ord('z')+1):
ret.append(((t - ord('a') + shift) % 26) + ord('a'))
elif t in range(ord('A'), ord('Z')+1):
ret.append(((t - ord('A') + shift) % 26) + ord('A'))
elif t in range(ord('0'), ord('9')+1):
ret.append(((t - ord('0') + shift) % 10) + ord('0'))
else:
ret.append(t)
    return "".join(map(chr, ret))
| mit |
esthermm/odoomrp-wip | stock_quant_valuation/models/stock_quant.py | 8 | 1040 | # -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, api, models
from openerp.addons import decimal_precision as dp
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends("product_id", "product_id.manual_standard_cost", "qty")
def _compute_manual_value(self):
for record in self:
record.manual_value = (record.product_id.manual_standard_cost *
record.qty)
@api.multi
@api.depends('cost', 'qty')
def _compute_real_value(self):
for record in self:
record.real_value = record.cost * record.qty
manual_value = fields.Float(
string="Manual Value", store=True, compute="_compute_manual_value",
digits=dp.get_precision('Product Price'))
real_value = fields.Float(
string="Real Value", store=True, compute="_compute_real_value",
digits=dp.get_precision('Product Price'))
| agpl-3.0 |
jy723/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/names.py | 215 | 5223 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
PRN_SEPARATOR = '/'
import re
def normalize_package_context(package_context):
package_context = package_context.strip()
while package_context.endswith(PRN_SEPARATOR):
package_context = package_context[:-1]
return package_context
#######################################################################
# RESOURCE NAMES
# resource names refer to entities in a file system
def resource_name(res_pkg_name, name, my_pkg=None):
"""
Convert package name + resource into a fully qualified resource name
@param res_pkg_name: name of package resource is located in
@type res_pkg_name: str
@param name: resource base name
@type name: str
@param my_pkg: name of package resource is being referred to
in. If specified, name will be returned in local form if
res_pkg_name is my_pkg
@type my_pkg: str
@return: name for resource
@rtype: str
"""
if res_pkg_name != my_pkg:
return res_pkg_name+PRN_SEPARATOR+name
return name
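# Illustrative calls (assumed, not part of the original source):
#   resource_name('std_msgs', 'String')                     -> 'std_msgs/String'
#   resource_name('std_msgs', 'String', my_pkg='std_msgs')  -> 'String'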
def resource_name_base(name):
"""
pkg/typeName -> typeName, typeName -> typeName
Convert fully qualified resource name into the package-less resource name
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: resource name sans package-name scope
@rtype: str
"""
return name[name.rfind(PRN_SEPARATOR)+1:]
def resource_name_package(name):
"""
pkg/typeName -> pkg, typeName -> None
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name of resource
@rtype: str
"""
if not PRN_SEPARATOR in name:
return None
return name[:name.find(PRN_SEPARATOR)]
def package_resource_name(name):
"""
Split a name into its package and resource name parts, e.g. 'std_msgs/String -> std_msgs, String'
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name, resource name
@rtype: str
@raise ValueError: if name is invalid
"""
if PRN_SEPARATOR in name:
val = tuple(name.split(PRN_SEPARATOR))
if len(val) != 2:
raise ValueError("invalid name [%s]"%name)
else:
return val
else:
return '', name
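# Illustrative behaviour (assumed, not part of the original source):
#   package_resource_name('std_msgs/String') -> ('std_msgs', 'String')
#   package_resource_name('String')          -> ('', 'String')
#   package_resource_name('a/b/c')           -> raises ValueError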
################################################################################
# NAME VALIDATORS
#ascii char followed by (alphanumeric, _, /)
RESOURCE_NAME_LEGAL_CHARS_P = re.compile(r'^[A-Za-z][\w_/]*$')
def is_legal_resource_name(name):
"""
Check if name is a legal ROS name for filesystem resources
(alphabetical character followed by alphanumeric, underscore, or
forward slashes). This constraint is currently not being enforced,
but may start getting enforced in later versions of ROS.
@param name: Name
@type name: str
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = RESOURCE_NAME_LEGAL_CHARS_P.match(name)
# '//' check makes sure there isn't double-slashes
return m is not None and m.group(0) == name and not '//' in name
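# Sketch of expected results (assumed, for illustration only):
#   is_legal_resource_name('std_msgs/String') -> True   (letter, then word chars or '/')
#   is_legal_resource_name('9bad')            -> False  (must start with a letter)
#   is_legal_resource_name('a//b')            -> False  (double slash rejected)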
BASE_RESOURCE_NAME_LEGAL_CHARS_P = re.compile(r'^[A-Za-z][\w_]*$') #ascii char followed by (alphanumeric, _)
def is_legal_resource_base_name(name):
"""
Validates that name is a legal resource base name. A base name has
no package context, e.g. "String".
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = BASE_RESOURCE_NAME_LEGAL_CHARS_P.match(name)
return m is not None and m.group(0) == name
| gpl-3.0 |
INM-6/nest-git-migration | topology/examples/conncomp.py | 13 | 4213 | # -*- coding: utf-8 -*-
#
# conncomp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers with nodes composed of one pyramidal cell
and one interneuron. Connect with two projections, one pyr->pyr, one
pyr->in, and visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import pylab
pylab.ion()
import nest
import nest.topology as topo
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'pyr'},
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.5,
'weights': 1.0,
'delays': 1.0})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'mask': {'circular': {'radius': 1.0}},
'kernel': 0.2,
'weights': 1.0,
'delays': 1.0})
pylab.clf()
# plot targets of neurons in different grid locations
for ctr in [[15,15]]:
# obtain node id for center: pick first node of composite
ctr_id = topo.GetElement(a, ctr)
# get all projection targets of center neuron
tgts = [ci[1] for ci in nest.GetConnections(ctr_id)]
# get positions of targets
tpyr = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='pyr'])))
tin = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='in'])))
# scatter-plot
pylab.scatter(tpyr[0]-0.02, tpyr[1]-0.02, 20, 'b', zorder = 10)
pylab.scatter(tin[0] +0.02, tin[1] +0.02, 20, 'r', zorder = 10)
# mark locations with background grey circle
pylab.plot(tpyr[0],tpyr[1],'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
pylab.plot(tin[0], tin[1] ,'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
# mark sender position with transparent red circle
ctrpos = topo.GetPosition(ctr_id)[0]
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.15, zorder = 99,
fc = 'r', alpha = 0.4, ec = 'none'))
# mark mask positions with open red/blue circles
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.5, zorder = 2,
fc = 'none', ec = 'b', lw=3))
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=1.0, zorder = 2,
fc = 'none', ec = 'r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5,-1.5), 3.0, 3.0, zorder = 1,
fc = 'none', ec = 'k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-1.6, 1.6, -1.6, 1.6])
pylab.axes().set_aspect('equal', 'box')
| gpl-2.0 |
geomagpy/magpy | magpy/lib/format_dtu.py | 3 | 5567 | """
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read function, toDo: write function
"""
from __future__ import print_function
from magpy.stream import *
def isDTU1(filename):
"""
Checks whether a file is ASCII DTU (type1) format used within the DTU's FGE network
    Characteristic features are: a first line that either starts with
    'FILENAME: ' or consists of six whitespace-separated fields whose
    first field is a hh:mm:ss timestamp.
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith('FILENAME: '):
elem = temp.split()
if len(elem) == 6:
try:
testtime = datetime.strptime(elem[0],"%H:%M:%S")
except:
return False
else:
return False
except:
return False
return True
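# Sketch (assumed examples): a file whose first line is
#   'FILENAME: GDH4_20091215.sec'
# or a six-field data line such as
#   '00:00:01 124.04 134.08 -17.68 0.00 291.90'
# passes this check; anything else is rejected.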
def readDTU1(filename, headonly=False, **kwargs):
"""
Reading DTU1 format data.
Looks like:
FILENAME: GDH4_20091215.sec
INST. TYPE: Primary magnetometer
INSTRUMENT: FGE S0120 E0192
FILTER: Electronic lowpass
ADC: ICP 7017 vers. B2.3
SOFTWARE: FG_ComData vers. 3.04
CHANNELS: 6 Time,x,y,z,T1,T2
TIME 1 hh:mm:ss PC clock, UT, timeserver
x 400 nT/V variation horizontal magnetic north in nT
y 400 nT/V variation horizontal magnetic east in nT
z 400 nT/V variation vertical in nT
T1 0 Kelvin/v no temp sensor on pendulum
T2 320 Kelvin/V electronic temp in Kelvin, sensor: AD592
DATA:
00:00:01 124.04 134.08 -17.68 0.00 291.90
00:00:02 124.00 134.00 -17.68 0.00 291.90
00:00:03 124.08 134.00 -17.64 0.00 291.90
"""
fh = open(filename, 'rt')
# read file and split text into channels
data = []
getfile = True
key = None
stream = DataStream()
    # Check whether header information is already present
headers = {}
# get day from filename (platform independent)
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
splitpath = os.path.split(filename)
daystring = splitpath[1].split('.')
daystring = daystring[0].split('_')
print(daystring[1])
try:
day = datetime.strftime(datetime.strptime(daystring[1] , "%Y%m%d"),"%Y-%m-%d")
except:
logging.warning("Wrong dateformat in Filename %s" % daystring[0])
return []
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if getfile:
for line in fh:
elem = line.split()
if line.isspace():
# blank line
pass
elif line.startswith('FILENAME:'):
pass
elif line.startswith('INST. TYPE:'):
tmp = line.split(':')[1]
headers['InstrumentType'] = tmp.lstrip()
elif line.startswith('INSTRUMENT:'):
tmp = line.split(':')[1]
headers['Instrument'] = tmp.lstrip()
elif line.startswith('FILTER:'):
tmp = line.split(':')[1]
headers['Filter'] = tmp.lstrip()
elif line.startswith('ADC:'):
tmp = line.split(':')[1]
headers['ADC'] = tmp.lstrip()
elif line.startswith('SOFTWARE:'):
tmp = line.split(':')[1]
headers['Software'] = tmp.lstrip()
elif line.startswith('CHANNELS:'):
tmp = line.split(':')[1]
headers['Channels'] = tmp.lstrip()
elif line.startswith('TIME'):
pass
elif line.startswith('x'):
pass
elif line.startswith('y'):
pass
elif line.startswith('z'):
pass
elif line.startswith('T1'):
pass
elif line.startswith('T2'):
pass
elif line.startswith('DATA:'):
pass
elif headonly:
# skip data for option headonly
continue
else:
row = LineStruct()
try:
row.time=date2num(datetime.strptime(day+'T'+elem[0],"%Y-%m-%dT%H:%M:%S"))
try:
row.x = float(elem[1])
except:
row.x = float('nan')
try:
row.y = float(elem[2])
except:
row.y = float('nan')
try:
row.z = float(elem[3])
except:
row.z = float('nan')
try:
row.t1 = float(elem[4])
except:
row.t1 = float('nan')
try:
row.t2 = float(elem[5])
except:
row.t2 = float('nan')
except:
#raise ValueError, "Wrong date format in %s" % filename
pass
stream.add(row)
fh.close()
else:
headers = stream.header
stream =[]
return DataStream(stream, headers)
| bsd-3-clause |
m-r-hunt/invaders | enemies.py | 1 | 6646 | # Invaders
# Copyright (C) 2013 Maximilian Hunt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, random, pygame, projectiles, score_counter
class EnemySprite(pygame.sprite.Sprite):
# Class for one enemy invader.
def __init__(self, image, position, bullet_group):
# image: relative path to an image pygame can load
# position: (x, y) coordinates on screen
# bullet_group: pygame.sprite.Group to put fired bullets in
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.position = position
self.rect = self.image.get_rect()
self.rect.center = position
self.bullet_group = bullet_group
def update(self, dv, score, collisions):
# Update this enemy. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to increment on death
# collisions: a dictionary of collisions, possibly containing this object
# Handle any collisions given
if self in collisions:
death = False
for bullet in collisions[self]:
if (bullet.origin != self):
bullet.kill()
death = True
if (death == True):
score.increment()
self.kill()
# Update position
self.position = (self.position[0] + dv[0], self.position[1] + dv[1])
self.rect.center = self.position
def y(self):
# Return height (y coordinate).
return self.position[1]
def fire(self):
# (Possibly) fire a bullet down.
if (random.randrange(100) < 2):
bounds = (0-100, 800+100, 0-100, 600+100)
bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self)
self.bullet_group.add(bullet)
class EnemyColumn(pygame.sprite.Group):
# Class for one column in a formation of enemies.
# Exists so we can easily fire only the lowest enemy in each column
# Remembers its own x coordinate, everything else happens inside the actual enemies
def __init__(self, x_position):
# x_position: integer x coordinate
pygame.sprite.Group.__init__(self)
self.x_position = x_position
def update(self, dv, score, collisions):
# Update this column. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to pass to contained EnemySprites
# collisions: a dictionary of collisions to pass to contained EnemySprites
# Return (x, y), x of this column and y of lowest contained Sprite.
self.x_position += dv[0]
# Update contained sprites
for i in self.sprites():
i.update(dv, score, collisions)
# Compute biggest y, ask that EnemySprite to fire.
max_y = 0
if (len(self) != 0):
for i in self.sprites():
if (i.y() > max_y):
max_y = i.y()
bottom_enemy = i
bottom_enemy.fire()
return self.x_position, max_y
class EnemyFormation(pygame.sprite.Group):
# Class for a whole formation of enemies.
# Contains both EnemyColumns and EnemySprites
# Magic numbers: Base speed stepped horizontally or vertically each frame.
H_STEP = 2
V_STEP = 10
def __init__(self, topleft, layout, bounds, bullet_group):
pygame.sprite.Group.__init__(self)
self.columns = []
columns, rows = layout
# Generate all the enemies and columns.
for i in range(0, columns):
column_x = topleft[0] + i*64
enemy_column = EnemyColumn(topleft[0] + i*64)
for j in range(0, rows):
new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group)
enemy_column.add(new_enemy)
self.add(new_enemy)
self.columns.append(enemy_column)
# Direction: +1 for right, -1 for left (i.e. +-ve x direction)
self.current_direction = +1
self.left_bound, self.right_bound, self.bottom_bound = bounds
self.total = columns * rows
def update(self, score, collisions):
# Update this formation. Should be called once per frame.
# score: a Score to pass to contained EnemyColumns
# collisions: a dictionary of collisions to pass to contained EnemyColumns
# Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting.
# Second is True if this is because it's now empty, False if it has reached the bottom of the screen.
direction_change = too_low = False
# Compute factor to move faster when we have fewer remaining members.
scale = int(float(self.total)/float(len(self)))
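        # e.g. 50 enemies total with 10 left alive gives scale == 5,
        # so the formation moves at five times the base step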
# Update columns
for i in self.columns:
x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions)
# Remove empty columns
if (len(i.sprites()) == 0):
self.columns.remove(i)
# Notice if we've gone too low
elif (y > self.bottom_bound):
too_low = True
# Remember to change direction when we reach screen edges
elif (x < self.left_bound or x > self.right_bound):
direction_change = True
# Indicate we're empty
if (len(self.columns) == 0):
return False, True
# Indicate we reached the bottom of the screen.
elif too_low:
return False, False
# Drop down and change direction
elif direction_change:
self.current_direction *= -1
for i in self.columns:
i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, [])
# If we made it here, everything's fine.
return True, True | gpl-2.0 |
MERegistro/meregistro | django/contrib/admin/templatetags/admin_list.py | 43 | 12835 | import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
register = Library()
DOT = '.'
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return u'... '
elif i == cl.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(cl.get_query_string({PAGE_VAR: i})), (i == cl.paginator.num_pages-1 and ' class="end"' or ''), i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
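            # Worked illustration (assumed numbers): with num_pages=50 and
            # page_num=25, the logic above yields
            #   page_range == [0, 1, '.', 22, 23, 24, 25, 26, 27, 28, '.', 48, 49]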
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
def result_headers(cl):
"""
Generates the list column headers.
"""
lookup_opts = cl.lookup_opts
for i, field_name in enumerate(cl.list_display):
header, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": header,
"class_attrib": mark_safe(' class="action-checkbox-column"')
}
continue
# It is a non-field, but perhaps one that is sortable
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {"text": header}
continue
# So this _is_ a sortable non-field. Go to the yield
# after the else clause.
else:
admin_order_field = None
th_classes = []
new_order_type = 'asc'
if field_name == cl.order_field or admin_order_field == cl.order_field:
th_classes.append('sorted %sending' % cl.order_type.lower())
new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]
yield {
"text": header,
"sortable": True,
"url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
"class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
}
def _boolean_icon(field_val):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[field_val], field_val))
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except (AttributeError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if not allow_tags:
result_repr = escape(result_repr)
else:
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = escape(field_val)
else:
result_repr = display_for_field(value, f)
if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
row_class = ' class="nowrap"'
if force_unicode(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_unicode(value))[1:]
yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
(table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if form and field_name in form.fields:
bf = form[field_name]
result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
else:
result_repr = conditional_escape(result_repr)
yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
if form and not form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield list(items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield list(items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))
def result_list(cl):
"""
Displays the headers and data list together
"""
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': list(result_headers(cl)),
'results': list(results(cl))}
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
return {'title': spec.title(), 'choices' : list(spec.choices(cl))}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/datasets/tests/test_samples_generator.py | 3 | 7262 | import numpy as np
from numpy.testing import assert_equal, assert_approx_equal, \
assert_array_almost_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from .. import make_classification
from .. import make_multilabel_classification
from .. import make_hastie_10_2
from .. import make_regression
from .. import make_blobs
from .. import make_friedman1
from .. import make_friedman2
from .. import make_friedman3
from .. import make_low_rank_matrix
from .. import make_sparse_coded_signal
from .. import make_sparse_uncorrelated
from .. import make_spd_matrix
from .. import make_swiss_roll
from .. import make_s_curve
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_multilabel_classification():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], t * np.cos(t))
assert_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], np.sin(t))
assert_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
| agpl-3.0 |
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver | configs/sim/gscreen_custom/gscreen_handler.py | 25 | 4194 | # This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only things that really matter are that it's saved as a GTK builder project,
# the toplevel window is called window1 (the default name) and you connect a destroy
# window signal, otherwise you can't close down linuxcnc
class HandlerClass:
# this will be pretty standard to gain access to everything
# emc is for control and status of linuxcnc
# data is important data from gscreen and linuxcnc
# widgets is all the widgets from the glade files
# gscreen is for access to gscreens methods
#
    # we added: turning the gremlin DRO on at startup,
    # a per-instance counter for the number of button presses,
    # and making only the active axis buttons visible
def __init__(self, halcomp,builder,useropts,gscreen):
self.emc = gscreen.emc
self.data = gscreen.data
self.widgets = gscreen.widgets
self.gscreen = gscreen
self.nhits = 0
self.widgets.gremlin.set_property('enable_dro',True)
for i in ("x","y","z","a","b","c","u","v","w","s"):
if i in self.data.axis_list:
self.widgets["axis_%s"%i].set_visible(True)
self.widgets.offsetpage1.set_row_visible("1",False)
# This is a new method for a couple of widgets we added callbacks to.
    # The argument 'widget' is a reference to the actual widget that emitted the signal.
# In this way we can use this method on a bunch of widgets without knowing
# their name ahead of time.
def on_button_press(self,widget,data=None):
        # nhits is per-instance state, so no global declaration is needed
        self.nhits += 1
widget.set_label("hits: %d" % self.nhits)
    # This method is overridden from gscreen.
    # We selected this method name in the glade file as a callback.
    # Since this method name is the same as one in gscreen,
    # gscreen won't connect a callback to its method.
# Meaning this is the only one called.
def on_estop_clicked(self,*args):
print "estop"
if self.data.estopped:
self.emc.estop_reset(1)
else:
self.emc.machine_off(1)
self.emc.estop(1)
self.widgets.on_label.set_text("Machine Off")
return True
# This is a new method for our new button
# we selected this method name in the glade file as a callback
def on_machine_state_clicked(self,*args):
if self.data.estopped:
return
elif not self.data.machine_on:
self.emc.machine_on(1)
self.widgets.on_label.set_text("Machine On")
else:
self.emc.machine_off(1)
self.widgets.on_label.set_text("Machine Off")
# here we override gscreen's method of hiding the cursor
# by writing a method with the same name that gscreen connects a signal to.
# and our new method in fact calls a sound method and then the hide cursor method
# that are both in gscreen
# So now we get a sound when we hide and show the pointer
def on_hide_cursor(self,widget):
self.gscreen.audio.set_sound(self.data.alert_sound)
self.gscreen.audio.run()
self.gscreen.on_hide_cursor(None)
    # every 100 milliseconds this gets called
# we add calls to the regular functions for the widgets we are using.
# and add any extra calls/code
def periodic(self):
self.gscreen.update_mdi_spindle_button()
self.gscreen.update_spindle_bar()
self.gscreen.update_active_gcodes()
self.gscreen.update_active_mcodes()
self.gscreen.update_aux_coolant_pins()
self.gscreen.update_feed_speed_label()
self.gscreen.update_tool_label()
self.gscreen.update_coolant_leds()
self.gscreen.update_estop_led()
self.gscreen.update_machine_on_led()
self.gscreen.update_limit_override()
self.gscreen.update_override_label()
self.gscreen.update_jog_rate_label()
self.gscreen.update_mode_label()
self.gscreen.update_units_button_label()
def get_handlers(halcomp,builder,useropts,gscreen):
return [HandlerClass(halcomp,builder,useropts,gscreen)]
| gpl-2.0 |
bowang/tensorflow | tensorflow/compiler/tests/ternary_ops_test.py | 101 | 4286 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for ternary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class TernaryOpsTest(XLATestCase):
def _testTernary(self, op, a, b, c, expected):
with self.test_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
output = op(pa, pb, pc)
result = session.run(output, {pa: a, pb: b, pc: c})
self.assertAllClose(result, expected, rtol=1e-3)
def testLinspace(self):
self._testTernary(
math_ops.linspace,
np.float32(1),
np.float32(2),
np.int32(1),
expected=np.array([1], dtype=np.float32))
self._testTernary(
math_ops.linspace,
np.float32(1),
np.float32(4),
np.int32(3),
expected=np.array([1, 2.5, 4], dtype=np.float32))
def testRange(self):
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(2),
np.int32(1),
expected=np.array([1], dtype=np.int32))
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(7),
np.int32(2),
expected=np.array([1, 3, 5], dtype=np.int32))
def testSelect(self):
self._testTernary(
array_ops.where,
np.array(0, dtype=np.bool),
np.array(2, dtype=np.float32),
np.array(7, dtype=np.float32),
expected=np.array(7, dtype=np.float32))
self._testTernary(
array_ops.where,
np.array(1, dtype=np.bool),
np.array([1, 2, 3, 4], dtype=np.float32),
np.array([5, 6, 7, 8], dtype=np.float32),
expected=np.array([1, 2, 3, 4], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array(0, dtype=np.bool),
np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32),
expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array([0, 1, 1, 0], dtype=np.bool),
np.array([1, 2, 3, 4], dtype=np.float32),
np.array([5, 6, 7, 8], dtype=np.float32),
expected=np.array([5, 2, 3, 8], dtype=np.float32))
self._testTernary(
array_ops.where,
np.array([0, 1, 0], dtype=np.bool),
np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32),
expected=np.array([[7, 8], [3, 4], [11, 12]], dtype=np.float32))
def testSlice(self):
for dtype in self.numeric_types:
self._testTernary(
array_ops.slice,
np.array([[], [], []], dtype=dtype),
np.array([1, 0], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
expected=np.array([[], []], dtype=dtype))
self._testTernary(
array_ops.slice,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
np.array([2, 1], dtype=np.int32),
expected=np.array([[2], [5]], dtype=dtype))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
thaihungle/deepexp | rare-mann/mimic_gen.py | 1 | 5981 | import numpy as np
import os
import random
import pickle
class MimicDataLoader(object):
def __init__(self, data_folder, batch_size=1, max_sequence=10, max_iter=None, split = 0.75, train_keep=1):
super(MimicDataLoader, self).__init__()
self.data_folder = data_folder
self.batch_size = batch_size
self.num_step = max_sequence
self.max_iter = max_iter
self.num_iter = 0
self.input_map=pickle.load(open(data_folder+'/dig_map.pkl','rb'))
        self.output_map = pickle.load(open(data_folder + '/proc_map.pkl', 'rb'))
self.all_input = pickle.load(open(data_folder+'/dig_input.pkl','rb'))
self.all_output = pickle.load(open(data_folder + '/proc_output.pkl', 'rb'))
self.output_size = self.all_output.shape[1]
if len(np.shape(self.all_output))>1:
self.all_output = np.argmax(self.all_output, axis=1)
print(self.all_output[:10])
print(self.all_output.shape)
self.num_samples=self.all_input.shape[0]
print('num samples {}'.format(self.num_samples))
lindex=list(range(self.num_samples))
# random.shuffle(lindex)
self.train_data_indexes = lindex[:int(self.num_samples*split*train_keep)]
self.test_data_indexes = lindex[int(self.num_samples*split):]
self.is_training=True
self.data_offset=0
self.input_size=self.all_input.shape[1]
print('num train samples: {}'.format(len(self.train_data_indexes)))
print('train index: {} ...'.format(self.train_data_indexes[:10]))
print('num test samples: {}'.format(len(self.test_data_indexes)))
print('test index: {} ...'.format(self.test_data_indexes[:10]))
print('num classes: {}'.format(self.output_size))
print('num steps per episode: {}'.format(self.num_step))
print('batch size: {}'.format(self.batch_size))
def fetch_all(self):
train_x=[]
train_y=[]
test_x=[]
test_y=[]
for ind in self.train_data_indexes:
train_x.append(self.all_input[ind])
train_y.append(self.all_output[ind])
for ind in self.test_data_indexes:
test_x.append(self.all_input[ind])
test_y.append(self.all_output[ind])
return np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
def fetch_batch(self, is_training=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
indexes = np.zeros((self.batch_size, self.num_step), dtype=np.int32)
for i in range(self.batch_size):
indexes[i, :] = np.random.choice(len(list_index), self.num_step, replace=False)
# print('-------------')
# print(indexes[:10])
all_inputs=[]
all_outputs=[]
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b, s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
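    # Shape sketch (assumed): fetch_batch returns two lists of length
    # num_step; each inputs entry is a (batch_size, input_size) float32
    # array and each outputs entry a (batch_size,) int32 label array.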
def fetch_batch_full(self, is_training, is_rand=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
num_t = len(list_index)
indexes = np.zeros((self.batch_size, num_t),dtype=np.int32)
for i in range(self.batch_size):
if is_rand:
indexes[i, :] = np.random.choice(len(list_index), num_t, replace=False)
else:
indexes[i, :] = np.asarray(list(range(len(list_index))))
# indexes = np.zeros((self.batch_size, num_t), dtype=np.int32)
# for i in range(self.batch_size):
# indexes[i,:]=np.arange(num_t)
all_inputs=[]
all_outputs=[]
for s in range(num_t):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b, s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
# indexes just have shape (batch,)
def predict_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
def predict_online_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
            for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
| mit |
persandstrom/home-assistant | homeassistant/components/device_tracker/luci.py | 4 | 5240 | """
Support for OpenWRT (luci) routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.luci/
"""
import json
import logging
import re
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_SSL)
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean
})
class InvalidLuciTokenError(HomeAssistantError):
"""When an invalid token is detected."""
pass
def get_scanner(hass, config):
"""Validate the configuration and return a Luci scanner."""
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class LuciDeviceScanner(DeviceScanner):
"""This class queries a wireless router running OpenWrt firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
protocol = 'http' if not config[CONF_SSL] else 'https'
self.origin = '{}://{}'.format(protocol, host)
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.refresh_token()
self.mac2name = None
self.success_init = self.token is not None
def refresh_token(self):
"""Get a new token."""
self.token = _get_token(self.origin, self.username, self.password)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
url = '{}/cgi-bin/luci/rpc/uci'.format(self.origin)
result = _req_json_rpc(
url, 'get_all', 'dhcp', params={'auth': self.token})
if result:
hosts = [x for x in result.values()
if x['.type'] == 'host' and
'mac' in x and 'name' in x]
mac2name_list = [
(x['mac'].upper(), x['name']) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the Luci router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking ARP")
url = '{}/cgi-bin/luci/rpc/sys'.format(self.origin)
try:
result = _req_json_rpc(
url, 'net.arptable', params={'auth': self.token})
except InvalidLuciTokenError:
_LOGGER.info("Refreshing token")
self.refresh_token()
return False
if result:
self.last_results = []
for device_entry in result:
# Check if the Flags for each device contain
# NUD_REACHABLE and if so, add it to last_results
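                # e.g. Flags '0x2' or '0x6' mark a reachable entry; '0x0' does not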
if int(device_entry['Flags'], 16) & 0x2:
self.last_results.append(device_entry['HW address'])
return True
return False
def _req_json_rpc(url, method, *args, **kwargs):
"""Perform one JSON RPC operation."""
data = json.dumps({'method': method, 'params': args})
try:
res = requests.post(url, data=data, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == 200:
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from luci")
return
try:
return result['result']
except KeyError:
_LOGGER.exception("No result in response from luci")
return
elif res.status_code == 401:
# Authentication error
_LOGGER.exception(
"Failed to authenticate, check your username and password")
return
elif res.status_code == 403:
_LOGGER.error("Luci responded with a 403 Invalid token")
raise InvalidLuciTokenError
else:
_LOGGER.error("Invalid response from luci: %s", res)
def _get_token(origin, username, password):
"""Get authentication token for the given configuration."""
url = '{}/cgi-bin/luci/rpc/auth'.format(origin)
return _req_json_rpc(url, 'login', username, password)
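# Minimal usage sketch (assumed host and credentials, illustration only):
#   token = _get_token('http://192.168.1.1', 'root', 'secret')
#   arp = _req_json_rpc('http://192.168.1.1/cgi-bin/luci/rpc/sys',
#                       'net.arptable', params={'auth': token})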
| apache-2.0 |
abadger/ansible-modules-core | network/nxos/nxos_vxlan_vtep_vni.py | 19 | 19617 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
required: false
choices: ['true','false']
default: null
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
required: false
choices: ['bgp','static']
default: null
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
required: false
default: null
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
required: false
default: null
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
include_defaults:
description:
- Specify to use or not the complete running configuration
for module operations.
required: false
default: true
choices: ['true','true']
config:
description:
- Configuration string to be used for module operations. If not
specified, the module will use the current running configuration.
required: false
default: null
save:
description:
- Specify to save the running configuration after
module operations.
required: false
default: false
choices: ['true','false']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
"multicast_group": "", "peer_list": [],
"suppress_arp": false, "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = ['suppress_arp']
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
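# Illustration (assumed config text, not from a real device): if the running
# config contains the line 'mcast-group 239.1.1.1', then
# get_value('multicast_group', config, module) returns '239.1.1.1'.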
def check_interface(module, netcfg):
config = str(netcfg)
REGEX = re.compile(r'(?:interface nve)(?P<value>.*)$', re.M)
value = ''
if 'interface nve' in config:
value = 'nve{0}'.format(REGEX.search(config).group('value'))
return value
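# For instance (illustrative), a running config containing "interface nve1"
# makes check_interface() return "nve1", while an empty string signals that
# no NVE interface has been configured yet.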
def get_custom_value(arg, config, module):
splitted_config = config.splitlines()
if arg == 'assoc_vrf':
value = False
if 'associate-vrf' in config:
value = True
elif arg == 'peer_list':
value = []
REGEX = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
for line in splitted_config:
peer_value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line:
peer_value = REGEX.search(line).group('peer_value')
if peer_value:
value.append(peer_value)
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
custom = [
'assoc_vrf',
'peer_list'
]
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'associate-vrf' in temp_config:
parents.append('member vni {0} associate-vrf'.format(
module.params['vni']))
config = netcfg.get_section(parents)
elif 'member vni' in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
if arg in custom:
existing[arg] = get_custom_value(arg, config, module)
else:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
return new_dict
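# Illustrative mapping: given PARAM_TO_COMMAND_KEYMAP above, apply_key_map()
# turns {'vni': '6000', 'suppress_arp': True} into
# {'member vni': '6000', 'suppress-arp': True}; keys missing from the key
# map are dropped.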
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if value:
commands.append(command)
else:
commands.append('no {0}'.format(command))
elif key == 'peer-ip' and value != 'default':
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replication_command = 'ingress-replication protocol static'
interface_command = 'interface {0}'.format(module.params['interface'])
if ingress_replication_command in commands:
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command, ingress_replication_command]
candidate.add(static_level_cmds, parents=parents)
commands = [cmd for cmd in commands if 'peer' not in cmd]
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
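# Sketch of the removal path (assuming vni=6000 on interface nve1): when the
# existing VNI is bound with associate-vrf, state_absent() queues
# "no member vni 6000 associate-vrf" under "interface nve1"; otherwise it
# queues "no member vni 6000".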
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str',
choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
if module.params['assoc_vrf']:
mutually_exclusive_params = ['multicast_group',
'suppress_arp',
'ingress_replication']
for param in mutually_exclusive_params:
if module.params[param]:
module.fail_json(msg='assoc_vrf cannot be used with '
'{0} param'.format(param))
if module.params['peer_list']:
if module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
stripped_peer_list = map(str.strip, peer_list)
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = [
'assoc_vrf',
'interface',
'vni',
'ingress_replication',
'multicast_group',
'peer_list',
'suppress_arp'
]
existing, interface_exist = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not interface_exist:
WARNINGS.append("The proposed NVE interface does not exist. "
"Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on '
'the switch.')
elif (existing and state == 'absent' and
existing['vni'] != module.params['vni']):
module.fail_json(msg="ERROR: VNI delete failed: Could not find"
" vni node for {0}".format(
module.params['vni']),
existing_vni=existing['vni'])
else:
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state, interface_exist = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ukanga/SickRage | lib/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
    # => &lt;script> do_nasty_stuff() &lt;/script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in list(tokenTypes.keys()):
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
return self.allowed_token(token, token_type)
else:
return self.disallowed_token(token, token_type)
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def allowed_token(self, token, token_type):
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if attr not in attrs:
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val] for name, val in list(attrs.items())]
return token
def disallowed_token(self, token, token_type):
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in list(tokenTypes.keys()):
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
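    # Illustrative behaviour of disallowed_token() (example markup, not from
    # the source): a disallowed <script> start tag is rewritten as character
    # data, so the serializer emits it as escaped text (&lt;script&gt;)
    # rather than as live markup.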
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
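    # Illustrative call (example style string, not from the source):
    #   sanitize_css('color: red; background: url(evil.js)')
    # strips the url() reference and returns 'color: red;', while a style
    # that fails the "gauntlet" regexes above comes back as ''.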
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
| gpl-3.0 |
openprocurement/restkit | restkit/filters.py | 2 | 3801 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import base64
import re
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from urlparse import urlunparse
from restkit.oauth2 import Request, SignatureMethod_HMAC_SHA1
class BasicAuth(object):
""" Simple filter to manage basic authentification"""
def __init__(self, username, password):
self.credentials = (username, password)
def on_request(self, request):
encode = base64.b64encode("%s:%s" % self.credentials)
request.headers['Authorization'] = 'Basic %s' % encode
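# Example (illustrative credentials): BasicAuth('user', 'secret') adds the
# header "Authorization: Basic dXNlcjpzZWNyZXQ=" to every request it filters.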
def validate_consumer(consumer):
""" validate a consumer agains oauth2.Consumer object """
if not hasattr(consumer, "key"):
raise ValueError("Invalid consumer.")
return consumer
def validate_token(token):
""" validate a token agains oauth2.Token object """
if token is not None and not hasattr(token, "key"):
raise ValueError("Invalid token.")
return token
class OAuthFilter(object):
""" oauth filter """
def __init__(self, path, consumer, token=None, method=None,
realm=""):
""" Init OAuthFilter
:param path: path or regexp. * mean all path on wicth oauth can be
applied.
:param consumer: oauth consumer, instance of oauth2.Consumer
:param token: oauth token, instance of oauth2.Token
:param method: oauth signature method
token and method signature are optionnals. Consumer should be an
instance of `oauth2.Consumer`, token an instance of `oauth2.Toke`
signature method an instance of `oauth2.SignatureMethod`.
"""
if path.endswith('*'):
self.match = re.compile("%s.*" % path.rsplit('*', 1)[0])
else:
self.match = re.compile("%s$" % path)
self.consumer = validate_consumer(consumer)
self.token = validate_token(token)
self.method = method or SignatureMethod_HMAC_SHA1()
self.realm = realm
def on_path(self, request):
path = request.parsed_url.path or "/"
return (self.match.match(path) is not None)
def on_request(self, request):
if not self.on_path(request):
return
params = {}
form = False
parsed_url = request.parsed_url
        if request.body:
ctype = request.headers.iget('content-type')
if ctype is not None and \
ctype.startswith('application/x-www-form-urlencoded'):
# we are in a form try to get oauth params from here
form = True
params = dict(parse_qsl(request.body))
        # update params from query parameters
params.update(parse_qsl(parsed_url.query))
raw_url = urlunparse((parsed_url.scheme, parsed_url.netloc,
parsed_url.path, '', '', ''))
oauth_req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=request.method,
http_url=raw_url, parameters=params,
is_form_encoded=form)
oauth_req.sign_request(self.method, self.consumer, self.token)
if form:
request.body = oauth_req.to_postdata()
request.headers['Content-Length'] = len(request.body)
elif request.method in ('GET', 'HEAD'):
request.original_url = request.url
request.url = oauth_req.to_url()
else:
oauth_headers = oauth_req.to_header(realm=self.realm)
request.headers.update(oauth_headers)
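# Hedged usage sketch (assumes restkit's request() helper and the bundled
# oauth2.Consumer; values are placeholders):
#
#   from restkit import request
#   from restkit.oauth2 import Consumer
#   oauth = OAuthFilter('*', Consumer(key='KEY', secret='SECRET'))
#   resp = request('http://example.com/api', filters=[oauth])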
| apache-2.0 |
DxCx/nzbToMedia | libs/beets/ui/commands.py | 4 | 50834 | # This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
from __future__ import print_function
import logging
import os
import time
import itertools
import codecs
import platform
import beets
from beets import ui
from beets.ui import print_, input_, decargs
from beets import autotag
from beets.autotag import recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path
from beets.util.functemplate import Template
from beets import library
from beets import config
from beets.util.confit import _package_path
# Global logger.
log = logging.getLogger('beets')
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError('No matching albums found.')
elif not album and not items:
raise ui.UserError('No matching items found.')
return items, albums
# fields: Shows a list of available fields for queries and format strings.
fields_cmd = ui.Subcommand('fields',
help='show fields available for queries and format strings')
def fields_func(lib, opts, args):
def _print_rows(names):
print(" " + "\n ".join(names))
def _show_plugin_fields(album):
plugin_fields = []
for plugin in plugins.find_plugins():
if album:
fdict = plugin.album_template_fields
else:
fdict = plugin.template_fields
plugin_fields += fdict.keys()
if plugin_fields:
print("Template fields from plugins:")
_print_rows(plugin_fields)
print("Item fields:")
_print_rows(library.ITEM_KEYS)
_show_plugin_fields(False)
print("\nAlbum fields:")
_print_rows(library.ALBUM_KEYS)
_show_plugin_fields(True)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# import: Autotagger and importer.
VARIOUS_ARTISTS = u'Various Artists'
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
disambig = []
if info.data_source and info.data_source != 'MusicBrainz':
disambig.append(info.data_source)
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(unicode(info.year))
if info.country:
disambig.append(info.country)
if info.label:
disambig.append(info.label)
if info.albumdisambig:
disambig.append(info.albumdisambig)
if disambig:
return u', '.join(disambig)
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
out = '%.1f%%' % ((1 - dist) * 100)
if dist <= config['match']['strong_rec_thresh'].as_number():
out = ui.colorize('green', out)
elif dist <= config['match']['medium_rec_thresh'].as_number():
out = ui.colorize('yellow', out)
else:
out = ui.colorize('red', out)
return out
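# For example (illustrative), a distance of 0.05 is rendered as '95.0%' and
# colorized green when it clears the strong-recommendation threshold.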
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace('album_', '')
key = key.replace('track_', '')
key = key.replace('_', ' ')
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ['...']
return ui.colorize('yellow', '(%s)' % ', '.join(penalties))
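# E.g. (illustrative): a distance penalized on 'album_year' and 'tracks'
# renders as the yellow string '(year, tracks)'.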
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
def show_album(artist, album):
if artist:
album_description = u' %s - %s' % (artist, album)
elif album:
album_description = u' %s' % album
else:
album_description = u' (unknown album)'
print_(album_description)
def format_index(track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums > 1:
return u'{0}-{1}'.format(medium, medium_index)
else:
return unicode(medium_index)
else:
return unicode(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
(cur_album != match.info.album and
match.info.album != VARIOUS_ARTISTS):
artist_l, artist_r = cur_artist or '', match.info.artist
album_l, album_r = cur_album or '', match.info.album
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = u'', u''
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
album_l, album_r = ui.colordiff(album_l, album_r)
print_("Correcting tags from:")
show_album(artist_l, album_l)
print_("To:")
show_album(artist_r, album_r)
else:
print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
# Tracks.
pairs = match.mapping.items()
pairs.sort(key=lambda (_, track_info): track_info.index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
# characters) of the uncolorized LHS.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# Medium number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
media = match.info.media or 'Media'
if match.info.mediums > 1 and track_info.disctitle:
lhs = '%s %s: %s' % (media, track_info.medium,
track_info.disctitle)
elif match.info.mediums > 1:
lhs = '%s %s' % (media, track_info.medium)
elif track_info.disctitle:
lhs = '%s: %s' % (media, track_info.disctitle)
else:
lhs = None
if lhs:
lines.append((lhs, '', 0))
medium, disctitle = track_info.medium, track_info.disctitle
# Titles.
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
lhs, rhs = cur_title, new_title
else:
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
cur_track, new_track = format_index(item), format_index(track_info)
if cur_track != new_track:
if item.track in (track_info.index, track_info.medium_index):
color = 'lightgray'
else:
color = 'red'
if (cur_track + new_track).count('-') == 1:
lhs_track, rhs_track = ui.colorize(color, cur_track), \
ui.colorize(color, new_track)
else:
color = 'red'
lhs_track, rhs_track = ui.color_diff_suffix(cur_track,
new_track)
templ = ui.colorize(color, u' (#') + u'{0}' + \
ui.colorize(color, u')')
lhs += templ.format(lhs_track)
rhs += templ.format(rhs_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
lhs_length, rhs_length = ui.color_diff_suffix(cur_length,
new_length)
templ = ui.colorize('red', u' (') + u'{0}' + \
ui.colorize('red', u')')
lhs += templ.format(lhs_length)
rhs += templ.format(rhs_length)
lhs_width += len(cur_length) + 3
# Penalties.
penalties = penalty_string(match.distance.tracks[track_info])
if penalties:
rhs += ' %s' % penalties
if lhs != rhs:
lines.append((' * %s' % lhs, rhs, lhs_width))
elif config['import']['detail']:
lines.append((' * %s' % lhs, '', lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if not rhs:
print_(lhs)
elif max_width > col_width:
print_(u'%s ->\n %s' % (lhs, rhs))
else:
pad = max_width - lhs_width
print_(u'%s%s -> %s' % (lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
if match.extra_tracks:
print_('Missing tracks:')
for track_info in match.extra_tracks:
line = ' ! %s (#%s)' % (track_info.title, format_index(track_info))
if track_info.length:
line += ' (%s)' % ui.human_seconds_short(track_info.length)
print_(ui.colorize('yellow', line))
if match.extra_items:
print_('Unmatched tracks:')
for item in match.extra_items:
line = ' ! %s (#%s)' % (item.title, format_index(item))
if item.length:
line += ' (%s)' % ui.human_seconds_short(item.length)
print_(ui.colorize('yellow', line))
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
cur_artist, new_artist = item.artist, match.info.artist
cur_title, new_title = item.title, match.info.title
if cur_artist != new_artist or cur_title != new_title:
cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
cur_title, new_title = ui.colordiff(cur_title, new_title)
print_("Correcting track tags from:")
print_(" %s - %s" % (cur_artist, cur_title))
print_("To:")
print_(" %s - %s" % (new_artist, new_title))
else:
print_("Tagging track: %s - %s" % (cur_artist, cur_title))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
def _summary_judgment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return an action or None if the user should be
queried. May also print to the console if a summary judgment is
made.
"""
if config['import']['quiet']:
if rec == recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
})
elif rec == recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
'ask': None,
})
else:
return None
if action == importer.action.SKIP:
print_('Skipping.')
elif action == importer.action.ASIS:
print_('Importing as-is.')
return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
    objects depending on `singleton`. For albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
Returns the result of the choice, which may SKIP, ASIS, TRACKS, or
MANUAL or a candidate (an AlbumMatch/TrackMatch object).
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Zero candidates.
if not candidates:
if singleton:
print_("No matching recordings found.")
opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id',
'aBort')
else:
print_("No matching release found for {0} tracks."
.format(itemcount))
print_('For help, see: '
'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts)
if sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 's':
return importer.action.SKIP
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_(u'Finding tags for {0} "{1} - {2}".'.format(
u'track' if singleton else u'album',
item.artist if singleton else cur_artist,
item.title if singleton else cur_album,
))
print_(u'Candidates:')
for i, match in enumerate(candidates):
# Index, metadata, and distance.
line = [
u'{0}.'.format(i + 1),
u'{0} - {1}'.format(
match.info.artist,
match.info.title if singleton else match.info.album,
),
u'({0})'.format(dist_string(match.distance)),
]
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
line.append(penalties)
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
line.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(line))
# Ask the user for a choice.
if singleton:
opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id',
'aBort')
else:
opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts, numrange=(1, len(candidates)))
if sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 'm':
pass
elif sel == 'e':
return importer.action.MANUAL
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
if singleton:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'Enter search', 'enter Id', 'aBort')
else:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'as Tracks', 'Group albums', 'Enter search', 'enter Id',
'aBort')
default = config['import']['default_action'].as_choice({
'apply': 'a',
'skip': 's',
'asis': 'u',
'none': None,
})
if default is None:
require = True
sel = ui.input_options(opts, require=require, default=default)
if sel == 'a':
return match
elif sel == 'g':
return importer.action.ALBUMS
elif sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
def manual_search(singleton):
"""Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_('Artist:')
name = input_('Track:' if singleton else 'Album:')
return artist.strip(), name.strip()
def manual_id(singleton):
"""Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = u'Enter {0} ID:'.format('recording' if singleton else 'release')
return input_(prompt).strip()
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal.
"""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
print_(displayable_path(task.paths, u'\n') +
u' ({0} items)'.format(len(task.items)))
# Take immediate action if appropriate.
        action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
candidates, rec = task.candidates, task.rec
while True:
# Ask for a choice from the user.
choice = choose_candidate(candidates, False, rec, task.cur_artist,
task.cur_album, itemcount=len(task.items))
# Choose which tags to use.
if choice in (importer.action.SKIP, importer.action.ASIS,
importer.action.TRACKS, importer.action.ALBUMS):
# Pass selection to main control flow.
return choice
elif choice is importer.action.MANUAL:
# Try again with manual search terms.
search_artist, search_album = manual_search(False)
_, _, candidates, rec = autotag.tag_album(
task.items, search_artist, search_album
)
elif choice is importer.action.MANUAL_ID:
# Try a manually-entered ID.
search_id = manual_id(False)
if search_id:
_, _, candidates, rec = autotag.tag_album(
task.items, search_id=search_id
)
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(task.item.path)
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
        action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choice = choose_candidate(candidates, True, rec, item=task.item)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice == importer.action.TRACKS:
assert False # TRACKS is only legal for albums.
elif choice == importer.action.MANUAL:
# Continue in the loop with a new set of candidates.
search_artist, search_title = manual_search(True)
candidates, rec = autotag.tag_item(task.item, search_artist,
search_title)
elif choice == importer.action.MANUAL_ID:
# Ask for a track ID.
search_id = manual_id(True)
if search_id:
candidates, rec = autotag.tag_item(task.item,
search_id=search_id)
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warn("This %s is already in the library!" %
("album" if task.is_album else "item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
log.info('Skipping.')
sel = 's'
else:
sel = ui.input_options(
('Skip new', 'Keep both', 'Remove old')
)
if sel == 's':
# Skip new.
task.set_choice(importer.action.SKIP)
elif sel == 'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == 'r':
# Remove old.
task.remove_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn(u"Import of the directory:\n{0}\n"
"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
# The import command.
def import_files(lib, paths, query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check the user-specified directories.
for path in paths:
fullpath = syspath(normpath(path))
if not config['import']['singletons'] and not os.path.isdir(fullpath):
raise ui.UserError(u'not a directory: {0}'.format(
displayable_path(path)))
elif config['import']['singletons'] and not os.path.exists(fullpath):
raise ui.UserError(u'no such file: {0}'.format(
displayable_path(path)))
# Check parameter consistency.
if config['import']['quiet'] and config['import']['timid']:
raise ui.UserError("can't be both quiet and timid")
# Open the log.
if config['import']['log'].get() is not None:
logpath = config['import']['log'].as_filename()
try:
logfile = codecs.open(syspath(logpath), 'a', 'utf8')
except IOError:
raise ui.UserError(u"could not open log file for writing: %s" %
displayable_path(logpath))
print(u'import started', time.asctime(), file=logfile)
else:
logfile = None
# Never ask for input in quiet mode.
if config['import']['resume'].get() == 'ask' and \
config['import']['quiet']:
config['import']['resume'] = False
session = TerminalImportSession(lib, logfile, paths, query)
try:
session.run()
finally:
# If we were logging, close the file.
if logfile:
print(u'', file=logfile)
logfile.close()
# Emit event.
plugins.send('import', lib=lib, paths=paths)
import_cmd = ui.Subcommand('import', help='import new music',
aliases=('imp', 'im'))
import_cmd.parser.add_option('-c', '--copy', action='store_true',
default=None, help="copy tracks into library directory (default)")
import_cmd.parser.add_option('-C', '--nocopy', action='store_false',
dest='copy', help="don't copy tracks (opposite of -c)")
import_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
import_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
import_cmd.parser.add_option('-a', '--autotag', action='store_true',
dest='autotag', help="infer tags for imported files (default)")
import_cmd.parser.add_option('-A', '--noautotag', action='store_false',
dest='autotag',
help="don't infer tags for imported files (opposite of -a)")
import_cmd.parser.add_option('-p', '--resume', action='store_true',
default=None, help="resume importing if interrupted")
import_cmd.parser.add_option('-P', '--noresume', action='store_false',
dest='resume', help="do not try to resume importing")
import_cmd.parser.add_option('-q', '--quiet', action='store_true',
dest='quiet', help="never prompt for input: skip albums instead")
import_cmd.parser.add_option('-l', '--log', dest='log',
help='file to log untaggable albums for later review')
import_cmd.parser.add_option('-s', '--singletons', action='store_true',
help='import individual tracks instead of full albums')
import_cmd.parser.add_option('-t', '--timid', dest='timid',
action='store_true', help='always confirm all actions')
import_cmd.parser.add_option('-L', '--library', dest='library',
action='store_true', help='retag items matching a query')
import_cmd.parser.add_option('-i', '--incremental', dest='incremental',
action='store_true', help='skip already-imported directories')
import_cmd.parser.add_option('-I', '--noincremental', dest='incremental',
action='store_false', help='do not skip already-imported directories')
import_cmd.parser.add_option('--flat', dest='flat',
action='store_true', help='import an entire tree as a single album')
import_cmd.parser.add_option('-g', '--group-albums', dest='group_albums',
    action='store_true', help='group tracks in a folder into separate albums')
def import_func(lib, opts, args):
config['import'].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config['import']['move'] = False
if opts.library:
query = decargs(args)
paths = []
else:
query = None
paths = args
if not paths:
raise ui.UserError('no path specified')
import_files(lib, paths, query)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
tmpl = Template(ui._pick_format(album, fmt))
if album:
for album in lib.albums(query):
ui.print_obj(album, lib, tmpl)
else:
for item in lib.items(query):
ui.print_obj(item, lib, tmpl)
list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',))
list_cmd.parser.add_option('-a', '--album', action='store_true',
help='show matching albums instead of tracks')
list_cmd.parser.add_option('-p', '--path', action='store_true',
help='print paths for matched items or albums')
list_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def list_func(lib, opts, args):
if opts.path:
fmt = '$path'
else:
fmt = opts.format
list_items(lib, decargs(args), opts.album, fmt)
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
"""
with lib.transaction():
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
ui.print_obj(item, lib)
ui.print_(ui.colorize('red', u' deleted'))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(u'skipping %s because mtime is up to date (%i)' %
(displayable_path(item.path), item.mtime))
continue
# Read new data.
try:
item.read()
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc))
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard('albumartist')
# Check for and display changes.
changed = ui.show_model_changes(item,
fields=library.ITEM_KEYS_META)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move()
item.store()
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store()
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug('emptied album %i' % album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.ALBUM_KEYS_ITEM:
album[key] = first_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug('moving album %i' % album_id)
album.move()
update_cmd = ui.Subcommand('update',
    help='update the library', aliases=('upd', 'up',))
update_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
update_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
update_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
update_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, opts.move, opts.pretend)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
# Show all the items.
for item in items:
ui.print_obj(item, lib)
# Confirm with user.
print_()
if delete:
prompt = 'Really DELETE %i files (y/n)?' % len(items)
else:
prompt = 'Really remove %i items from the library (y/n)?' % \
len(items)
if not ui.input_yn(prompt, True):
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in (albums if album else items):
obj.remove(delete)
remove_cmd = ui.Subcommand('remove',
help='remove matching items from the library', aliases=('rm',))
remove_cmd.parser.add_option("-d", "--delete", action="store_true",
help="also remove files from disk")
remove_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete)
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
for item in items:
if exact:
total_size += os.path.getsize(item.path)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
albums.add(item.album)
    size_str = ui.human_bytes(total_size)
if exact:
size_str += ' ({0} bytes)'.format(total_size)
print_("""Tracks: {0}
Total time: {1} ({2:.2f} seconds)
Total size: {3}
Artists: {4}
Albums: {5}""".format(total_items, ui.human_seconds(total_time), total_time,
size_str, len(artists), len(albums)))
stats_cmd = ui.Subcommand('stats',
help='show statistics about the library or a query')
stats_cmd.parser.add_option('-e', '--exact', action='store_true',
help='get exact file sizes')
def stats_func(lib, opts, args):
show_stats(lib, decargs(args), opts.exact)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
print_('beets version %s' % beets.__version__)
# Show plugins.
names = [p.name for p in plugins.find_plugins()]
if names:
print_('plugins:', ', '.join(names))
else:
print_('no plugins loaded')
version_cmd = ui.Subcommand('version',
help='output version information')
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
"""Modifies matching items according to key=value assignments."""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
fsets = {}
for mod in mods:
key, value = mod.split('=', 1)
fsets[key] = model_cls._parse(key, value)
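    # e.g. (illustrative) "year=2001" stores fsets['year'] as the value
    # produced by model_cls._parse, which coerces the string to the field's
    # type.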
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_('Modifying %i %ss.' % (len(objs), 'album' if album else 'item'))
changed = set()
for obj in objs:
for field, value in fsets.iteritems():
obj[field] = value
for field in dels:
del obj[field]
if ui.show_model_changes(obj):
changed.add(obj)
# Still something to do?
if not changed:
print_('No changes to make.')
return
# Confirm action.
if confirm:
extra = ' and write tags' if write else ''
if not ui.input_yn('Really modify%s (Y/n)?' % extra):
return
# Apply changes to database.
with lib.transaction():
for obj in changed:
if move:
cur_path = obj.path
if lib.directory in ancestry(cur_path): # In library?
log.debug('moving object %s' % cur_path)
obj.move()
obj.store()
# Apply tags if requested.
if write:
if album:
changed_items = itertools.chain(*(a.items() for a in changed))
else:
changed_items = changed
for item in changed_items:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
modify_cmd = ui.Subcommand('modify',
help='change metadata fields', aliases=('mod',))
modify_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
modify_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
modify_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
modify_cmd.parser.add_option('-a', '--album', action='store_true',
help='modify whole albums instead of tracks')
modify_cmd.parser.add_option('-y', '--yes', action='store_true',
help='skip confirmation')
modify_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def modify_func(lib, opts, args):
args = decargs(args)
mods = []
dels = []
query = []
for arg in args:
if arg.endswith('!') and '=' not in arg and ':' not in arg:
dels.append(arg[:-1])
elif '=' in arg:
mods.append(arg)
else:
query.append(arg)
if not mods and not dels:
raise ui.UserError('no modifications specified')
write = opts.write if opts.write is not None else \
config['import']['write'].get(bool)
modify_items(lib, mods, dels, query, write, opts.move, opts.album,
not opts.yes)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
"""
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
action = 'Copying' if copy else 'Moving'
entity = 'album' if album else 'item'
log.info('%s %i %ss.' % (action, len(objs), entity))
for obj in objs:
log.debug('moving: %s' % obj.path)
obj.move(copy, basedir=dest)
obj.store()
move_cmd = ui.Subcommand('move',
help='move or copy items', aliases=('mv',))
move_cmd.parser.add_option('-d', '--dest', metavar='DIR', dest='dest',
help='destination directory')
move_cmd.parser.add_option('-c', '--copy', default=False, action='store_true',
help='copy instead of moving')
move_cmd.parser.add_option('-a', '--album', default=False, action='store_true',
help='match whole albums instead of tracks')
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(dest):
raise ui.UserError('no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album)
move_cmd.func = move_func
default_commands.append(move_cmd)
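# Illustrative (hypothetical) invocations:
#   beet move artist:Bowie          # re-file matches inside the library dir
#   beet move -c -d /mnt/backup     # copy all items to another base dir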
# write: Write tags into files.
def write_items(lib, query, pretend):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
        # Skip items whose file has gone missing from disk.
if not os.path.exists(syspath(item.path)):
log.info(u'missing file: {0}'.format(
util.displayable_path(item.path)
))
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
# Check for and display changes.
changed = ui.show_model_changes(item, clean_item,
library.ITEM_KEYS_WRITABLE, always=True)
if changed and not pretend:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
write_cmd = ui.Subcommand('write', help='write tag information to files')
write_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
def write_func(lib, opts, args):
write_items(lib, decargs(args), opts.pretend)
write_cmd.func = write_func
default_commands.append(write_cmd)
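# Illustrative (hypothetical) invocation:
#   beet write -p genre:jazz
# previews the tag changes for matching items without touching any files.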
# config: Show and edit user configuration.
config_cmd = ui.Subcommand('config',
help='show or edit the user configuration')
config_cmd.parser.add_option('-p', '--paths', action='store_true',
help='show files that configuration was loaded from')
config_cmd.parser.add_option('-e', '--edit', action='store_true',
help='edit user configuration with $EDITOR')
config_cmd.parser.add_option('-d', '--defaults', action='store_true',
help='include the default configuration')
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
        # The user config file may not have been loaded as a source (for
        # example, if it does not exist yet), so show its path first anyway.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print(filename)
# Open in editor.
elif opts.edit:
path = config.user_config_path()
if 'EDITOR' in os.environ:
editor = os.environ['EDITOR']
args = [editor, editor, path]
elif platform.system() == 'Darwin':
args = ['open', 'open', '-n', path]
elif platform.system() == 'Windows':
            # On Windows, executing the config file directly makes the OS
            # launch whatever application is associated with it.
args = [path, path]
else:
# Assume Unix
args = ['xdg-open', 'xdg-open', path]
try:
os.execlp(*args)
except OSError:
raise ui.UserError("Could not edit configuration. Please"
"set the EDITOR environment variable.")
# Dump configuration.
else:
print(config.dump(full=opts.defaults))
config_cmd.func = config_func
default_commands.append(config_cmd)
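# Illustrative (hypothetical) invocations:
#   beet config          # dump the user-set configuration values
#   beet config -d       # include the built-in defaults in the dump
#   beet config -p       # list the files configuration was loaded from
#   beet config -e       # open the user config file in $EDITOR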
# completion: print completion script
completion_cmd = ui.Subcommand('completion',
help='print shell script that provides command line completion')
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print(line, end='')
if not (os.path.isfile(u'/etc/bash_completion') or
os.path.isfile(u'/usr/share/bash-completion/bash_completion') or
            os.path.isfile(u'/usr/local/share/bash-completion/bash_completion')):
log.warn(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.')
def completion_script(commands):
"""Yield the full completion shell script as strings.
    ``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
    # Emit the static helper script before the generated completion data.
    base_script_path = os.path.join(_package_path('beets.ui'),
                                    'completion_base.sh')
    with open(base_script_path, 'r') as base_script:
        yield base_script.read()
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
aliases[alias] = name
options[name] = {'flags': [], 'opts': []}
        # Skip the parser's built-in --help option; -h/--help is added to
        # every command through the '_common' entry below.
        for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = 'flags'
else:
option_type = 'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options['_global'] = {
'flags': ['-v', '--verbose'],
'opts': '-l --library -c --config -d --directory -h --help'.split(' ')
}
# Help subcommand
command_names.append('help')
# Add flags common to all commands
options['_common'] = {
'flags': ['-h', '--help']
}
# Start generating the script
yield "_beet() {\n"
# Command names
yield " local commands='%s'\n" % ' '.join(command_names)
yield "\n"
# Command aliases
yield " local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield " local alias__%s=%s\n" % (alias, cmd)
yield '\n'
# Fields
yield " fields='%s'\n" % ' '.join(
set(library.ITEM_KEYS + library.ALBUM_KEYS))
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = ' '.join(option_list)
yield " local %s__%s='%s'\n" % (option_type, cmd, option_list)
yield ' _beet_dispatch\n'
yield '}\n'
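    # For illustration, the assembled script looks roughly like this
    # (hypothetical excerpt; contents depend on the registered commands):
    #   _beet() {
    #     local commands='config modify move write ...'
    #     local flags__move='-c --copy -a --album'
    #     _beet_dispatch
    #   }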
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
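# Illustrative setup: add the following line to ~/.bashrc to enable tab
# completion for beet subcommands, aliases, and their options:
#   eval "$(beet completion)"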