repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
gdsfactory/gdsfactory | pp/components/coupler.py | 1 | 2755 | import pp
from pp.component import Component
from pp.components.coupler_straight import coupler_straight
from pp.components.coupler_symmetric import coupler_symmetric
from pp.cross_section import get_waveguide_settings
from pp.snap import assert_on_1nm_grid
from pp.types import ComponentFactory
@pp.cell_with_validator
def coupler(
gap: float = 0.236,
length: float = 20.0,
coupler_symmetric_factory: ComponentFactory = coupler_symmetric,
coupler_straight_factory: ComponentFactory = coupler_straight,
dy: float = 5.0,
dx: float = 10.0,
waveguide: str = "strip",
**kwargs
) -> Component:
r"""Symmetric coupler.
Args:
gap: between straights
length: of coupling region
coupler_symmetric_factory
coupler_straight_factory
dy: port to port vertical spacing
dx: length of bend in x direction
waveguide: from tech.waveguide
kwargs: overwrites waveguide_settings
.. code::
dx dx
|------| |------|
W1 ________ _______E1
\ / |
\ length / |
======================= gap | dy
/ \ |
________/ \_______ |
W0 E0
coupler_straight_factory coupler_symmetric_factory
"""
assert_on_1nm_grid(length)
assert_on_1nm_grid(gap)
c = Component()
waveguide_settings = get_waveguide_settings(waveguide, **kwargs)
sbend = coupler_symmetric_factory(gap=gap, dy=dy, dx=dx, **waveguide_settings)
sr = c << sbend
sl = c << sbend
cs = c << coupler_straight_factory(length=length, gap=gap, **waveguide_settings)
sl.connect("W1", destination=cs.ports["W0"])
sr.connect("W0", destination=cs.ports["E0"])
c.add_port("W1", port=sl.ports["E0"])
c.add_port("W0", port=sl.ports["E1"])
c.add_port("E0", port=sr.ports["E0"])
c.add_port("E1", port=sr.ports["E1"])
c.absorb(sl)
c.absorb(sr)
c.absorb(cs)
c.length = sbend.length
c.min_bend_radius = sbend.min_bend_radius
return c
if __name__ == "__main__":
# c = pp.Component()
# cp1 = c << coupler(gap=0.2)
# cp2 = c << coupler(gap=0.5)
# cp1.ymin = 0
# cp2.ymin = 0
# c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(width=0.9, length=1, dy=2, gap=0.2)
# print(c.settings_changed)
c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(gap=0.2, waveguide="strip_heater")
c.show()
| mit | 505,920,847,848,893,060 | 29.955056 | 84 | 0.52559 | false |
Murali-group/GraphSpace | applications/uniprot/models.py | 1 | 1246 | from __future__ import unicode_literals
from sqlalchemy import ForeignKeyConstraint, text
from applications.users.models import *
from django.conf import settings
from graphspace.mixins import *
Base = settings.BASE
# ================== Table Definitions =================== #
class UniprotAlias(IDMixin, TimeStampMixin, Base):
__tablename__ = 'uniprot_alias'
accession_number = Column(String, nullable=False)
alias_source = Column(String, nullable=False)
alias_name = Column(String, nullable=False)
constraints = (
UniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),
)
indices = (
Index('uniprot_alias_idx_accession_number', text("accession_number gin_trgm_ops"), postgresql_using="gin"),
Index('uniprot_alias_idx_alias_name', text("alias_name gin_trgm_ops"), postgresql_using="gin"),
)
@declared_attr
def __table_args__(cls):
args = cls.constraints + cls.indices
return args
def serialize(cls, **kwargs):
return {
# 'id': cls.id,
'id': cls.accession_number,
'alias_source': cls.alias_source,
'alias_name': cls.alias_name,
'created_at': cls.created_at.isoformat(),
'updated_at': cls.updated_at.isoformat()
}
| gpl-2.0 | 1,488,446,659,459,923,500 | 27.976744 | 136 | 0.695024 | false |
rohithredd94/Computer-Vision-using-OpenCV | Particle-Filter-Tracking/PF_Tracker.py | 1 | 4110 | import cv2
import numpy as np
from similarity import *
from hist import *
class PF_Tracker:
def __init__(self, model, search_space, num_particles=100, state_dims=2,
control_std=10, sim_std=20, alpha=0.0):
self.model = model
self.search_space = search_space[::-1]
self.num_particles = num_particles
self.state_dims = state_dims
self.control_std = control_std
self.sim_std = sim_std
self.alpha = alpha
#Initialize particles using a uniform distribution
self.particles = np.array([np.random.uniform(0, self.search_space[i],self.num_particles) for i in range(self.state_dims)]).T
self.weights = np.ones(len(self.particles)) / len(self.particles)
self.idxs = np.arange(num_particles)
self.estimate_state()
def update(self, frame):
self.displace()
self.observe(frame)
self.resample()
self.estimate_state()
if self.alpha > 0:
self.update_model(frame)
def displace(self):
#Displace particles using a normal distribution centered around 0
self.particles += np.random.normal(0, self.control_std,
self.particles.shape)
def observe(self, img):
#Get patches corresponding to each particle
mh, mw = self.model.shape[:2]
minx = (self.particles[:,0] - mw/2).astype(np.int)
miny = (self.particles[:,1] - mh/2).astype(np.int)
candidates = [img[miny[i]:miny[i]+mh, minx[i]:minx[i]+mw]
for i in range(self.num_particles)]
#Compute importance weight - similarity of each patch to the model
self.weights = np.array([similarity(cand, self.model, self.sim_std) for cand in candidates])
self.weights /= np.sum(self.weights)
def resample(self):
sw, sh = self.search_space[:2]
mh, mw = self.model.shape[:2]
j = np.random.choice(self.idxs, self.num_particles, True,
p=self.weights.T) #Sample new particle indices using the distribution of the weights
control = np.random.normal(0, self.control_std, self.particles.shape) #Get a random control input from a normal distribution
self.particles = np.array(self.particles[j])
self.particles[:,0] = np.clip(self.particles[:,0], 0, sw - 1)
self.particles[:,1] = np.clip(self.particles[:,1], 0, sh - 1)
def estimate_state(self):
state_idx = np.random.choice(self.idxs, 1, p=self.weights)
self.state = self.particles[state_idx][0]
def update_model(self, frame):
#Get current model based on belief
mh, mw = self.model.shape[:2]
minx = int(self.state[0] - mw/2)
miny = int(self.state[1] - mh/2)
best_model = frame[miny:miny+mh, minx:minx+mw]
#Apply appearance model update if new model shape is unchanged
if best_model.shape == self.model.shape:
self.model = self.alpha * best_model + (1-self.alpha) * self.model
self.model = self.model.astype(np.uint8)
def visualize_filter(self, img):
self.draw_particles(img)
self.draw_window(img)
self.draw_std(img)
def draw_particles(self, img):
for p in self.particles:
cv2.circle(img, tuple(p.astype(int)), 2, (180,255,0), -1)
def draw_window(self, img):
best_idx = cv2.minMaxLoc(self.weights)[3][1]
best_state = self.particles[best_idx]
pt1 = (best_state - np.array(self.model.shape[::-1])/2).astype(np.int)
pt2 = pt1 + np.array(self.model.shape[::-1])
cv2.rectangle(img, tuple(pt1), tuple(pt2), (0,255,0), 2)
def draw_std(self, img):
weighted_sum = 0
dist = np.linalg.norm(self.particles - self.state)
weighted_sum = np.sum(dist * self.weights.reshape((-1,1)))
cv2.circle(img, tuple(self.state.astype(np.int)),
int(weighted_sum), (255,255,255), 1) | mit | -8,084,760,379,983,337,000 | 37.92233 | 132 | 0.584672 | false |
weso/CWR-DataApi | tests/grammar/factory/record/test_publisher_territory.py | 1 | 4905 | # -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Publisher Territory of Control (SPT) grammar tests.
The following cases are tested:
- correctly formatted records, with all optional fields present and with only the mandatory ones, decode into the expected field values
- empty or invalid strings raise a ParseException
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestPublisherTerritoryGrammar(unittest.TestCase):
"""
Tests that the NPN grammar decodes correctly formatted strings
"""
def setUp(self):
self.grammar = get_record_grammar('publisher_territory')
def test_valid_common(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT000001790000054770 013330133301333I0484Y001'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(179, result.transaction_sequence_n)
self.assertEqual(547, result.record_sequence_n)
self.assertEqual('70', result.ip_n)
self.assertEqual(13.33, result.pr_collection_share)
self.assertEqual(13.33, result.mr_collection_share)
self.assertEqual(13.33, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(484, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(1, result.sequence_n)
def test_valid_common_short(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT0000000100000002160694172 050000500000000I0484N01'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1, result.transaction_sequence_n)
self.assertEqual(2, result.record_sequence_n)
self.assertEqual('160694172', result.ip_n)
self.assertEqual(50, result.pr_collection_share)
self.assertEqual(50, result.mr_collection_share)
self.assertEqual(0, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(484, result.tis_numeric_code)
self.assertEqual(False, result.shares_change)
self.assertEqual(1, result.sequence_n)
def test_valid_full(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT0000123400000023A12345678 010120500002520I0008Y012'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('A12345678', result.ip_n)
self.assertEqual(10.12, result.pr_collection_share)
self.assertEqual(50, result.mr_collection_share)
self.assertEqual(25.2, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(8, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(12, result.sequence_n)
def test_valid_min(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test leaves the optional fields empty.
"""
record = 'SPT0000000100000001 000000000000000I0008Y012'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1, result.transaction_sequence_n)
self.assertEqual(1, result.record_sequence_n)
self.assertEqual(None, result.ip_n)
self.assertEqual(0, result.pr_collection_share)
self.assertEqual(0, result.mr_collection_share)
self.assertEqual(0, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(8, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(12, result.sequence_n)
class TestPublisherTerritoryGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('publisher_territory')
def test_empty(self):
"""
Tests that a exception is thrown when the the works number is zero.
"""
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
| mit | 8,518,170,453,060,950,000 | 36.435115 | 102 | 0.681077 | false |
dc3-plaso/plaso | tests/storage/fake_storage.py | 1 | 6205 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fake storage."""
import unittest
from plaso.containers import errors
from plaso.containers import event_sources
from plaso.containers import reports
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage import fake_storage
from plaso.storage import zip_file
from tests import test_lib as shared_test_lib
from tests.storage import test_lib
class FakeStorageWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer object."""
def testAddAnalysisReport(self):
"""Tests the AddAnalysisReport function."""
session = sessions.Session()
analysis_report = reports.AnalysisReport(
plugin_name=u'test', text=u'test report')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddAnalysisReport(analysis_report)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAnalysisReport(analysis_report)
def testAddError(self):
"""Tests the AddError function."""
session = sessions.Session()
extraction_error = errors.ExtractionError(
message=u'Test extraction error')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddError(extraction_error)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddError(extraction_error)
def testAddEvent(self):
"""Tests the AddEvent function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEvent(event)
def testAddEventSource(self):
"""Tests the AddEventSource function."""
session = sessions.Session()
event_source = event_sources.EventSource()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddEventSource(event_source)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventSource(event_source)
def testAddEventTag(self):
"""Tests the AddEventTag function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
event_tags = self._CreateTestEventTags()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
for event in test_events:
storage_writer.AddEvent(event)
event_tag = None
for event_tag in event_tags:
storage_writer.AddEventTag(event_tag)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventTag(event_tag)
def testOpenClose(self):
"""Tests the Open and Close functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
storage_writer.Close()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.Open()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
# TODO: add test for GetEvents.
# TODO: add test for GetFirstWrittenEventSource and
# GetNextWrittenEventSource.
@shared_test_lib.skipUnlessHasTestFile([u'psort_test.json.plaso'])
@shared_test_lib.skipUnlessHasTestFile([u'pinfo_test.json.plaso'])
def testMergeFromStorage(self):
"""Tests the MergeFromStorage function."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
test_file = self._GetTestFilePath([u'psort_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
test_file = self._GetTestFilePath([u'pinfo_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
storage_writer.Close()
# TODO: add test for GetNextEventSource.
def testWriteSessionStartAndCompletion(self):
"""Tests the WriteSessionStart and WriteSessionCompletion functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.WriteSessionStart()
storage_writer.WriteSessionCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer.Close()
def testWriteTaskStartAndCompletion(self):
"""Tests the WriteTaskStart and WriteTaskCompletion functions."""
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_writer.Open()
storage_writer.WriteTaskStart()
storage_writer.WriteTaskCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,102,286,598,320,775,000 | 27.204545 | 75 | 0.72361 | false |
ella/django-ratings | django_ratings/aggregation.py | 1 | 1768 | """
This module aggregates records from the Rating and Agg tables into the Agg and TotalRate tables.
"""
import logging
from datetime import datetime, timedelta
from django_ratings.models import Rating, Agg, TotalRate
logger = logging.getLogger('django_ratings')
# aggregate ratings older than 2 years by year
DELTA_TIME_YEAR = 2*365*24*60*60
# ratings older than 2 months by month
DELTA_TIME_MONTH = 2*30*24*60*60
# rest of the ratings (last 2 months) aggregate daily
DELTA_TIME_DAY = -24*60*60
TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'}
def transfer_agg_to_totalrate():
"""
Transfer aggregation data from table Agg to table TotalRate
"""
logger.info("transfer_agg_to_totalrate BEGIN")
if TotalRate.objects.count() != 0:
TotalRate.objects.all().delete()
Agg.objects.agg_to_totalrate()
logger.info("transfer_agg_to_totalrate END")
def transfer_agg_to_agg():
"""
aggregation data from table Agg to table Agg
"""
logger.info("transfer_agg_to_agg BEGIN")
timenow = datetime.now()
for t in TIMES_ALL:
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t])
Agg.objects.agg_assume()
logger.info("transfer_agg_to_agg END")
def transfer_data():
"""
transfer data from table Rating to table Agg
"""
logger.info("transfer_data BEGIN")
timenow = datetime.now()
for t in sorted(TIMES_ALL.keys(), reverse=True):
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t])
transfer_agg_to_agg()
transfer_agg_to_totalrate()
logger.info("transfer_data END")
| bsd-3-clause | 4,587,567,461,580,501,500 | 28.966102 | 90 | 0.675339 | false |
natcoin/natcoin | contrib/bitrpc/bitrpc.py | 1 | 7836 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Natcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Natcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit | 1,866,030,577,551,255,300 | 23.185185 | 79 | 0.66169 | false |
javipalanca/ojoalplato | ojoalplato/users/models.py | 1 | 1358 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
USER_STATUS_CHOICES = (
(0, "active"),
)
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
login = models.CharField(max_length=60, default="")
url = models.URLField(max_length=100, blank=True)
activation_key = models.CharField(max_length=60, default="0")
status = models.IntegerField(default=0, choices=USER_STATUS_CHOICES)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
class UserMeta(models.Model):
"""
Meta information about a user.
"""
id = models.IntegerField(primary_key=True)
user = models.ForeignKey(User, related_name="meta", blank=True, null=True)
key = models.CharField(max_length=255)
value = models.TextField()
def __unicode__(self):
return u"%s: %s" % (self.key, self.value)
| mit | 3,922,233,845,667,998,000 | 30.581395 | 78 | 0.694404 | false |
dwitvliet/CATMAID | django/applications/catmaid/control/link.py | 1 | 6452 | import json
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from catmaid.models import UserRole, Project, Relation, Treenode, Connector, \
TreenodeConnector, ClassInstance
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
@requires_user_role(UserRole.Annotate)
def create_link(request, project_id=None):
""" Create a link, currently only a presynaptic_to or postsynaptic_to relationship
between a treenode and a connector.
"""
from_id = int(request.POST.get('from_id', 0))
to_id = int(request.POST.get('to_id', 0))
link_type = request.POST.get('link_type', 'none')
try:
project = Project.objects.get(id=project_id)
relation = Relation.objects.get(project=project, relation_name=link_type)
from_treenode = Treenode.objects.get(id=from_id)
to_connector = Connector.objects.get(id=to_id, project=project)
links = TreenodeConnector.objects.filter(
connector=to_id,
treenode=from_id,
relation=relation.id)
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if links.count() > 0:
return HttpResponse(json.dumps({'error': "A relation '%s' between these two elements already exists!" % link_type}))
related_skeleton_count = ClassInstance.objects.filter(project=project, id=from_treenode.skeleton.id).count()
if related_skeleton_count > 1:
# Can never happen. What motivated this check for an error of this kind? Would imply that a treenode belongs to more than one skeleton, which was possible when skeletons owned treenodes via element_of relations rather than by the skeleton_id column.
return HttpResponse(json.dumps({'error': 'Multiple rows for treenode with ID #%s found' % from_id}))
elif related_skeleton_count == 0:
return HttpResponse(json.dumps({'error': 'Failed to retrieve skeleton id of treenode #%s' % from_id}))
if link_type == 'presynaptic_to':
# Enforce only one presynaptic link
presyn_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
if (presyn_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s does not have zero presynaptic connections.' % to_id}))
# The object returned in case of success
result = {}
if link_type == 'postsynaptic_to':
# Warn if there is already a link from the source skeleton to the
# target skeleton. This can happen and is not necessarily wrong, but
# worth double checking, because it is likely a mistake.
post_links_to_skeleton = TreenodeConnector.objects.filter(project=project,
connector=to_connector, relation=relation, skeleton_id=from_treenode.skeleton_id).count()
if post_links_to_skeleton == 1:
result['warning'] = 'There is already one post-synaptic ' \
'connection to the target skeleton'
elif post_links_to_skeleton > 1:
result['warning'] = 'There are already %s post-synaptic ' \
'connections to the target skeleton' % post_links_to_skeleton
# Enforce only synaptic links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector,
relation__relation_name='gapjunction_with')
if (gapjunction_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s cannot have both a gap junction and a postsynaptic node.' % to_id}))
if link_type == 'gapjunction_with':
# Enforce only two gap junction links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
synapse_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name__endswith='synaptic_to')
if (gapjunction_links.count() > 1):
return HttpResponse(json.dumps({'error': 'Connector %s can only have two gap junction connections.' % to_id}))
if (synapse_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s is part of a synapse, and gap junction can not be added.' % to_id}))
# Enforce same relations across all linked connectors; only new postsynaptic links are valid
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot add %s connection to a linked connector.' % link_type}))
TreenodeConnector(
user=request.user,
project=project,
relation=relation,
treenode=from_treenode, # treenode_id = from_id
skeleton=from_treenode.skeleton, # treenode.skeleton_id where treenode.id = from_id
connector=to_connector # connector_id = to_id
).save()
result['message'] = 'success'
return HttpResponse(json.dumps(result), content_type='application/json')
@requires_user_role(UserRole.Annotate)
def delete_link(request, project_id=None):
connector_id = int(request.POST.get('connector_id', 0))
treenode_id = int(request.POST.get('treenode_id', 0))
links = TreenodeConnector.objects.filter(
connector=connector_id,
treenode=treenode_id)
if links.count() == 0:
return HttpResponse(json.dumps({'error': 'Failed to delete connector #%s from geometry domain.' % connector_id}))
# Enforce same relations across all linked connectors; only removal of postsynaptic links are valid
try:
to_connector = Connector.objects.get(id=connector_id, project=project_id)
link_type = links[0].relation.relation_name
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot remove %s connection to a linked connector.' % link_type}))
# Could be done by filtering above when obtaining the links,
# but then one cannot distinguish between the link not existing
# and the user_id not matching or not being superuser.
can_edit_or_fail(request.user, links[0].id, 'treenode_connector')
links[0].delete()
return HttpResponse(json.dumps({'result': 'Removed treenode to connector link'}))
| gpl-3.0 | -8,270,022,877,487,769,000 | 51.032258 | 257 | 0.688314 | false |
manankalra/Twitter-Sentiment-Analysis | main/sentiment/tweepy_demo/tweep.py | 1 | 1099 | #!/usr/bin/env python
"""
tweepy(Twitter API) demo
"""
__author__ = "Manan Kalra"
__email__ = "[email protected]"
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import time
# Add your own
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
class listener(StreamListener):
def on_data(self, raw_data):
try:
# print(raw_data)
tweet = raw_data.split(",\"text\":")[1].split(",\"source\"")[0]
print(tweet)
save_time = str(time.time()) + "::" + tweet
save_file = open('tweetDB.csv', 'a')
save_file.write(save_time)
save_file.write("\n")
save_file.close()
return True
except BaseException:
print("Failed")
def on_error(self, status_code):
print(status_code)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["<anything: noun/verb/adverb/...>"])
| mit | 3,081,403,557,936,290,300 | 23.422222 | 75 | 0.605096 | false |
Bladefidz/wfuzz | plugins/iterations.py | 1 | 2703 | from externals.moduleman.plugin import moduleman_plugin
import itertools
class piterator_void:
text="void"
def count(self):
return self.__count
def __init__(self, *i):
self._dic = i
self.__count = max(map(lambda x:x.count(), i))
self.it = self._dic[0]
def next(self):
return (self.it.next(),)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = self._dic[0]
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class zip:
name = "zip"
description = "Returns an iterator that aggregates elements from each of the iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.izip(*self._dic)
self.__count = min(map(lambda x:x.count(), i)) # Only possible match counted.
def count(self):
return self.__count
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.izip(*self._dic)
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class product:
name = "product"
description = "Returns an iterator cartesian product of input iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.product(*self._dic)
self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count())
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.product(*self._dic)
def count(self):
return self.__count
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class chain:
name = "chain"
description = "Returns an iterator returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted."
category = ["default"]
priority = 99
def count(self):
return self.__count
def __init__(self, *i):
self.__count = sum(map(lambda x:x.count(), i))
self._dic = i
self.it = itertools.chain(*i)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.chain(*self._dic)
def next(self):
return (self.it.next(),)
def __iter__(self):
self.restart()
return self
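# Rough illustration (not part of wfuzz itself): every argument passed to these
# iterators is expected to expose count(), restart() and next(), which is the
# only protocol the classes above rely on. A minimal stand-in payload and a
# product over two of them could look like:
#
#     class FakePayload:
#         def __init__(self, items):
#             self._items = list(items)
#             self._pos = 0
#         def count(self):
#             return len(self._items)
#         def restart(self):
#             self._pos = 0
#         def next(self):
#             if self._pos >= len(self._items):
#                 raise StopIteration
#             item = self._items[self._pos]
#             self._pos += 1
#             return item
#         def __iter__(self):
#             return self
#
#     for combo in product(FakePayload("ab"), FakePayload("12")):
#         print combo  # ('a', '1'), ('a', '2'), ('b', '1'), ('b', '2')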
| gpl-2.0 | 2,271,427,541,823,712,800 | 24.261682 | 181 | 0.564928 | false |
berkmancenter/mediacloud | apps/common/src/python/mediawords/db/locks.py | 1 | 3477 | """Constants and routines for handling advisory postgres locks."""
import mediawords.db
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
"""
This package just has constants that can be passed as the first key to the postgres pg_advisory_*lock functions.
If you are using an advisory lock, you should use the two key version and use a constant from this package to
avoid conflicts.
"""
# locks to make sure we are not mining or snapshotting a topic in more than one process at a time
LOCK_TYPES = {
'test-a': 10,
'test-b': 11,
'MediaWords::Job::TM::MineTopic': 12,
'MediaWords::Job::TM::SnapshotTopic': 13,
'MediaWords::TM::Media::media_normalized_urls': 14,
'MediaWords::Crawler::Engine::run_fetcher': 15,
# Testing lock types
'TestPerlWorkerLock': 900,
'TestPythonWorkerLock': 901,
}
class McDBLocksException(Exception):
"""Default exception for package."""
pass
def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool:
"""Get a postgres advisory lock with the lock_type and lock_id as the two keys.
Arguments:
db - db handle
lock_type - must be in LOCK_TYPES dict above
lock_id - id for the particular lock within the type
wait - if true, block while waiting for the lock, else return false if the lock is not available
Returns:
True if the lock is available
"""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if isinstance(wait, bytes):
wait = decode_object_from_bytes_if_needed(wait)
wait = bool(wait)
log.debug("trying for lock: %s, %d" % (lock_type, lock_id))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
if wait:
db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
return True
else:
r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash()
return r['locked']
def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None:
"""Release the postgres advisory lock if it is held."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list:
"""Return a list of all locked ids for the given lock_type."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
# noinspection SqlResolve
return db.query(
"select objid from pg_locks where locktype = 'advisory' and classid = %(a)s",
{'a': lock_type_id}).flat()
| agpl-3.0 | 1,828,699,678,270,009,600 | 33.425743 | 117 | 0.667242 | false |
QTek/QRadio | tramatego/src/tramatego/transforms/ipv4_to_score.py | 1 | 1161 | #!/usr/bin/env python
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from canari.maltego.entities import IPv4Address, Phrase
from common.launchers import get_qradio_data
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, TramaTego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform',
#'onterminate' # comment out this line if you don't need this function.
]
#@superuser
@configure(
label='IPv4 to Score',
description='Converts IPv4 into Score using QRadio.',
uuids=[ 'TramaTego.v1.IPv4ToScore' ],
inputs=[ ( 'TramaTego', IPv4Address ) ],
debug=True
)
def dotransform(request, response, config):
command = "--ipv4_to_score " + request.value
qradio_output = get_qradio_data(command, 3)
for entry in qradio_output:
response += Phrase(entry)
return response
def onterminate():
"""
TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable
"""
pass | apache-2.0 | 7,825,119,063,382,835,000 | 24.822222 | 115 | 0.676141 | false |
coinkite/connectrum | connectrum/findall.py | 1 | 4527 | #!/usr/bin/env python3
#
#
import bottom, random, time, asyncio
from .svr_info import ServerInfo
import logging
logger = logging.getLogger('connectrum')
class IrcListener(bottom.Client):
def __init__(self, irc_nickname=None, irc_password=None, ssl=True):
self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12)
self.password = irc_password or None
self.results = {} # by hostname
self.servers = set()
self.all_done = asyncio.Event()
super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl)
# setup event handling
self.on('CLIENT_CONNECT', self.connected)
self.on('PING', self.keepalive)
self.on('JOIN', self.joined)
self.on('RPL_NAMREPLY', self.got_users)
self.on('RPL_WHOREPLY', self.got_who_reply)
self.on("client_disconnect", self.reconnect)
self.on('RPL_ENDOFNAMES', self.got_end_of_names)
async def collect_data(self):
# start it process
self.loop.create_task(self.connect())
# wait until done
await self.all_done.wait()
# return the results
return self.results
def connected(self, **kwargs):
logger.debug("Connected")
self.send('NICK', nick=self.my_nick)
self.send('USER', user=self.my_nick, realname='Connectrum Client')
# long delay here as it does an failing Ident probe (10 seconds min)
self.send('JOIN', channel='#electrum')
#self.send('WHO', mask='E_*')
def keepalive(self, message, **kwargs):
self.send('PONG', message=message)
async def joined(self, nick=None, **kwargs):
# happens when we or someone else joins the channel
# seem to take 10 seconds or longer for me to join
logger.debug('Joined: %r' % kwargs)
if nick != self.my_nick:
await self.add_server(nick)
async def got_who_reply(self, nick=None, real_name=None, **kws):
'''
Server replied to one of our WHO requests, with details.
'''
#logger.debug('who reply: %r' % kws)
nick = nick[2:] if nick[0:2] == 'E_' else nick
host, ports = real_name.split(' ', 1)
self.servers.remove(nick)
logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports)
self.results[host.lower()] = ServerInfo(nick, host, ports)
if not self.servers:
self.all_done.set()
async def got_users(self, users=[], **kws):
# After successful join to channel, we are given a list of
# users on the channel. Happens a few times for busy channels.
logger.debug('Got %d (more) users in channel', len(users))
for nick in users:
await self.add_server(nick)
async def add_server(self, nick):
# ignore everyone but electrum servers
if nick.startswith('E_'):
self.servers.add(nick[2:])
async def who_worker(self):
# Fetch details on each Electrum server nick we see
logger.debug('who task starts')
copy = self.servers.copy()
for nn in copy:
logger.debug('do WHO for: ' + nn)
self.send('WHO', mask='E_'+nn)
logger.debug('who task done')
def got_end_of_names(self, *a, **k):
logger.debug('Got all the user names')
assert self.servers, "No one on channel!"
# ask for details on all of those users
self.loop.create_task(self.who_worker())
async def reconnect(self, **kwargs):
# Trigger an event that may cascade to a client_connect.
# Don't continue until a client_connect occurs, which may be never.
logger.warn("Disconnected (will reconnect)")
# Note that we're not in a coroutine, so we don't have access
# to await and asyncio.sleep
time.sleep(3)
# After this line we won't necessarily be connected.
# We've simply scheduled the connect to happen in the future
self.loop.create_task(self.connect())
logger.debug("Reconnect scheduled.")
if __name__ == '__main__':
import logging
logging.getLogger('bottom').setLevel(logging.DEBUG)
logging.getLogger('connectrum').setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
bot = IrcListener(ssl=False)
bot.loop.set_debug(True)
fut = bot.collect_data()
#bot.loop.create_task(bot.connect())
rv = bot.loop.run_until_complete(fut)
print(rv)
| mit | 8,429,156,360,593,355,000 | 31.106383 | 103 | 0.610559 | false |
wilima/cryptography | tests/test.py | 1 | 3828 | import unittest
from cryptography import (eratosthenes, euler, extended_gcd, factorization,
gcd, modular_multiplicative_inverse)
from cryptography.ciphers import affine, shift, substitution, vigener
from .context import cryptography
class GcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_gcd(self):
self.assertEqual(
gcd.gcd(1071, 462),
21)
def test_gcd2(self):
self.assertEqual(
gcd.gcd(270, 192),
6)
class ExtendedGcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_extended_gcd(self):
self.assertEqual(
extended_gcd.extended_gcd(1914, 899),
(29, 8, -17))
class ModularInverseTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_modular_inverse(self):
self.assertEqual(
modular_multiplicative_inverse.inverse(5, 26),
21)
class FactorizationTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_factorization(self):
self.assertEqual(
factorization.integer_factorization(315),
[3, 3, 5, 7])
class EratosthenesTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_eratosthenes_sieve(self):
self.assertEqual(
eratosthenes.eratosthenes_sieve(20),
[2, 3, 5, 7, 11, 13, 17, 19])
class EulerFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_euler_function(self):
self.assertEqual(
euler.euler_function(1),
1)
def test_euler_function2(self):
self.assertEqual(
euler.euler_function(5),
4)
class ShiftCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_shift_encrypt_function(self):
self.assertEqual(
shift.encrypt('BARBARIUTOCI', 3),
'eduedulxwrfl'.upper())
def test_shift_decrypt_function(self):
self.assertEqual(
shift.decrypt('eduedulxwrfl', 3),
'BARBARIUTOCI')
def test_shift_crack_function(self):
self.assertEqual(
'BARBARIUTOCI' in shift.crack('eduedulxwrfl', 26),
True)
class AffineCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_affine_encrypt_function(self):
self.assertEqual(
affine.encrypt('THEINITIAL', (5, 9)),
'ASDXWXAXJM')
def test_affine_decrypt_function(self):
self.assertEqual(
affine.decrypt('ASDXWXAXJM', (5, 9)),
'THEINITIAL')
def test_affine_crack_function(self):
self.assertEqual(
'THEINITIAL' in affine.crack('ASDXWXAXJM', 26),
True)
class SubstitutionCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_substitution_encrypt_function(self):
self.assertEqual(
substitution.encrypt('FLEEATONCEWEAREDISCOVERED', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'SIAAZQLKBAVAZOARFPBLUAOAR')
def test_substitution_decrypt_function(self):
self.assertEqual(
substitution.decrypt('SIAAZQLKBAVAZOARFPBLUAOAR', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'FLEEATONCEWEAREDISCOVERED')
class VigenerCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_vigener_encrypt_function(self):
self.assertEqual(
vigener.encrypt('KULTURNIATASEJESPION', 'PES'),
'ZYDIYJCMSIEKTNWHTADR')
def test_vigener_decrypt_function(self):
self.assertEqual(
vigener.decrypt('ZYDIYJCMSIEKTNWHTADR', 'PES'),
'KULTURNIATASEJESPION')
if __name__ == '__main__':
unittest.main()
| mit | -8,716,949,002,094,197,000 | 26.148936 | 124 | 0.625653 | false |
zstyblik/infernal-twin | sql_insert.py | 1 | 3025 | import MySQLdb
import db_connect_creds
from datetime import datetime
username, password = db_connect_creds.read_creds()
cxn = MySQLdb.connect('localhost', user=username, passwd=password)
date = datetime.now()
cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
cxn.commit()
cxn.close()
cxn = MySQLdb.connect(db='InfernalWireless')
cur = cxn.cursor()
current_project_id = 0
#~ cxn = MySQLdb.connect('localhost','root',"")
#~
#~ date = datetime.now()
#~
#~
#~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
#~
#~ cxn.commit()
#~ cxn.close()
#~
#~ cxn = MySQLdb.connect(db='InfernalWireless')
#~
#~ cur = cxn.cursor()
#~
#~ current_project_id = 0
def create_project_table():
##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT
#~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT")
PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects (
ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT, ProjectName TEXT, PRIMARY KEY (ProjectId), AuditorName TEXT, TargetName TEXT, date TEXT)'''
cur.execute(PROJECT_TITLE)
create_project_table()
def project_details(projectname, Authors_name, TargetName, date):
PROJECT_DETAILS = 'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) VALUES ("%s","%s","%s","%s")'%(projectname, Authors_name, TargetName, date)
cur.execute(PROJECT_DETAILS)
current_project_id_tmp = cur.lastrowid
current_project_id = current_project_id_tmp
print "report is generated"
return current_project_id_tmp
def create_report_table():
##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT
report_table = '''CREATE TABLE IF NOT EXISTS Reports (findingID MEDIUMINT NOT NULL AUTO_INCREMENT, finding_name TEXT, phase TEXT, PRIMARY KEY (findingID), risk_level TEXT, risk_category TEXT, Findings_detail TEXT, Notes TEXT, Project_fk_Id MEDIUMINT, FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))'''
cur.execute(report_table)
create_report_table()
def create_report(self, finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id):
########## THIS IS GOING TO INSERT DATA INTO FINDINGS TABLE
pID = current_project_id
REPORT_DETAILS = 'INSERT INTO Reports (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")'%( finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id)
cur.execute(REPORT_DETAILS)
print pID
def print_hello(test_data):
print test_data
################ DB POPULATE DATABASE ###########
#~ prID = project_details('test','est','23s','12/12/12')
#~
#~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID))
################################################################### DUMMY DATABASE QUERIES ##############
#~ print type(prID)
cur.close()
cxn.commit()
cxn.close()
print "DB has been updated"
| gpl-3.0 | -3,604,638,107,569,597,000 | 25.077586 | 315 | 0.676694 | false |
kevinlee12/oppia | core/domain/draft_upgrade_services_test.py | 1 | 56055 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for draft upgrade services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import draft_upgrade_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.tests import test_utils
import feconf
import python_utils
import utils
class DraftUpgradeUnitTests(test_utils.GenericTestBase):
"""Test the draft upgrade services module."""
EXP_ID = 'exp_id'
USER_ID = 'user_id'
OTHER_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'New title'
})]
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '0',
'to_version': python_utils.UNICODE(
feconf.CURRENT_STATE_SCHEMA_VERSION)
})]
DRAFT_CHANGELIST = [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'old_value': None,
'new_value': 'Updated title'})]
def setUp(self):
super(DraftUpgradeUnitTests, self).setUp()
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
def test_try_upgrade_with_no_version_difference(self):
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, 1, self.EXP_ID))
def test_try_upgrade_raises_exception_if_versions_are_invalid(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'Current draft version is greater than the exploration version.'):
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 2, 1, self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unsupported_commit_type(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unimplemented_upgrade_methods(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.EXP_MIGRATION_CHANGE_LIST,
'Ran Exploration Migration job.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_extract_html_from_draft_change_list(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'data_format': 'html',
'translation': html_content,
'needs_update': True
},
'hi': {
'data_format': 'html',
'translation': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'data_format': 'html',
'translation': html_content,
'needs_update': False
},
'en': {
'data_format': 'html',
'translation': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
list_of_html = (
draft_upgrade_services.extract_html_from_draft_change_list(
draft_change_list))
self.assertEqual(len(list_of_html), 27)
expected_html_strings = [
html_content, '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>',
'<p>This is solution for state1</p>', 'Hey!', 'hello!']
for html in list_of_html:
self.assertTrue(html in expected_html_strings)
class DraftUpgradeUtilUnitTests(test_utils.GenericTestBase):
"""Test the DraftUpgradeUtil module."""
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '34',
'to_version': '35'
})]
# EXP_ID and USER_ID used to create default explorations.
EXP_ID = 'exp_id'
USER_ID = 'user_id'
def create_and_migrate_new_exploration(
self, current_schema_version, target_schema_version):
"""Creates an exploration and applies a state schema migration to it.
Creates an exploration and migrates its state schema from version
current_schema_version to target_schema_version. Asserts that the
exploration was successfully migrated.
Args:
            current_schema_version: string. The current schema version of the
                exploration (e.g. '29').
            target_schema_version: string. The schema version to upgrade
                the exploration to (e.g. '30').
"""
# Create an exploration change list with the command that will migrate
# the schema from current_schema_version to target_schema_version.
exp_migration_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': current_schema_version,
'to_version': target_schema_version
})
]
# The migration will automatically migrate the exploration to the latest
# state schema version, so we set the latest schema version to be the
# target_schema_version.
with self.swap(
feconf, 'CURRENT_STATE_SCHEMA_VERSION',
int(target_schema_version)):
# Create and migrate the exploration.
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, exp_migration_change_list,
'Ran Exploration Migration job.')
# Assert that the update was applied and that the exploration state
# schema was successfully updated.
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertEqual(
python_utils.UNICODE(
exploration.states_schema_version),
target_schema_version)
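    # For example, self.create_and_migrate_new_exploration('29', '30') saves a fresh
    # exploration and applies the v29 -> v30 states schema migration, leaving the
    # exploration at version 2 (the setup the conversion tests below rely on).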
def test_convert_to_latest_schema_version_implemented(self):
state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION
conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
state_schema_version - 1, state_schema_version)
self.assertTrue(
hasattr(
draft_upgrade_services.DraftUpgradeUtil, conversion_fn_name),
msg='Current schema version is %d but DraftUpgradeUtil.%s is '
'unimplemented.' % (state_schema_version, conversion_fn_name))
def test_convert_states_v36_dict_to_v37_dict(self):
draft_change_list_v36 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'CaseSensitiveEquals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_v37 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
# Migrate exploration to state schema version 37.
self.create_and_migrate_new_exploration('36', '37')
migrated_draft_change_list_v37 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v36, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v37_dict_list = [
change.to_dict() for change in draft_change_list_v37
]
migrated_draft_change_list_v37_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v37
]
self.assertEqual(
draft_change_list_v37_dict_list,
migrated_draft_change_list_v37_dict_list)
def test_convert_states_v35_dict_to_v36_dict(self):
draft_change_list_1_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_customization_args',
'new_value': {}
})
]
draft_change_list_2_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
})
]
# Migrate exploration to state schema version 36.
self.create_and_migrate_new_exploration('35', '36')
migrated_draft_change_list_1_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v35, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v36)
migrated_draft_change_list_2_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v35, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v35_dict_list = [
change.to_dict() for change in draft_change_list_2_v35
]
migrated_draft_change_list_2_v36_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v36
]
self.assertEqual(
draft_change_list_2_v35_dict_list,
migrated_draft_change_list_2_v36_dict_list)
def test_convert_states_v34_dict_to_v35_dict(self):
draft_change_list_1_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'IsMathematicallyEquivalentTo',
'inputs': {
'x': 'x+y/2'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_2_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 35.
self.create_and_migrate_new_exploration('34', '35')
migrated_draft_change_list_1_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v34, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v35)
migrated_draft_change_list_2_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v34, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v34_dict_list = [
change.to_dict() for change in draft_change_list_2_v34
]
migrated_draft_change_list_2_v35_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v35
]
self.assertEqual(
draft_change_list_2_v34_dict_list,
migrated_draft_change_list_2_v35_dict_list)
def test_convert_states_v33_dict_to_v34_dict(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
self.create_and_migrate_new_exploration('33', '34')
migrated_draft_change_list = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list, 1, 2, self.EXP_ID))
self.assertEqual(
migrated_draft_change_list[0].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
expected_html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[1].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': expected_html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[expected_html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': expected_html_content,
'y': expected_html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[expected_html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': expected_html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}).to_dict())
self.assertEqual(
migrated_draft_change_list[2].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': expected_html_content
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[3].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': expected_html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': expected_html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[4].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': expected_html_content
},
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[5].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[expected_html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[6].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': expected_html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[7].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': expected_html_content
}
}]
}).to_dict())
def test_convert_states_v32_dict_to_v33_dict(self):
draft_change_list_v32 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Version 33 adds a showChoicesInShuffledOrder bool, which doesn't
# impact the second ExplorationChange because it will only impact
# it if 'choices' is the only key for new_value.
expected_draft_change_list_v33 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'showChoicesInShuffledOrder': {
'value': False
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Migrate exploration to state schema version 33.
self.create_and_migrate_new_exploration('32', '33')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v33 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v32, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v33_dict_list = [
change.to_dict() for change in expected_draft_change_list_v33
]
migrated_draft_change_list_v33_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v33
]
self.assertEqual(
expected_draft_change_list_v33_dict_list,
migrated_draft_change_list_v33_dict_list)
def test_convert_states_v31_dict_to_v32_dict(self):
draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 32.
self.create_and_migrate_new_exploration('31', '32')
# Migrate the draft change list's state schema to the migrated
# exploration's schema. In this case there are no changes to the
# draft change list since version 32 adds a customization arg
# for the "Add" button text in SetInput interaction for the
# exploration, for which there should be no changes to drafts.
migrated_draft_change_list_v32 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v31, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v31_dict_list = [
change.to_dict() for change in draft_change_list_v31
]
migrated_draft_change_list_v32_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v32
]
self.assertEqual(
draft_change_list_v31_dict_list,
migrated_draft_change_list_v32_dict_list)
def test_convert_states_v30_dict_to_v31_dict(self):
draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False
}
}
}
}
})
]
# Version 31 adds the duration_secs property.
expected_draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Migrate exploration to state schema version 31.
self.create_and_migrate_new_exploration('30', '31')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v31 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v30, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v31_dict_list = [
change.to_dict() for change in expected_draft_change_list_v31
]
migrated_draft_change_list_v31_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v31
]
self.assertEqual(
expected_draft_change_list_v31_dict_list,
migrated_draft_change_list_v31_dict_list)
def test_convert_states_v29_dict_to_v30_dict(self):
draft_change_list_v29 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_misconception_id': None
}
})
]
# Version 30 replaces the tagged_misconception_id in version 29
# with tagged_skill_misconception_id.
expected_draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}
})
]
# Migrate exploration to state schema version 30.
self.create_and_migrate_new_exploration('29', '30')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v30 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v29, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v30_dict_list = [
change.to_dict() for change in expected_draft_change_list_v30
]
migrated_draft_change_list_v30_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v30
]
self.assertEqual(
expected_draft_change_list_v30_dict_list,
migrated_draft_change_list_v30_dict_list)
def test_convert_states_v28_dict_to_v29_dict(self):
draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 29.
self.create_and_migrate_new_exploration('28', '29')
# Migrate the draft change list's state schema to the migrated
        # exploration's schema. In this case there are no changes to the
# draft change list since version 29 adds the
# solicit_answer_details boolean variable to the exploration
# state, for which there should be no changes to drafts.
migrated_draft_change_list_v29 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v28, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v28_dict_list = [
change.to_dict() for change in draft_change_list_v28
]
migrated_draft_change_list_v29_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v29
]
self.assertEqual(
draft_change_list_v28_dict_list,
migrated_draft_change_list_v29_dict_list)
def test_convert_states_v27_dict_to_v28_dict(self):
draft_change_list_v27 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'content_ids_to_audio_translations',
'state_name': 'State B',
'new_value': 'new value',
})
]
# Version 28 adds voiceovers_mapping.
expected_draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'recorded_voiceovers',
'state_name': 'State B',
'new_value': {'voiceovers_mapping': 'new value'}
})
]
# Migrate exploration to state schema version 28.
self.create_and_migrate_new_exploration('27', '28')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v28 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v27, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v28_dict_list = [
change.to_dict() for change in expected_draft_change_list_v28
]
migrated_draft_change_list_v28_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v28
]
self.assertEqual(
expected_draft_change_list_v28_dict_list,
migrated_draft_change_list_v28_dict_list)
| apache-2.0 | 3,962,984,640,161,375,000 | 40.399557 | 80 | 0.42758 | false |
ksteinfe/decodes | src/decodes/core/dc_mesh.py | 1 | 6004 | from decodes.core import *
from . import dc_base, dc_vec, dc_point, dc_has_pts #here we may only import modules that have been loaded before this one. see core/__init__.py for proper order
if VERBOSE_FS: print("mesh.py loaded")
import copy, collections
class Mesh(HasPts):
"""
a very simple mesh class
"""
subclass_attr = [] # this list of props is unset any time this HasPts object changes
def __init__(self, vertices=None, faces=None, basis=None):
""" Mesh Constructor.
:param vertices: The vertices of the mesh.
:type vertices: [Point]
:param faces: List of ordered faces.
:type faces: [int]
:param basis: The (optional) basis of the mesh.
:type basis: Basis
:result: Mesh object.
:rtype: Mesh
::
pts=[
Point(0,0,0),
Point(0,1,0),
Point(1,1,0),
Point(1,0,0),
Point(0,0,1),
Point(0,1,1),
Point(1,1,1),
Point(1,0,1),
]
quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
quadmesh=Mesh(pts,quad_faces)
"""
super(Mesh,self).__init__(vertices,basis) #HasPts constructor handles initalization of verts and basis
self._faces = [] if (faces is None) else faces
@property
def faces(self):
""" Returns a list of mesh faces.
:result: List of mesh faces.
:rtype: list
"""
return self._faces
def add_face(self,a,b,c,d=-1):
""" Adds a face to the mesh.
:param a,b,c,d: Face to be added to the list of faces.
:type a,b,c,d: int.
:result: Modifies list of faces.
:rtype: None
::
quadmesh.add_face(4,5,6,7)
"""
#TODO: add lists of faces just the same
if max(a,b,c,d) < len(self.pts):
if (d>=0) : self._faces.append([a,b,c,d])
else: self._faces.append([a,b,c])
def face_pts(self,index):
""" Returns the points of a given face.
:param index: Face's index
:type index: int
:returns: Vertices.
:rtype: Point
::
quadmesh.face_pts(0)
"""
return [self.pts[i] for i in self.faces[index]]
def face_centroid(self,index):
""" Returns the centroids of individual mesh faces.
:param index: Index of a face.
:type index: int
:returns: The centroid of a face.
:rtype: Point
::
quadmesh.face_centroid(0)
"""
return Point.centroid(self.face_pts(index))
def face_normal(self,index):
""" Returns the normal vector of a face.
:param index: Index of a face.
:type index: int
:returns: Normal vector.
:rtype: Vec
::
quadmesh.face_normal(0)
"""
verts = self.face_pts(index)
if len(verts) == 3 : return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized()
else :
v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized()
v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized()
return Vec.bisector(v0,v1).normalized()
def __repr__(self):
return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces))
@staticmethod
def explode(msh):
""" Explodes a mesh into individual faces.
:param msh: Mesh to explode.
:type msh: Mesh
:returns: List of meshes.
:type: [Mesh]
::
Mesh.explode(quadmesh)
"""
exploded_meshes = []
for face in msh.faces:
pts = [msh.pts[v] for v in face]
nface = [0,1,2] if len(face)==3 else [0,1,2,3]
exploded_meshes.append(Mesh(pts,[nface]))
return exploded_meshes
def to_pt_graph(self):
""" Returns a Graph representation of the mesh points by index.
:returns: A Graph of point indexes.
:rtype: Graph
::
quadmesh.to_pt_graph()
"""
        from decodes.extensions.graph import Graph  # local import, mirroring to_face_graph below
        graph = Graph()
for index in range(len(self.pts)):
for face in self.faces:
for px in face:
if index in face and index!=px: graph.add_edge(index, px)
return graph
def to_face_graph(self, val=1):
""" Returns a Graph representation of the mesh faces by index.
:param val: number of coincident points for neighborness.
:type val: int
:returns: A Graph of face indexes.
:rtype: Graph
::
quadmesh.to_face_graph(2)
"""
from decodes.extensions.graph import Graph
graph = Graph()
graph.naked_nodes = []
for f1 in range(len(self.faces)):
for f2 in range(len(self.faces)):
if f1 != f2:
count = 0
for index in self.faces[f2]:
if index in self.faces[f1]:
count+=1
if count >= val:
graph.add_edge(f1,f2)
if len(graph.edges[f1]) < len(self.faces[f1]):
if f1 not in graph.naked_nodes:
graph.naked_nodes.append(f1)
return graph
| gpl-3.0 | 8,685,016,591,844,526,000 | 30.276042 | 164 | 0.460693 | false |
hjoliver/cylc | tests/unit/tui/test_data.py | 1 | 1331 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
"""It should produce a GraphQL mutation with the args filled in."""
arg_types = {
'foo': 'String!',
'bar': '[Int]'
}
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
['foo', 'bar']
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
result
}
}
'''
| gpl-3.0 | -3,680,273,964,827,430,400 | 34.026316 | 72 | 0.664162 | false |
JonathonReinhart/scuba | scuba/config.py | 1 | 11648 | import os
import yaml
import re
import shlex
from .constants import *
from .utils import *
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
self._cache = dict()
super().__init__(stream)
def from_yaml(self, node):
'''
        Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
parts = shlex.split(content)
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
doc = self._cache.get(path)
if not doc:
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
self._cache[path] = doc
# Retrieve the key
try:
cur = doc
# Use a negative look-behind to split the key on non-escaped '.' characters
for k in re.split(r'(?<!\\)\.', key):
cur = cur[k.replace('\\.', '.')] # Be sure to replace any escaped '.' characters with *just* the '.'
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
Loader.add_constructor('!from_yaml', Loader.from_yaml)
def find_config():
'''Search up the directory hierarchy for .scuba.yml
Returns: path, rel, config on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
config The loaded configuration
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
cfg_path = os.path.join(path, SCUBA_YML)
if os.path.exists(cfg_path):
return path, rel, load_config(cfg_path)
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel)
def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, str):
# The script is just the text itself
return [node]
if isinstance(node, dict):
# There must be a "script" key, which must be a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, str):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name))
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
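# For example (illustrative): _process_environment(['FOO=bar'], 'environment') yields
# {'FOO': 'bar'}, while _process_environment({'FOO': 1, 'BAR': None}, 'environment')
# yields {'FOO': '1', 'BAR': os.getenv('BAR', '')} -- values are always coerced to strings.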
def _get_entrypoint(data):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between entrypoint being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
key = 'entrypoint'
    if key not in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if ep is None:
ep = ''
if not isinstance(ep, str):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
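# For example (illustrative): a mapping without an 'entrypoint' key yields None,
# {'entrypoint': None} yields '' (a null value is kept distinct from an absent key),
# and {'entrypoint': 'bash'} yields 'bash'.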
class ScubaAlias:
def __init__(self, name, script, image, entrypoint, environment, shell, as_root):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
self.shell = shell
self.as_root = as_root
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
image = None
entrypoint = None
environment = None
shell = None
as_root = False
if isinstance(node, dict): # Rich alias
image = node.get('image')
entrypoint = _get_entrypoint(node)
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment'))
shell = node.get('shell')
as_root = node.get('root', as_root)
return cls(name, script, image, entrypoint, environment, shell, as_root)
class ScubaContext:
pass
class ScubaConfig:
def __init__(self, **data):
optional_nodes = ('image','aliases','hooks','entrypoint','environment','shell')
# Check for unrecognized nodes
extra = [n for n in data if not n in optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data.get('image')
self._shell = data.get('shell', DEFAULT_SHELL)
self._entrypoint = _get_entrypoint(data)
self._load_aliases(data)
self._load_hooks(data)
self._environment = self._load_environment(data)
def _load_aliases(self, data):
self._aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self._aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self._hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self._hooks[name] = hook
def _load_environment(self, data):
return _process_environment(data.get('environment'), 'environment')
@property
def image(self):
if not self._image:
raise ConfigError("Top-level 'image' not set")
return self._image
@property
def entrypoint(self):
return self._entrypoint
@property
def aliases(self):
return self._aliases
@property
def hooks(self):
return self._hooks
@property
def environment(self):
return self._environment
@property
def shell(self):
return self._shell
def process_command(self, command, image=None, shell=None):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
image Override the image from .scuba.yml
shell Override the shell from .scuba.yml
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = None
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
result.shell = self.shell
result.as_root = False
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
if alias.shell is not None:
result.shell = alias.shell
if alias.as_root:
result.as_root = True
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
                    # Alias is a single-line script; perform substitution
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
# If a shell was given on the CLI, it should override the shell set by
# the alias or top-level config
if shell:
result.shell = shell
# If an image was given, it overrides what might have been set by an alias
if image:
result.image = image
        # If the image was still not set, then try to get it from the config,
# which will raise a ConfigError if it is not set
if not result.image:
result.image = self.image
return result
def load_config(path):
try:
with open(path, 'r') as f:
data = yaml.load(f, Loader)
except IOError as e:
raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e))
except yaml.YAMLError as e:
raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e))
return ScubaConfig(**(data or {}))
| mit | 8,631,149,680,808,919,000 | 31.088154 | 117 | 0.571171 | false |
kmiller96/Shipping-Containers-Software | lib/core.py | 1 | 8600 | # AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.
# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e
# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependancy.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxilary labels. Added method to generate information label. Small bug fixes.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers
CONTAINER_CLASSES = [
containers.BasicContainer,
containers.HeavyContainer,
containers.RefrigeratedContainer,
containers.LiquidContainer,
containers.ExplosivesContainer,
containers.ToxicContainer,
containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']
TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
"""Processes the csv file that the ship supplies."""
def _deletenewline(string):
"""Deletes the \n symbol from a string if it exists."""
try:
truncatedstring = string[:string.index('\n')]
except ValueError:
truncatedstring = string
finally:
return truncatedstring
try:
home = os.getcwd()
os.chdir(path)
except WindowsError: # Would this hold true on all machines?
raise NameError, "The path specified does not exist."
rawfile = open(filename, 'r')
arylines = rawfile.readlines()
basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
numpyarray = np.array(basematrix)
return numpyarray
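# For example (illustrative): each raw line such as 'heavy,2.5,...,extra\n' is split on
# commas (after stripping the trailing newline) and the rows are stacked into a 2-D numpy
# array of strings; NewHoldingBay.defineship() below then drops the header row and reads
# columns 0, 1 and 3 of each remaining row.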
class IDGenerator:
"""Controls the assignment of id tags on the containers."""
# TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.
def __init__(self):
"""Initialise the id generator."""
self._COUNTERS = [0] * len(CONTAINER_TYPES)
return
def _findindex(self, container):
"""Determines the index in the lists the class should use."""
return CONTAINER_TYPES.index(container)
def _serialcode(self, index):
"""Fetches the serial code for a supplied index."""
return SERIAL_CODES[index]
def _counter(self, index):
"""Fetches the counter for a specific serial type and increments it by one."""
self._COUNTERS[index] += 1
return self._COUNTERS[index]
def newid(self, containertype):
"""Generates a new id."""
ii = self._findindex(containertype)
idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
return idtag
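    # Example (illustrative): with the module-level tables above, IDGenerator().newid('heavy')
    # returns 'H00001', and a second call on the same instance returns 'H00002'.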
class NewHoldingBay:
"""Creates a new holding bay for the containers. Thus it contains all of the information about the containers
along with the methods controlling unloading and loading them."""
def __init__(self):
self._path = os.getcwd()
self.idgenerator = IDGenerator()
self.containerlist = list()
self._iOnship = 0
self._iLoaded = 0
self._iHolding = 0
return None
def _createcontainer(self, containerstr, parameters):
"""Creates a new container class based off the first column of the CSV."""
# TODO: Fix this method up to catch more and print useful error messages.
if not isinstance(containerstr, str):
raise TypeError, "The parameter passed must be a string."
elif len(containerstr) == 1:
try:
ii = SERIAL_CODES.index(containerstr)
except ValueError:
raise Exception("Bad input.") # TODO: Fix this area up.
elif len(containerstr) != 1:
try:
ii = CONTAINER_TYPES.index(containerstr)
except ValueError:
raise Exception("Bad input.")
idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
return CONTAINER_CLASSES[ii](idtag, *parameters)
def defineship(self, file):
"""Pass in the CSV file of the ship in order to unload it."""
shipdata = processshipfile(file, self._path)
shipdata = shipdata[1::] # Throw out the headers.
for line in shipdata:
newcontainer = self._createcontainer(line[0], (line[1], line[3]))
self.containerlist.append(newcontainer)
self._iOnship += 1
def printcontainer(self, serial):
"""Prints the information about a specific container."""
for container in self.containerlist:
if container.id() == serial:
container.information()
return None
else:
continue
raise NameError, "Unable to find container with serial code %s" % serial
return -1
def printallinformation(self):
"""Prints the information of all the containers."""
for container in self.containerlist:
container.information()
time.sleep(PRINTALL_TIME)
return None
def unloadall(self, debug=False):
"""Unloads all of the containers from the ship."""
for container in self.containerlist:
container.unload(debug=debug)
self._iHolding += 1
self._iOnship -= 1
return None
def loadall(self, debug=False):
"""Loads all of the containers into trucks and trains."""
# TODO: Proper loading locations.
ii = 1
for container in self.containerlist:
container.load('Truck ' + str(ii).zfill(3), debug=debug)
self._iHolding -= 1
self._iLoaded += 1
ii += 1
return None
def printauditedload(self):
"""Prints information about the holding bay at this time."""
iOnship = 0; iLoaded = 0; iHolding = 0
iContainercount = [0] * len(CONTAINER_TYPES)
for container in self.containerlist:
try:
ii = CONTAINER_TYPES.index(container._type)
iContainercount[ii] += 1
except ValueError:
raise NameError, "One (or more) containers don't have a valid type."
# Print the appropriate information.
print "----------------------------------------------------------------------"
print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
print ""
print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
for ii in xrange(len(CONTAINER_TYPES)):
if iContainercount[ii] == 0: continue
print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
print "----------------------------------------------------------------------"
return None
def addidtags(self, debug=False):
"""Applys appropriate serial numbers to all of the containers."""
for container in self.containerlist:
print "Applying id tag to container %s" % container.id()
if not debug: time.sleep(TAG_APPLICATION_TIME)
container.addidtag()
return None
def applyauxilarylabels(self):
"""Applys the labels that should go on containers about their contents and handling."""
for container in self.containerlist:
print "Adding labels to container %s" % container.id()
container.addauxilarylabels()
return None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| mit | 3,282,703,609,942,576,000 | 39.148325 | 120 | 0.570698 | false |
amerlyq/airy | vim/res/ycm_extra_conf.py | 1 | 5213 | # SEE: CACHE/bundle/YouCompleteMe/cpp/ycm/.ycm_extra_conf.py
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall', '-Wextra', '-Werror', '-Wc++98-compat',
'-Wno-long-long', '-Wno-variadic-macros', '-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-isystem', '../BoostParts',
# This path will only work on OS X, but extra paths that don't exist are not harmful
'-isystem', '/System/Library/Frameworks/Python.framework/Headers',
'-isystem', '../llvm/include',
'-isystem', '../llvm/tools/clang/include',
'-I', '.',
'-I', './ClangCompleter',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = os.path.expanduser( '~/aura/pdrm/gerrit/build' )
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
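# For example (illustrative): with working_directory '/home/user/project', the flags
# ['-I', 'include', '--sysroot=sys'] become
# ['-I', '/home/user/project/include', '--sysroot=/home/user/project/sys'].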
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags, 'do_cache': True }
| mit | 1,835,712,547,090,115,600 | 36.235714 | 115 | 0.689047 | false |
sangwonl/stage34 | webapp/api/handlers/stage.py | 1 | 6612 | from django.views import View
from django.conf import settings
from datetime import datetime
from api.helpers.mixins import AuthRequiredMixin
from api.helpers.http.jsend import JSENDSuccess, JSENDError
from api.models.resources import Membership, Stage
from libs.utils.model_ext import model_to_dict
from worker.tasks.deployment import (
task_provision_stage,
task_change_stage_status,
task_delete_stage,
task_refresh_stage
)
import pytz
import os
import json
import jwt
SERIALIZE_FIELDS = [
'id',
'title',
'endpoint',
'status',
'repo',
'default_branch',
'branch',
'created_at'
]
class StageRootHandler(AuthRequiredMixin, View):
def get(self, request, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stages_qs = Stage.objects.filter(org=org)
stages = [model_to_dict(s, fields=SERIALIZE_FIELDS) for s in stages_qs]
return JSENDSuccess(status_code=200, data=stages)
def post(self, request, *args, **kwargs):
json_body = json.loads(request.body)
title = json_body.get('title')
repo = json_body.get('repo')
branch= json_body.get('branch')
default_branch= json_body.get('default_branch')
run_on_create = json_body.get('run_on_create', False)
if not (title and repo and default_branch and branch):
return JSENDError(status_code=400, msg='invalid stage info')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = Stage.objects.create(
org=org,
title=title,
repo=repo,
default_branch=default_branch,
branch=branch
)
github_access_key = request.user.jwt_payload.get('access_token')
task_provision_stage.apply_async(args=[github_access_key, stage.id, repo, branch, run_on_create])
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
class StageDetailHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
def put(self, request, stage_id, *args, **kwargs):
json_body = json.loads(request.body)
new_status = json_body.get('status')
if not new_status or new_status not in ('running', 'paused'):
return JSENDError(status_code=400, msg='invalid stage status')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
cur_status = stage.status
if cur_status != new_status:
github_access_key = request.user.jwt_payload.get('access_token')
task_change_stage_status.apply_async(args=[github_access_key, stage_id, new_status])
new_status = 'changing'
stage.title = json_body.get('title', stage.title)
stage.repo = json_body.get('repo', stage.repo)
stage.default_branch = json_body.get('default_branch', stage.default_branch)
stage.branch = json_body.get('branch', stage.branch)
stage.status = new_status
stage.save()
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=204)
def delete(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage.status = 'deleting'
stage.save()
github_access_key = request.user.jwt_payload.get('access_token')
task_delete_stage.apply_async(args=[github_access_key, stage_id])
return JSENDSuccess(status_code=204)
class StageLogHandler(AuthRequiredMixin, View):
def get_log_path(self, stage_id):
return os.path.join(settings.STAGE_REPO_HOME, stage_id, 'output.log')
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
log_path = self.get_log_path(stage_id)
if not os.path.exists(log_path):
return JSENDError(status_code=404, msg='log file not found')
log_msgs = []
with open(log_path, 'rt') as f:
log_msg = f.read()
log_msgs = [l for l in log_msg.split('\n') if l]
ts = os.path.getmtime(log_path)
tz = pytz.timezone(settings.TIME_ZONE)
dt = datetime.fromtimestamp(ts, tz=tz)
log_data = {'log_messages': log_msgs, 'log_time': dt.isoformat()}
return JSENDSuccess(status_code=200, data=log_data)
class StageRefreshHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def post(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
github_access_key = request.user.jwt_payload.get('access_token')
task_refresh_stage.apply_async(args=[github_access_key, stage_id])
stage.status = 'changing'
stage.save()
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=204)
| mit | 3,603,858,691,656,666,600 | 33.082474 | 105 | 0.628403 | false |
globaltoken/globaltoken | test/functional/test_framework/authproxy.py | 1 | 7759 | # Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to globaltokend.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
port = 80 if self.__url.port is None else self.__url.port
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
| mit | -3,351,838,090,374,952,400 | 42.105556 | 155 | 0.621601 | false |
jtomasek/tuskar-ui-1 | tuskar_ui/infrastructure/resource_management/resource_classes/workflows.py | 1 | 12384 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from tuskar_ui import api as tuskar
import tuskar_ui.workflows
import re
from tuskar_ui.infrastructure. \
resource_management.resource_classes.tables import FlavorTemplatesTable
from tuskar_ui.infrastructure. \
resource_management.resource_classes.tables import RacksTable
class ResourceClassInfoAndFlavorsAction(workflows.Action):
name = forms.CharField(max_length=255,
label=_("Class Name"),
help_text="",
required=True)
service_type = forms.ChoiceField(label=_('Class Type'),
required=True,
choices=[('', ''),
('compute',
('Compute')),
('not_compute',
('Non Compute')),
],
widget=forms.Select(
attrs={'class': 'switchable'})
)
image = forms.ChoiceField(label=_('Provisioning Image'),
required=True,
choices=[('compute-img', ('overcloud-compute'))],
widget=forms.Select(
attrs={'class': 'switchable'})
)
def clean(self):
cleaned_data = super(ResourceClassInfoAndFlavorsAction,
self).clean()
name = cleaned_data.get('name')
resource_class_id = self.initial.get('resource_class_id', None)
try:
resource_classes = tuskar.ResourceClass.list(self.request)
except Exception:
resource_classes = []
msg = _('Unable to get resource class list')
exceptions.check_message(["Connection", "refused"], msg)
raise
for resource_class in resource_classes:
if resource_class.name == name and \
resource_class_id != resource_class.id:
raise forms.ValidationError(
_('The name "%s" is already used by'
' another resource class.')
% name
)
return cleaned_data
class Meta:
name = _("Class Settings")
help_text = _("From here you can fill the class "
"settings and add flavors to class.")
class CreateResourceClassInfoAndFlavors(tuskar_ui.workflows.TableStep):
table_classes = (FlavorTemplatesTable,)
action_class = ResourceClassInfoAndFlavorsAction
template_name = 'infrastructure/resource_management/resource_classes/'\
'_resource_class_info_and_flavors_step.html'
contributes = ("name", "service_type", "flavors_object_ids",
'max_vms')
def contribute(self, data, context):
request = self.workflow.request
if data:
context["flavors_object_ids"] =\
request.POST.getlist("flavors_object_ids")
# todo: lsmola django can't parse dictionaruy from POST
# this should be rewritten to django formset
context["max_vms"] = {}
for index, value in request.POST.items():
match = re.match(
'^(flavors_object_ids__max_vms__(.*?))$',
index)
if match:
context["max_vms"][match.groups()[1]] = value
context.update(data)
return context
def get_flavors_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
# TODO(lsmola ugly interface, rewrite)
self._tables['flavors'].active_multi_select_values = \
resource_class.flavortemplates_ids
all_flavors = resource_class.all_flavors
else:
all_flavors = tuskar.FlavorTemplate.list(
self.workflow.request)
except Exception:
all_flavors = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve resource flavors list.'))
return all_flavors
class RacksAction(workflows.Action):
class Meta:
name = _("Racks")
class CreateRacks(tuskar_ui.workflows.TableStep):
table_classes = (RacksTable,)
action_class = RacksAction
contributes = ("racks_object_ids")
template_name = 'infrastructure/resource_management/'\
'resource_classes/_racks_step.html'
def contribute(self, data, context):
request = self.workflow.request
context["racks_object_ids"] =\
request.POST.getlist("racks_object_ids")
context.update(data)
return context
def get_racks_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
# TODO(lsmola ugly interface, rewrite)
self._tables['racks'].active_multi_select_values = \
resource_class.racks_ids
racks = \
resource_class.all_racks
else:
racks = \
tuskar.Rack.list(self.workflow.request, True)
except Exception:
racks = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve racks list.'))
return racks
class ResourceClassWorkflowMixin:
# FIXME active tabs coflict
# When on page with tabs, the workflow with more steps is used,
# there is a conflict of active tabs and it always shows the
# first tab after an action. So I explicitly specify to what
# tab it should redirect after action, until the coflict will
# be fixed in Horizon.
def get_index_url(self):
"""This url is used both as success and failure url"""
return "%s?tab=resource_management_tabs__resource_classes_tab" %\
reverse("horizon:infrastructure:resource_management:index")
def get_success_url(self):
return self.get_index_url()
def get_failure_url(self):
return self.get_index_url()
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def _get_flavors(self, request, data):
flavors = []
flavor_ids = data.get('flavors_object_ids') or []
max_vms = data.get('max_vms')
resource_class_name = data['name']
for template_id in flavor_ids:
template = tuskar.FlavorTemplate.get(request, template_id)
capacities = []
for c in template.capacities:
capacities.append({'name': c.name,
'value': str(c.value),
'unit': c.unit})
# FIXME: tuskar uses resource-class-name prefix for flavors,
# e.g. m1.large, we add rc name to the template name:
flavor_name = "%s.%s" % (resource_class_name, template.name)
flavors.append({'name': flavor_name,
'max_vms': max_vms.get(template.id, None),
'capacities': capacities})
return flavors
def _add_racks(self, request, data, resource_class):
ids_to_add = data.get('racks_object_ids') or []
resource_class.set_racks(request, ids_to_add)
class CreateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (CreateResourceClassInfoAndFlavors,
CreateRacks)
slug = "create_resource_class"
name = _("Create Class")
finalize_button_name = _("Create Class")
success_message = _('Created class "%s".')
failure_message = _('Unable to create class "%s".')
def _create_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.create(
request,
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
_('Unable to create resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._create_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class UpdateResourceClassInfoAndFlavors(CreateResourceClassInfoAndFlavors):
depends_on = ("resource_class_id",)
class UpdateRacks(CreateRacks):
depends_on = ("resource_class_id",)
class UpdateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (UpdateResourceClassInfoAndFlavors,
UpdateRacks)
slug = "update_resource_class"
name = _("Update Class")
finalize_button_name = _("Update Class")
success_message = _('Updated class "%s".')
failure_message = _('Unable to update class "%s".')
def _update_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.update(
request,
data['resource_class_id'],
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
_('Unable to create resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._update_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class DetailUpdateWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__overview" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateRacksWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__racks" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateFlavorsWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__flavors" % (
reverse(url, args=(self.context["resource_class_id"])))
| apache-2.0 | -6,819,238,934,805,473,000 | 37.222222 | 79 | 0.566053 | false |
forman/dectree | examples/intertidal_flat_classif/intertidal_flat_classif.py | 1 | 12362 |
from numba import jit, jitclass, float64
import numpy as np
@jit(nopython=True)
def _B1_LT_085(x):
# B1.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B1_GT_1(x):
# B1.GT_1: gt(1.0)
if 0.0 == 0.0:
return 1.0 if x > 1.0 else 0.0
x1 = 1.0 - 0.0
x2 = 1.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B2_GT_0(x):
# B2.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B3_LT_005(x):
# B3.LT_005: lt(0.05)
if 0.0 == 0.0:
return 1.0 if x < 0.05 else 0.0
x1 = 0.05 - 0.0
x2 = 0.05 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_01(x):
# B3.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_015(x):
# B3.LT_015: lt(0.15)
if 0.0 == 0.0:
return 1.0 if x < 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_02(x):
# B3.LT_02: lt(0.2)
if 0.0 == 0.0:
return 1.0 if x < 0.2 else 0.0
x1 = 0.2 - 0.0
x2 = 0.2 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B4_NODATA(x):
# B4.NODATA: eq(0.0)
if 0.0 == 0.0:
return 1.0 if x == 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0
x3 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
if x <= x3:
return 1.0 - (x - x2) / (x3 - x2)
return 0.0
@jit(nopython=True)
def _B5_LT_01(x):
# B5.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B7_LT_05(x):
# B7.LT_05: lt(0.5)
if 0.0 == 0.0:
return 1.0 if x < 0.5 else 0.0
x1 = 0.5 - 0.0
x2 = 0.5 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_0(x):
# B8.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_009(x):
# B8.LT_009: lt(0.09)
if 0.0 == 0.0:
return 1.0 if x < 0.09 else 0.0
x1 = 0.09 - 0.0
x2 = 0.09 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_033(x):
# B8.GT_033: gt(0.33)
if 0.0 == 0.0:
return 1.0 if x > 0.33 else 0.0
x1 = 0.33 - 0.0
x2 = 0.33 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_035(x):
# B8.GT_035: gt(0.35)
if 0.0 == 0.0:
return 1.0 if x > 0.35 else 0.0
x1 = 0.35 - 0.0
x2 = 0.35 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_04(x):
# B8.GT_04: gt(0.4)
if 0.0 == 0.0:
return 1.0 if x > 0.4 else 0.0
x1 = 0.4 - 0.0
x2 = 0.4 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_045(x):
# B8.GT_045: gt(0.45)
if 0.0 == 0.0:
return 1.0 if x > 0.45 else 0.0
x1 = 0.45 - 0.0
x2 = 0.45 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_085(x):
# B8.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B16_GT_0(x):
# B16.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B19_GT_015(x):
# B19.GT_015: gt(0.15)
if 0.0 == 0.0:
return 1.0 if x > 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_011(x):
# BSum.GT_011: gt(0.11)
if 0.0 == 0.0:
return 1.0 if x > 0.11 else 0.0
x1 = 0.11 - 0.0
x2 = 0.11 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_013(x):
# BSum.GT_013: gt(0.13)
if 0.0 == 0.0:
return 1.0 if x > 0.13 else 0.0
x1 = 0.13 - 0.0
x2 = 0.13 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_016(x):
# BSum.GT_016: gt(0.16)
if 0.0 == 0.0:
return 1.0 if x > 0.16 else 0.0
x1 = 0.16 - 0.0
x2 = 0.16 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _Class_FALSE(x):
# Class.FALSE: false()
return 0.0
@jit(nopython=True)
def _Class_TRUE(x):
# Class.TRUE: true()
return 1.0
_InputsSpec = [
("b1", float64[:]),
("b2", float64[:]),
("b3", float64[:]),
("b4", float64[:]),
("b5", float64[:]),
("b6", float64[:]),
("b7", float64[:]),
("b8", float64[:]),
("b12", float64[:]),
("b13", float64[:]),
("b14", float64[:]),
("b15", float64[:]),
("b16", float64[:]),
("b19", float64[:]),
("b100", float64[:]),
("bsum", float64[:]),
]
@jitclass(_InputsSpec)
class Inputs:
def __init__(self, size: int):
self.b1 = np.zeros(size, dtype=np.float64)
self.b2 = np.zeros(size, dtype=np.float64)
self.b3 = np.zeros(size, dtype=np.float64)
self.b4 = np.zeros(size, dtype=np.float64)
self.b5 = np.zeros(size, dtype=np.float64)
self.b6 = np.zeros(size, dtype=np.float64)
self.b7 = np.zeros(size, dtype=np.float64)
self.b8 = np.zeros(size, dtype=np.float64)
self.b12 = np.zeros(size, dtype=np.float64)
self.b13 = np.zeros(size, dtype=np.float64)
self.b14 = np.zeros(size, dtype=np.float64)
self.b15 = np.zeros(size, dtype=np.float64)
self.b16 = np.zeros(size, dtype=np.float64)
self.b19 = np.zeros(size, dtype=np.float64)
self.b100 = np.zeros(size, dtype=np.float64)
self.bsum = np.zeros(size, dtype=np.float64)
_OutputsSpec = [
("nodata", float64[:]),
("Wasser", float64[:]),
("Schill", float64[:]),
("Muschel", float64[:]),
("dense2", float64[:]),
("dense1", float64[:]),
("Strand", float64[:]),
("Sand", float64[:]),
("Misch", float64[:]),
("Misch2", float64[:]),
("Schlick", float64[:]),
("schlick_t", float64[:]),
("Wasser2", float64[:]),
]
@jitclass(_OutputsSpec)
class Outputs:
def __init__(self, size: int):
self.nodata = np.zeros(size, dtype=np.float64)
self.Wasser = np.zeros(size, dtype=np.float64)
self.Schill = np.zeros(size, dtype=np.float64)
self.Muschel = np.zeros(size, dtype=np.float64)
self.dense2 = np.zeros(size, dtype=np.float64)
self.dense1 = np.zeros(size, dtype=np.float64)
self.Strand = np.zeros(size, dtype=np.float64)
self.Sand = np.zeros(size, dtype=np.float64)
self.Misch = np.zeros(size, dtype=np.float64)
self.Misch2 = np.zeros(size, dtype=np.float64)
self.Schlick = np.zeros(size, dtype=np.float64)
self.schlick_t = np.zeros(size, dtype=np.float64)
self.Wasser2 = np.zeros(size, dtype=np.float64)
@jit(nopython=True)
def apply_rules(inputs: Inputs, outputs: Outputs):
for i in range(len(outputs.nodata)):
t0 = 1.0
# if b4 is NODATA:
t1 = min(t0, _B4_NODATA(inputs.b4[i]))
# nodata = TRUE
outputs.nodata[i] = t1
# else:
t1 = min(t0, 1.0 - t1)
# if (b8 is GT_033 and b1 is LT_085) or b8 is LT_009:
t2 = min(t1, max(min(_B8_GT_033(inputs.b8[i]), _B1_LT_085(inputs.b1[i])), _B8_LT_009(inputs.b8[i])))
# if b5 is LT_01:
t3 = min(t2, _B5_LT_01(inputs.b5[i]))
# Wasser = TRUE
outputs.Wasser[i] = t3
# else:
t3 = min(t2, 1.0 - t3)
# if (b19 is GT_015 and (b8 is GT_04 and b8 is LT_085) and b7 is LT_05) or (b8 is GT_04 and bsum is GT_011) or (b8 is GT_035 and bsum is GT_016):
t4 = min(t3, max(max(min(min(_B19_GT_015(inputs.b19[i]), min(_B8_GT_04(inputs.b8[i]), _B8_LT_085(inputs.b8[i]))), _B7_LT_05(inputs.b7[i])), min(_B8_GT_04(inputs.b8[i]), _BSum_GT_011(inputs.bsum[i]))), min(_B8_GT_035(inputs.b8[i]), _BSum_GT_016(inputs.bsum[i]))))
# if bsum is GT_013:
t5 = min(t4, _BSum_GT_013(inputs.bsum[i]))
# Schill = TRUE
outputs.Schill[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# Muschel = TRUE
outputs.Muschel[i] = t5
# else:
t4 = min(t3, 1.0 - t4)
# if b8 is GT_045:
t5 = min(t4, _B8_GT_045(inputs.b8[i]))
# dense2 = TRUE
outputs.dense2[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# dense1 = TRUE
outputs.dense1[i] = t5
# else:
t2 = min(t1, 1.0 - t2)
# if b1 is GT_1:
t3 = min(t2, _B1_GT_1(inputs.b1[i]))
# Strand = TRUE
outputs.Strand[i] = t3
# else:
t3 = min(t2, 1.0 - t3)
# if b3 is LT_005:
t4 = min(t3, _B3_LT_005(inputs.b3[i]))
# Sand = TRUE
outputs.Sand[i] = t4
# else:
t4 = min(t3, 1.0 - t4)
# if b3 is LT_01 and b8 is GT_0:
t5 = min(t4, min(_B3_LT_01(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch = TRUE
outputs.Misch[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# if b3 is LT_015 and b8 is GT_0:
t6 = min(t5, min(_B3_LT_015(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch2 = TRUE
outputs.Misch2[i] = t6
# else:
t6 = min(t5, 1.0 - t6)
# if b3 is LT_02 and b2 is GT_0 and b8 is GT_0:
t7 = min(t6, min(min(_B3_LT_02(inputs.b3[i]), _B2_GT_0(inputs.b2[i])), _B8_GT_0(inputs.b8[i])))
# Schlick = TRUE
outputs.Schlick[i] = t7
# else:
t7 = min(t6, 1.0 - t7)
# if b16 is GT_0 and b8 is GT_0:
t8 = min(t7, min(_B16_GT_0(inputs.b16[i]), _B8_GT_0(inputs.b8[i])))
# schlick_t = TRUE
outputs.schlick_t[i] = t8
# else:
t8 = min(t7, 1.0 - t8)
# Wasser2 = TRUE
outputs.Wasser2[i] = t8
| mit | -7,789,529,066,740,844,000 | 24.647303 | 270 | 0.44928 | false |
pfouque/deezer-python | deezer/tests/test_resources.py | 1 | 9053 | # -*- coding: utf-8 -*-
import json
import unittest
from types import GeneratorType
import deezer
from mock import patch
from .mocked_methods import fake_urlopen
class TestResources(unittest.TestCase):
def setUp(self):
self.patcher = patch('deezer.client.urlopen', fake_urlopen)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_resource_dict(self):
"""
Test that resource can be converted to dict
"""
client = deezer.Client()
response = fake_urlopen(client.object_url('track', 3135556))
resp_str = response.read().decode('utf-8')
response.close()
data = json.loads(resp_str)
resource = deezer.resources.Resource(client, data)
self.assertEqual(resource.asdict(), data)
def test_resource_relation(self):
"""
Test passing parent object when using get_relation
"""
client = deezer.Client()
album = client.get_album(302127)
tracks = album.get_tracks()
self.assertTrue(tracks[0].album is album)
def test_album_attributes(self):
"""
Test album resource
"""
client = deezer.Client()
album = client.get_album(302127)
self.assertTrue(hasattr(album, 'title'))
self.assertEqual(repr(album), '<Album: Discovery>')
artist = album.get_artist()
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertEqual(repr(artist), '<Artist: Daft Punk>')
def test_album_tracks(self):
"""
Test tracks method of album resource
"""
client = deezer.Client()
album = client.get_album(302127)
tracks = album.get_tracks()
self.assertIsInstance(tracks, list)
track = tracks[0]
self.assertIsInstance(track, deezer.resources.Track)
self.assertEqual(repr(track), '<Track: One More Time>')
self.assertEqual(type(album.iter_tracks()), GeneratorType)
track = list(album.iter_tracks())[0]
self.assertIsInstance(track, deezer.resources.Track)
def test_artist_attributes(self):
"""
Test artist resource
"""
client = deezer.Client()
artist = client.get_artist(27)
self.assertTrue(hasattr(artist, 'name'))
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertEqual(repr(artist), '<Artist: Daft Punk>')
def test_artist_albums(self):
"""
Test albums method of artist resource
"""
client = deezer.Client()
artist = client.get_artist(27)
albums = artist.get_albums()
self.assertIsInstance(albums, list)
album = albums[0]
self.assertIsInstance(album, deezer.resources.Album)
self.assertEqual(repr(album),
'<Album: Human After All (Remixes) (Remixes)>')
self.assertEqual(type(artist.iter_albums()), GeneratorType)
def test_artist_top(self):
"""
Test top method of artist resource
"""
client = deezer.Client()
artist = client.get_artist(27)
tracks = artist.get_top()
self.assertIsInstance(tracks, list)
track = tracks[0]
self.assertIsInstance(track, deezer.resources.Track)
self.assertEqual(repr(track), '<Track: Get Lucky (Radio Edit)>')
def test_artist_radio(self):
"""
Test radio method of artist resource
"""
client = deezer.Client()
artist = client.get_artist(27)
tracks = artist.get_radio()
self.assertIsInstance(tracks, list)
track = tracks[0]
self.assertIsInstance(track, deezer.resources.Track)
self.assertEqual(repr(track), '<Track: Lose Yourself to Dance>')
def test_artist_related(self):
"""
Test related method of artist resource
"""
client = deezer.Client()
artist = client.get_artist(27)
artists = artist.get_related()
self.assertIsInstance(artists, list)
artist = artists[0]
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertEqual(repr(artist), '<Artist: Justice>')
self.assertEqual(type(artist.iter_related()), GeneratorType)
def test_track_attributes(self):
"""
Test track resource
"""
client = deezer.Client()
track = client.get_track(3135556)
artist = track.get_artist()
album = track.get_album()
self.assertTrue(hasattr(track, 'title'))
self.assertIsInstance(track, deezer.resources.Track)
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertIsInstance(album, deezer.resources.Album)
self.assertEqual(repr(track), '<Track: Harder Better Faster Stronger>')
self.assertEqual(repr(artist), '<Artist: Daft Punk>')
self.assertEqual(repr(album), '<Album: Discovery>')
def test_radio_attributes(self):
"""
Test radio resource
"""
client = deezer.Client()
radio = client.get_radio(23261)
self.assertTrue(hasattr(radio, 'title'))
self.assertIsInstance(radio, deezer.resources.Radio)
self.assertEqual(repr(radio), '<Radio: Telegraph Classical>')
def test_radio_tracks(self):
"""
Test tracks method of radio resource
"""
client = deezer.Client()
radio = client.get_radio(23261)
tracks = radio.get_tracks()
self.assertIsInstance(tracks, list)
track = tracks[2]
self.assertIsInstance(track, deezer.resources.Track)
self.assertEqual(repr(track), '<Track: Schumann: Kinderszenen, Op.15 - 11. Fürchtenmachen>')
self.assertEqual(type(radio.iter_tracks()), GeneratorType)
def test_genre_attributes(self):
"""
Test genre resource
"""
client = deezer.Client()
genre = client.get_genre(106)
self.assertTrue(hasattr(genre, 'name'))
self.assertIsInstance(genre, deezer.resources.Genre)
self.assertEqual(repr(genre), '<Genre: Electro>')
def test_genre_artists(self):
"""
Test artists method of genre resource
"""
client = deezer.Client()
genre = client.get_genre(106)
artists = genre.get_artists()
self.assertIsInstance(artists, list)
artist = artists[0]
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertEqual(repr(artist), '<Artist: Calvin Harris>')
self.assertEqual(type(genre.iter_artists()), GeneratorType)
def test_genre_radios(self):
"""
Test radios method of genre resource
"""
client = deezer.Client()
genre = client.get_genre(106)
radios = genre.get_radios()
self.assertIsInstance(radios, list)
radio = radios[0]
self.assertIsInstance(radio, deezer.resources.Radio)
self.assertEqual(repr(radio), '<Radio: Techno/House>')
self.assertEqual(type(genre.iter_radios()), GeneratorType)
def test_chart_tracks(self):
"""
Test tracks method of chart resource
"""
client = deezer.Client()
chart = client.get_chart()
tracks = chart.get_tracks()
self.assertIsInstance(tracks, list)
track = tracks[0]
self.assertIsInstance(track, deezer.resources.Track)
self.assertEqual(repr(track), '<Track: Starboy>')
self.assertEqual(type(chart.iter_tracks()), GeneratorType)
def test_chart_artists(self):
"""
Test artists method of chart resource
"""
client = deezer.Client()
chart = client.get_chart()
artists = chart.get_artists()
self.assertIsInstance(artists, list)
artist = artists[0]
self.assertIsInstance(artist, deezer.resources.Artist)
self.assertEqual(repr(artist), '<Artist: Pnl>')
self.assertEqual(type(chart.iter_artists()), GeneratorType)
def test_chart_albums(self):
"""
Test albums method of chart resource
"""
client = deezer.Client()
chart = client.get_chart()
albums = chart.get_albums()
self.assertIsInstance(albums, list)
album = albums[0]
self.assertIsInstance(album, deezer.resources.Album)
self.assertEqual(repr(album),
"<Album: Where Is l'album de Gradur>")
self.assertEqual(type(chart.iter_albums()), GeneratorType)
def test_chart_playlists(self):
"""
Test playlists method of chart resource
"""
client = deezer.Client()
chart = client.get_chart()
playlists = chart.get_playlists()
self.assertIsInstance(playlists, list)
playlist = playlists[0]
self.assertIsInstance(playlist, deezer.resources.Playlist)
self.assertEqual(repr(playlist),
"<Playlist: Top France>")
self.assertEqual(type(chart.iter_playlists()), GeneratorType)
| mit | 5,145,083,881,017,740,000 | 34.498039 | 100 | 0.610252 | false |
isabellemao/Hello-World | python/Junior2015CCCJ4.py | 1 | 1278 | #Problem J4: Arrival Time
departure_time = input()
split_departure = list(departure_time) #The time of departure, split into a list.
#Split the list
departure_hour = split_departure[0:2]
departure_minute = split_departure[3:5]
#Change the split list to integers.
departure_hour = int("".join(departure_hour))
departure_minute = int("".join(departure_minute))
#The start and end of the rush hours
rh_start_1 = 7
rh_end_1 = 10
rh_start_2 = 15
rh_end_2 = 19
#Set the current time
hour = departure_hour
minute = departure_minute
#For the 120 minutes it usually takes Fiona to commute
for counter in range(1, 121):
#If it's currently rush hour
if hour >= rh_start_1 and hour < rh_end_1 or hour >= rh_start_2 and hour < rh_end_2:
#Twice as slow if rush hour
minute += 2
else:
#Normal speed if normal time
minute += 1
if minute >= 60:
minute = 0
#Reset hour
hour += 1
if hour == 24:
hour = 0
#Add fake zeroes if required.
if hour < 10:
hour = str(hour)
hour = "0" + hour
else:
hour = str(hour)
if minute < 10:
minute = str(minute)
minute = "0" + minute
else:
minute = str(minute)
#Make a valid output.
output = hour , ":" , minute
output = "".join(output)
print(output)
| apache-2.0 | -2,193,184,907,640,587,300 | 22.666667 | 88 | 0.640063 | false |
googleapis/python-pubsublite | google/cloud/pubsublite_v1/types/__init__.py | 1 | 4702 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .admin import (
CreateReservationRequest,
CreateSubscriptionRequest,
CreateTopicRequest,
DeleteReservationRequest,
DeleteSubscriptionRequest,
DeleteTopicRequest,
GetReservationRequest,
GetSubscriptionRequest,
GetTopicPartitionsRequest,
GetTopicRequest,
ListReservationsRequest,
ListReservationsResponse,
ListReservationTopicsRequest,
ListReservationTopicsResponse,
ListSubscriptionsRequest,
ListSubscriptionsResponse,
ListTopicsRequest,
ListTopicsResponse,
ListTopicSubscriptionsRequest,
ListTopicSubscriptionsResponse,
OperationMetadata,
SeekSubscriptionRequest,
SeekSubscriptionResponse,
TopicPartitions,
UpdateReservationRequest,
UpdateSubscriptionRequest,
UpdateTopicRequest,
)
from .common import (
AttributeValues,
Cursor,
PubSubMessage,
Reservation,
SequencedMessage,
Subscription,
TimeTarget,
Topic,
)
from .cursor import (
CommitCursorRequest,
CommitCursorResponse,
InitialCommitCursorRequest,
InitialCommitCursorResponse,
ListPartitionCursorsRequest,
ListPartitionCursorsResponse,
PartitionCursor,
SequencedCommitCursorRequest,
SequencedCommitCursorResponse,
StreamingCommitCursorRequest,
StreamingCommitCursorResponse,
)
from .publisher import (
InitialPublishRequest,
InitialPublishResponse,
MessagePublishRequest,
MessagePublishResponse,
PublishRequest,
PublishResponse,
)
from .subscriber import (
FlowControlRequest,
InitialPartitionAssignmentRequest,
InitialSubscribeRequest,
InitialSubscribeResponse,
MessageResponse,
PartitionAssignment,
PartitionAssignmentAck,
PartitionAssignmentRequest,
SeekRequest,
SeekResponse,
SubscribeRequest,
SubscribeResponse,
)
from .topic_stats import (
ComputeHeadCursorRequest,
ComputeHeadCursorResponse,
ComputeMessageStatsRequest,
ComputeMessageStatsResponse,
ComputeTimeCursorRequest,
ComputeTimeCursorResponse,
)
__all__ = (
"CreateReservationRequest",
"CreateSubscriptionRequest",
"CreateTopicRequest",
"DeleteReservationRequest",
"DeleteSubscriptionRequest",
"DeleteTopicRequest",
"GetReservationRequest",
"GetSubscriptionRequest",
"GetTopicPartitionsRequest",
"GetTopicRequest",
"ListReservationsRequest",
"ListReservationsResponse",
"ListReservationTopicsRequest",
"ListReservationTopicsResponse",
"ListSubscriptionsRequest",
"ListSubscriptionsResponse",
"ListTopicsRequest",
"ListTopicsResponse",
"ListTopicSubscriptionsRequest",
"ListTopicSubscriptionsResponse",
"OperationMetadata",
"SeekSubscriptionRequest",
"SeekSubscriptionResponse",
"TopicPartitions",
"UpdateReservationRequest",
"UpdateSubscriptionRequest",
"UpdateTopicRequest",
"AttributeValues",
"Cursor",
"PubSubMessage",
"Reservation",
"SequencedMessage",
"Subscription",
"TimeTarget",
"Topic",
"CommitCursorRequest",
"CommitCursorResponse",
"InitialCommitCursorRequest",
"InitialCommitCursorResponse",
"ListPartitionCursorsRequest",
"ListPartitionCursorsResponse",
"PartitionCursor",
"SequencedCommitCursorRequest",
"SequencedCommitCursorResponse",
"StreamingCommitCursorRequest",
"StreamingCommitCursorResponse",
"InitialPublishRequest",
"InitialPublishResponse",
"MessagePublishRequest",
"MessagePublishResponse",
"PublishRequest",
"PublishResponse",
"FlowControlRequest",
"InitialPartitionAssignmentRequest",
"InitialSubscribeRequest",
"InitialSubscribeResponse",
"MessageResponse",
"PartitionAssignment",
"PartitionAssignmentAck",
"PartitionAssignmentRequest",
"SeekRequest",
"SeekResponse",
"SubscribeRequest",
"SubscribeResponse",
"ComputeHeadCursorRequest",
"ComputeHeadCursorResponse",
"ComputeMessageStatsRequest",
"ComputeMessageStatsResponse",
"ComputeTimeCursorRequest",
"ComputeTimeCursorResponse",
)
| apache-2.0 | 5,778,659,300,799,625,000 | 26.658824 | 74 | 0.746278 | false |
robosafe/testbench_vRAL_hydro | bert2_simulator/sim_step_monitors/assertion_monitor_manager.py | 1 | 2830 | #!/usr/bin/env python
"""
Assertion Monitor Manager
Created by David Western, June 2015.
"""
from coverage import coverage
import imp
import rospkg
import rospy
from std_msgs.msg import UInt64
from std_srvs.srv import Empty
import sys
class AMM:
def __init__(self,AM_list_file,trace_label):
# Read list of assertion monitors to run (from file?):
rospack = rospkg.RosPack()
path = rospack.get_path('bert2_simulator')
path = path+'/sim_step_monitors/'
print("--- Assertion monitors to run:")
self.AM_names = [line.rstrip('\n') for line in open(path+AM_list_file)]
print(self.AM_names)
# Instantiate assertion monitors:
self.AMs = [] # Initialise empty list of AMs.
for idx, class_name in enumerate(self.AM_names):
print(class_name)
print path+class_name+'.py'
module = imp.load_source(class_name, path+class_name+'.py')
#module = __import__(path+class_name) # N.B. These two lines imply that we
class_ = getattr(module, class_name) # require the AM to be defined in a
# file with the same name as the class.
self.AMs.append(class_(trace_label))
# Check AM has the mandatory attributes:
mand_attrs = ['step']
for attr in mand_attrs:
if not hasattr(self.AMs[idx],attr):
rospy.logerr("Assertion monitor specification '%s' does not define the attribute \
'%s', which is required by AMM (assertion_monitor_manager.py). \
Does %s inherite from an assertion monitor base class?",
self.AMs[idx].__name__, attr, self.AMs[idx].__name__)
# Get service
self.unpause_gazebo = rospy.ServiceProxy('gazebo/unpause_physics',Empty)
# Subscriber to triggers, which come on each sim step:
rospy.Subscriber("AM_trigger", UInt64, self.trigger_AMs)
def trigger_AMs(self,data):
iteration = data.data
sim_time = rospy.get_time()
# Step all assertion monitors:
for idx, AM in enumerate(self.AMs):
AM.step(iteration,sim_time)
# Release gazebo now we've finished the checks for this step:
#print "unpausing"
#self.unpause_gazebo()
# Problem: This line prevents Gazebo's pause button from working (unless you
# get a lucky click).
if __name__ == '__main__':
try:
if len(sys.argv) < 3:
print("usage: rosrun [package_name] assertion_monitor_manager.py AM_list_file.txt report_file_name")
else:
rospy.init_node('AMM')
AMMInst = AMM(sys.argv[1],sys.argv[2])
rospy.spin()
except rospy.ROSInterruptException: #to stop the code when pressing Ctr+c
pass
| gpl-3.0 | 1,416,007,924,697,365,000 | 31.906977 | 109 | 0.602473 | false |
eayunstack/eayunstack-upgrade | ansible/library/keystone_v2_endpoint.py | 1 | 9178 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <[email protected]>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on Jimmy Tang's implementation
DOCUMENTATION = """
---
module: keystone_v2_endpoint
short_description:
- Manage OpenStack Identity (keystone) v2 endpoint.
description:
- Manage OpenStack Identity (keystone) v2 endpoint.
endpoints.
options:
token:
description:
- The token to be uses in case the password is not specified
required: true
default: None
endpoint:
description:
- The keystone url for authentication
required: true
service_name:
description:
- Name of the service.
required: true
default: None
region_name:
description:
- Name of the region.
required: true
default: None
service_type:
description:
- Type of service.
required: true
default: None
endpoint_dict:
description:
- Dict of endpoint urls to add to keystone for a service
required: true
default: None
type: dict
state:
description:
- Ensuring the endpoint is either present, absent.
- It always ensures endpoint is updated to latest url.
required: False
default: 'present'
requirements: [ python-keystoneclient ]
"""
EXAMPLES = """
# Create an endpoint
- keystone_v2_endpoint:
region_name: "RegionOne"
service_name: "glance"
service_type: "image"
endpoint: "http://127.0.0.1:5000/v2.0/"
token: "ChangeMe"
endpoint_dict:
publicurl: "http://127.0.0.1:9292"
adminurl: "http://127.0.0.1:9292"
internalurl: "http://127.0.0.1:9292"
"""
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
class ManageKeystoneV2Endpoint(object):
def __init__(self, module):
"""Manage Keystone via Ansible."""
self.state_change = False
self.keystone = None
# Load AnsibleModule
self.module = module
@staticmethod
def _facts(facts):
"""Return a dict for our Ansible facts.
:param facts: ``dict`` Dict with data to return
"""
return {'keystone_facts': facts}
def failure(self, error, rc, msg):
"""Return a Failure when running an Ansible command.
:param error: ``str`` Error that occurred.
:param rc: ``int`` Return code while executing an Ansible command.
:param msg: ``str`` Message to report.
"""
self.module.fail_json(msg=msg, rc=rc, err=error)
def _authenticate(self):
"""Return a keystone client object."""
endpoint = self.module.params.get('endpoint')
token = self.module.params.get('token')
if token is None:
self.failure(
error='Missing Auth Token',
rc=2,
msg='Auto token is required!'
)
if token:
self.keystone = client.Client(
endpoint=endpoint,
token=token
)
def _get_service(self, name, srv_type=None):
for entry in self.keystone.services.list():
if srv_type is not None:
if entry.type == srv_type and name == entry.name:
return entry
elif entry.name == name:
return entry
else:
return None
def _get_endpoint(self, region, service_id):
""" Getting endpoints per complete definition
Returns the endpoint details for an endpoint matching
region, service id.
:param service_id: service to which the endpoint belongs
:param region: geographic location of the endpoint
"""
for entry in self.keystone.endpoints.list():
check = [
entry.region == region,
entry.service_id == service_id,
]
if all(check):
return entry
else:
return None
def _compare_endpoint_info(self, endpoint, endpoint_dict):
""" Compare existed endpoint with module parameters
Return True if public, admin, internal urls are all the same.
:param endpoint: endpoint existed
:param endpoint_dict: endpoint info passed in
"""
check = [
endpoint.adminurl == endpoint_dict.get('adminurl'),
endpoint.publicurl == endpoint_dict.get('publicurl'),
endpoint.internalurl == endpoint_dict.get('internalurl')
]
if all(check):
return True
else:
return False
def ensure_endpoint(self):
"""Ensures the deletion/modification/addition of endpoints
within Keystone.
Returns the endpoint ID on a successful run.
"""
self._authenticate()
service_name = self.module.params.get('service_name')
service_type = self.module.params.get('service_type')
region = self.module.params.get('region_name')
endpoint_dict = self.module.params.get('endpoint_dict')
state = self.module.params.get('state')
endpoint_dict = {
'adminurl': endpoint_dict.get('adminurl', ''),
'publicurl': endpoint_dict.get('publicurl', ''),
'internalurl': endpoint_dict.get('internalurl', '')
}
service = self._get_service(name=service_name, srv_type=service_type)
if service is None:
self.failure(
error='service [ %s ] was not found.' % service_name,
rc=2,
msg='Service was not found, does it exist?'
)
existed_endpoint = self._get_endpoint(
region=region,
service_id=service.id,
)
delete_existed = False
if state == 'present':
''' Creating an endpoint (if it does
not exist) or creating a new one,
and then deleting the existing
endpoint that matches the service
type, name, and region.
'''
if existed_endpoint:
if not self._compare_endpoint_info(existed_endpoint,
endpoint_dict):
delete_existed = True
else:
endpoint = existed_endpoint
if (not existed_endpoint or
delete_existed):
self.state_change = True
endpoint = self.keystone.endpoints.create(
region=region,
service_id=service.id,
**endpoint_dict
)
elif state == 'absent':
if existed_endpoint is not None:
self.state_change = True
delete_existed = True
if delete_existed:
result = self.keystone.endpoints.delete(existed_endpoint.id)
if result[0].status_code != 204:
self.module.fail()
if state != 'absent':
facts = self._facts(endpoint.to_dict())
else:
facts = self._facts({})
self.module.exit_json(
changed=self.state_change,
ansible_facts=facts
)
# TODO(evrardjp): Deprecate state=update in Q.
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(
required=True
),
endpoint=dict(
required=True,
),
region_name=dict(
required=True
),
service_name=dict(
required=True
),
service_type=dict(
required=True
),
endpoint_dict=dict(
required=True,
type='dict'
),
state=dict(
choices=['present', 'absent'],
required=False,
default='present'
)
),
supports_check_mode=False,
)
km = ManageKeystoneV2Endpoint(module=module)
if not keystoneclient_found:
km.failure(
error='python-keystoneclient is missing',
rc=2,
msg='keystone client was not importable, is it installed?'
)
facts = km.ensure_endpoint()
# import module snippets
from ansible.module_utils.basic import * # NOQA
if __name__ == '__main__':
main()
| apache-2.0 | -101,433,040,338,243,950 | 28.322684 | 78 | 0.552953 | false |
shakamunyi/nova | nova/tests/unit/db/test_migrations.py | 1 | 32632 | # Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
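For MySQL a similar setup can be used; the following is only an illustrative
sketch (exact host and privilege details may vary in your environment)::
| mysql -u root -p
| mysql> CREATE DATABASE openstack_citest;
| mysql> CREATE USER 'openstack_citest'@'localhost'
|       IDENTIFIED BY 'openstack_citest';
| mysql> GRANT ALL PRIVILEGES ON openstack_citest.*
|       TO 'openstack_citest'@'localhost';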
"""
import glob
import logging
import os
from migrate.versioning import repository
import mock
from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import test_migrations
from oslo.db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
LOG = logging.getLogger(__name__)
class NovaMigrationsCheckers(test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
TIMEOUT_SCALING_FACTOR = 2
snake_walk = True
downgrade = True
@property
def INIT_VERSION(self):
return migration.db_initial_version()
@property
def REPOSITORY(self):
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
return sa_migration.versioning_api
@property
def migrate_engine(self):
return self.engine
def setUp(self):
super(NovaMigrationsCheckers, self).setUp()
        # NOTE(viktors): We should reduce log output because it causes issues
        #                when we run tests with testr
migrate_log = logging.getLogger('migrate')
old_level = migrate_log.level
migrate_log.setLevel(logging.WARN)
self.addCleanup(migrate_log.setLevel, old_level)
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(oslodbutils.column_exists(engine, table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
self.assertFalse(oslodbutils.column_exists(engine, table_name, column))
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(oslodbutils.index_exists(engine, table_name, index))
def assertIndexNotExists(self, engine, table_name, index):
self.assertFalse(oslodbutils.index_exists(engine, table_name, index))
def assertIndexMembers(self, engine, table, index, members):
# NOTE(johannes): Order of columns can matter. Most SQL databases
# can use the leading columns for optimizing queries that don't
# include all of the covered columns.
self.assertIndexExists(engine, table, index)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = [c.name for c in idx.columns]
break
self.assertEqual(members, index_columns)
def _skippable_migrations(self):
special = [
216, # Havana
272, # NOOP migration due to revert
]
havana_placeholders = range(217, 227)
icehouse_placeholders = range(235, 244)
juno_placeholders = range(255, 265)
return (special +
havana_placeholders +
icehouse_placeholders +
juno_placeholders)
def migrate_up(self, version, with_data=False):
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if version not in self._skippable_migrations():
self.assertIsNotNone(check,
('DB Migration %i does not have a '
'test. Please add one!') % version)
super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
def test_walk_versions(self):
self.walk_versions(self.snake_walk, self.downgrade)
def _check_227(self, engine, data):
table = oslodbutils.get_table(engine, 'project_user_quotas')
# Insert fake_quotas with the longest resource name.
fake_quotas = {'id': 5,
'project_id': 'fake_project',
'user_id': 'fake_user',
'resource': 'injected_file_content_bytes',
'hard_limit': 10}
table.insert().execute(fake_quotas)
# Check we can get the longest resource name.
quota = table.select(table.c.id == 5).execute().first()
self.assertEqual(quota['resource'], 'injected_file_content_bytes')
def _check_228(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'metrics')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.metrics.type,
sqlalchemy.types.Text)
def _post_downgrade_228(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'metrics')
def _check_229(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.extra_resources.type,
sqlalchemy.types.Text)
def _post_downgrade_229(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'extra_resources')
def _check_230(self, engine, data):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnExists(engine, table_name, 'host')
self.assertColumnExists(engine, table_name, 'details')
action_events = oslodbutils.get_table(engine,
'instance_actions_events')
self.assertIsInstance(action_events.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(action_events.c.details.type,
sqlalchemy.types.Text)
def _post_downgrade_230(self, engine):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnNotExists(engine, table_name, 'host')
self.assertColumnNotExists(engine, table_name, 'details')
def _check_231(self, engine, data):
self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
instances = oslodbutils.get_table(engine, 'instances')
self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
sqlalchemy.types.String)
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _post_downgrade_231(self, engine):
self.assertColumnNotExists(engine, 'instances', 'ephemeral_key_uuid')
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _check_232(self, engine, data):
table_names = ['compute_node_stats', 'compute_nodes',
'instance_actions', 'instance_actions_events',
'instance_faults', 'migrations']
for table_name in table_names:
self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text)
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'compute_node_stats')
def _post_downgrade_233(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
# confirm compute_node_stats exists
oslodbutils.get_table(engine, 'compute_node_stats')
def _check_234(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_244(self, engine, data):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _post_downgrade_244(self, engine):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(36, volume_usage_cache.c.user_id.type.length)
def _pre_upgrade_245(self, engine):
# create a fake network
networks = oslodbutils.get_table(engine, 'networks')
fake_network = {'id': 1}
networks.insert().execute(fake_network)
def _check_245(self, engine, data):
networks = oslodbutils.get_table(engine, 'networks')
network = networks.select(networks.c.id == 1).execute().first()
# mtu should default to None
self.assertIsNone(network.mtu)
# dhcp_server should default to None
self.assertIsNone(network.dhcp_server)
# enable dhcp should default to true
self.assertTrue(network.enable_dhcp)
# share address should default to false
self.assertFalse(network.share_address)
def _post_downgrade_245(self, engine):
self.assertColumnNotExists(engine, 'networks', 'mtu')
self.assertColumnNotExists(engine, 'networks', 'dhcp_server')
self.assertColumnNotExists(engine, 'networks', 'enable_dhcp')
self.assertColumnNotExists(engine, 'networks', 'share_address')
def _check_246(self, engine, data):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _post_downgrade_246(self, engine):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _check_247(self, engine, data):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertFalse(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertTrue(pci_devices.c.deleted.nullable)
self.assertFalse(pci_devices.c.product_id.nullable)
self.assertFalse(pci_devices.c.vendor_id.nullable)
self.assertFalse(pci_devices.c.dev_type.nullable)
def _post_downgrade_247(self, engine):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertTrue(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertFalse(pci_devices.c.deleted.nullable)
self.assertTrue(pci_devices.c.product_id.nullable)
self.assertTrue(pci_devices.c.vendor_id.nullable)
self.assertTrue(pci_devices.c.dev_type.nullable)
def _check_248(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _post_downgrade_248(self, engine):
reservations = oslodbutils.get_table(engine, 'reservations')
index_names = [idx.name for idx in reservations.indexes]
self.assertNotIn('reservations_deleted_expire_idx', index_names)
def _check_249(self, engine, data):
# Assert that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _post_downgrade_249(self, engine):
# The duplicate index is not created on downgrade, so this
# asserts that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _check_250(self, engine, data):
self.assertTableNotExists(engine, 'instance_group_metadata')
self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
def _post_downgrade_250(self, engine):
oslodbutils.get_table(engine, 'instance_group_metadata')
oslodbutils.get_table(engine, 'shadow_instance_group_metadata')
def _check_251(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnExists(engine, 'shadow_compute_nodes',
'numa_topology')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(engine,
'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
def _post_downgrade_251(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnNotExists(engine, 'shadow_compute_nodes',
'numa_topology')
def _check_252(self, engine, data):
oslodbutils.get_table(engine, 'instance_extra')
oslodbutils.get_table(engine, 'shadow_instance_extra')
self.assertIndexMembers(engine, 'instance_extra',
'instance_extra_idx',
['instance_uuid'])
def _post_downgrade_252(self, engine):
self.assertTableNotExists(engine, 'instance_extra')
self.assertTableNotExists(engine, 'shadow_instance_extra')
def _check_253(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnExists(
engine, 'shadow_instance_extra', 'pci_requests')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(engine,
'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
def _post_downgrade_253(self, engine):
self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnNotExists(engine, 'shadow_instance_extra',
'pci_requests')
def _check_254(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'request_id')
self.assertColumnExists(
engine, 'shadow_pci_devices', 'request_id')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.request_id.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_pci_devices.c.request_id.type,
sqlalchemy.types.String)
def _post_downgrade_254(self, engine):
self.assertColumnNotExists(engine, 'pci_devices', 'request_id')
self.assertColumnNotExists(
engine, 'shadow_pci_devices', 'request_id')
def _check_265(self, engine, data):
# Assert that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _post_downgrade_265(self, engine):
# The duplicated index is not created on downgrade, so this
# asserts that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _check_266(self, engine, data):
self.assertColumnExists(engine, 'tags', 'resource_id')
self.assertColumnExists(engine, 'tags', 'tag')
table = oslodbutils.get_table(engine, 'tags')
self.assertIsInstance(table.c.resource_id.type,
sqlalchemy.types.String)
self.assertIsInstance(table.c.tag.type,
sqlalchemy.types.String)
def _post_downgrade_266(self, engine):
self.assertTableNotExists(engine, 'tags')
def _pre_upgrade_267(self, engine):
# Create a fixed_ips row with a null instance_uuid (if not already
# there) to make sure that's not deleted.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
fake_fixed_ip = {'id': 1}
fixed_ips.insert().execute(fake_fixed_ip)
# Create an instance record with a valid (non-null) UUID so we make
# sure we don't do something stupid and delete valid records.
instances = oslodbutils.get_table(engine, 'instances')
fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
instances.insert().execute(fake_instance)
# Add a null instance_uuid entry for the volumes table
# since it doesn't have a foreign key back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
# Make sure instances.uuid is non-nullable and the unique constraint
# exists; fixed_ips.instance_uuid should remain nullable.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
volume = volumes.select(
volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
).execute().first()
self.assertIsNone(volume.instance_uuid)
def _post_downgrade_267(self, engine):
# Make sure the UC is gone and the column is nullable again.
instances = oslodbutils.get_table(engine, 'instances')
self.assertTrue(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertNotIn('uniq_instances0uuid', constraint_names)
def test_migration_267(self):
# This is separate from test_walk_versions so we can test the case
# where there are null instance_uuid entries in the database which
# cause the 267 migration to fail.
engine = self.migrate_engine
self.migration_api.version_control(
engine, self.REPOSITORY, self.INIT_VERSION)
self.migration_api.upgrade(engine, self.REPOSITORY, 266)
# Create a consoles record with a null instance_uuid so
# we can test that the upgrade fails if that entry is found.
# NOTE(mriedem): We use the consoles table since that's the only table
# created in the 216 migration with a ForeignKey created on the
# instance_uuid table for sqlite.
consoles = oslodbutils.get_table(engine, 'consoles')
fake_console = {'id': 1}
consoles.insert().execute(fake_console)
# NOTE(mriedem): Run the 267 migration and expect it to hit a
# ValidationError because the consoles table has a null
# instance_uuid entry.
ex = self.assertRaises(exception.ValidationError,
self.migration_api.upgrade,
engine, self.REPOSITORY, 267)
self.assertIn("There are 1 records in the "
"'consoles' table where the uuid or "
"instance_uuid column is NULL.",
ex.kwargs['detail'])
# Remove the consoles entry with the null instance_uuid column.
rows = consoles.delete().where(
consoles.c['instance_uuid'] == null()).execute().rowcount
self.assertEqual(1, rows)
# Now run the 267 upgrade again.
self.migration_api.upgrade(engine, self.REPOSITORY, 267)
# Make sure the consoles entry with the null instance_uuid
# was deleted.
console = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(console)
def _check_268(self, engine, data):
# We can only assert that the col exists, not the unique constraint
# as the engine is running sqlite
self.assertColumnExists(engine, 'compute_nodes', 'host')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(
engine, 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_compute_nodes.c.host.type,
sqlalchemy.types.String)
def _post_downgrade_268(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'host')
self.assertColumnNotExists(engine, 'shadow_compute_nodes', 'host')
def _check_269(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'numa_node')
self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(pci_devices.c.numa_node.nullable)
self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _post_downgrade_269(self, engine):
self.assertColumnNotExists(engine, 'pci_devices', 'numa_node')
self.assertColumnNotExists(engine, 'shadow_pci_devices', 'numa_node')
def _check_270(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'flavor')
self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.flavor.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.flavor.type,
sqlalchemy.types.Text)
def _post_downgrade_270(self, engine):
self.assertColumnNotExists(engine, 'instance_extra', 'flavor')
self.assertColumnNotExists(engine, 'shadow_instance_extra', 'flavor')
def _check_271(self, engine, data):
self.assertIndexMembers(engine, 'block_device_mapping',
'snapshot_id', ['snapshot_id'])
self.assertIndexMembers(engine, 'block_device_mapping',
'volume_id', ['volume_id'])
self.assertIndexMembers(engine, 'dns_domains',
'dns_domains_project_id_idx',
['project_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'network_id', ['network_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_instance_uuid_fkey',
['instance_uuid'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_virtual_interface_id_fkey',
['virtual_interface_id'])
self.assertIndexMembers(engine, 'floating_ips',
'fixed_ip_id', ['fixed_ip_id'])
self.assertIndexMembers(engine, 'iscsi_targets',
'iscsi_targets_volume_id_fkey', ['volume_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_network_id_idx',
['network_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_instance_uuid_fkey',
['instance_uuid'])
# Removed on MySQL, never existed on other databases
self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
def _post_downgrade_271(self, engine):
self.assertIndexNotExists(engine, 'dns_domains',
'dns_domains_project_id_idx')
self.assertIndexNotExists(engine, 'virtual_interfaces',
'virtual_interfaces_network_id_idx')
if engine.name == 'mysql':
self.assertIndexMembers(engine, 'dns_domains',
'project_id',
['project_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'network_id',
['network_id'])
# Rest of indexes will still exist on MySQL
return
# Never existed on non-MySQL databases, so shouldn't exist now
self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
for table_name, index_name in [
('block_device_mapping', 'snapshot_id'),
('block_device_mapping', 'volume_id'),
('dns_domains', 'dns_domains_project_id_idx'),
('fixed_ips', 'network_id'),
('fixed_ips', 'fixed_ips_instance_uuid_fkey'),
('fixed_ips', 'fixed_ips_virtual_interface_id_fkey'),
('floating_ips', 'fixed_ip_id'),
('iscsi_targets', 'iscsi_targets_volume_id_fkey'),
('virtual_interfaces', 'virtual_interfaces_network_id_idx'),
('virtual_interfaces',
'virtual_interfaces_instance_uuid_fkey')]:
self.assertIndexNotExists(engine, table_name, index_name)
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
test.TestCase,
test_base.DbTestCase):
pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
test.TestCase,
test_base.MySQLOpportunisticTestCase):
def test_innodb_tables(self):
with mock.patch.object(sa_migration, 'get_engine',
return_value=self.migrate_engine):
sa_migration.db_sync()
total = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA = '%(database)s'" %
{'database': self.migrate_engine.url.database})
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%(database)s' "
"AND ENGINE != 'InnoDB' "
"AND TABLE_NAME != 'migrate_version'" %
{'database': self.migrate_engine.url.database})
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
test.TestCase,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ProjectTestCase(test.NoDBTestCase):
def test_all_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
"migrate_repo", "versions", "*.py")
missing_downgrade = []
for path in glob.iglob(py_glob):
has_upgrade = False
has_downgrade = False
with open(path, "r") as f:
for line in f:
if 'def upgrade(' in line:
has_upgrade = True
if 'def downgrade(' in line:
has_downgrade = True
if has_upgrade and not has_downgrade:
fname = os.path.basename(path)
missing_downgrade.append(fname)
helpful_msg = ("The following migrations are missing a downgrade:"
"\n\t%s" % '\n\t'.join(sorted(missing_downgrade)))
self.assertFalse(missing_downgrade, helpful_msg)
| apache-2.0 | -8,949,497,123,520,721,000 | 44.009655 | 79 | 0.610199 | false |
Kivvix/stage-LPC | compareSrc/searchSDSSdata.py | 1 | 4221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
import glob
from config import *
import data.calexp
import data.src
## @def attributs
# @brief attributes selected from the SDSS DB and the src fits file
attributs = 'objid,run,camcol,field,ra,dec,u,g,r,i,z'
## Calexp treatment ##
def coordCalexp( fitsNum , calexpFits , first=True ):
coordMin, coordMax = data.calexp.coord( calexpFits , first )
if ( first ):
return coordMin
else:
return coordMax
def savCalexp( coordMin , coordMax , fitsNum ):
global attributs , PATH_OUTPUT
calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum )
data.calexp.write( calexpLines , attributs , fitsNum , PATH_OUTPUT , True )
def calexp( fitsNum , calexpFits , first=True ):
"""
find and write calexp data (id,ra,dec,mag)
:param fitsNum: number of fits file (``rrrrrr-bc-ffff``)
:param calexpFits: name of calexp fits file
:param first: take the whole picture or skip the first 128 pixels
:type fitsNum: string
:type calexpFits: string
:type first: boolean
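Example call (hypothetical run/band/column numbers, for illustration only)::
    calexp('006377-r4-0074',
           PATH_DATA + '/6377/4/r/calexp/calexp-006377-r4-0074.fits',
           first=True)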
"""
global attributs , PATH_OUTPUT
coordMin, coordMax = data.calexp.coord( calexpFits , first )
calexpLines = data.calexp.query( coordMin , coordMax , attributs , fitsNum )
data.calexp.write( calexpLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first )
## Src treatment ##
def src( fitsNum , srcFits , first=True ):
"""
find and write src data (id,ra,dec,mag)
:param fitsNum: number of fits file (``rrrrrr-bc-ffff``)
:param srcFits: name of src fits file
:param first: take the whole picture or skip the first 128 pixels
:type fitsNum: string
:type srcFits: string
:type first: boolean
"""
global attributs , PATH_OUTPUT
srcCoord,srcMag = data.src.coord( srcFits , fitsNum , first )
srcLines = data.src.map( srcCoord , srcMag )
data.src.write( srcLines , attributs , fitsNum[0:9] , PATH_OUTPUT , first )
def analyCol( runNum , c ):
"""
function meant to be run in a thread, running the data search for one column
:param runNum: run number
:param c: column of the CCD (1-6)
:type runNum: string
:type c: string
"""
global b , PATH_DATA , PWD
print " " + str(c) + " ",
# data of each pair of fits files
first = True
for fits in glob.glob( c + "/" + b + "/calexp/calexp*.fits" ):
fitsNum = fits[18:32]
## @def calexpFits
# @brief path and name of calexp fits file
calexpFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/calexp/calexp-" + fitsNum + ".fits"
## @def srcFits
# @brief path and name of src fits file
#srcFits = PATH_DATA + "/" + runNum + "/" + c + "/" + b + "/src/src-" + fitsNum + ".fits"
#calexp( fitsNum , calexpFits , first )
if ( first ):
coordMin = coordCalexp( fitsNum , calexpFits , first )
else:
coordMax = coordCalexp( fitsNum , calexpFits , first )
#src( fitsNum , srcFits , first )
first = False
savCalexp( coordMin , coordMax , "%06d" % int(runNum) + "-" + b + c )
def analyRun( runNum ):
global b , PWD , PATH_DATA , PATH_OUTPUT , attributs
print "run : " + str(runNum ) + " : ",
os.chdir( PATH_DATA + "/" + runNum )
columns = glob.glob( "*" )
for c in columns :
analyCol( runNum , c )
if __name__ == '__main__':
os.chdir( PATH_DATA )
runs = glob.glob( "*" )
#runs = ( 7158, 7112, 5924, 5566, 6421, 7057, 6430, 4895, 5895, 6474, 6383, 7038, 5642, 6409, 6513, 6501, 6552, 2650, 6559, 6355, 7177, 7121, 3465, 7170, 7051, 6283, 6458, 5853, 6484, 5765, 2708, 5786, 4253, 6934, 6508, 2662, 6518, 6584, 4188, 6976, 7202, 7173, 4153, 5820, 2649, 7140, 6330, 3388, 7117, 6504, 6314, 4128, 6596, 6564, 5807, 6367, 6373, 5622, 5882, 7034, 7136, 6577, 6600, 2768, 3437, 4927, 6414, 3434, 5813, 7084, 4858, 7124, 6982, 4917, 4192, 5898, 6479, 4868, 7106, 7195, 5744, 3360, 4198, 6963, 6533, 4933, 5603, 3384, 7155, 5619, 4207, 4849, 5582, 7024, 1755, 5709, 5781, 5770, 7145, 5754, 5646, 5800, 5759, 6287, 6568, 7054, 4203, 5776, 6433, 4247, 5823, 5052, 3325, 5836, 5590, 6580, 7161, 2728, 4145, 5633, 6461, 6555, 6955, 4874, 5792, 5918, 6425, 6377, 4263, 5878, 6441, 6447, 7080, 5905, 5713, 6618, 6537, 5637, 6402, 6530, 7047, 6524, 7101, 6293 )
for r in runs :
analyRun( r )
print " "
time.sleep(60)
| mit | -5,985,041,647,614,164,000 | 33.040323 | 875 | 0.644871 | false |
xaled/wunderous-analytics | wunderous/drive.py | 1 | 5688 | import os
import sys
import httplib2
from oauth2client.file import Storage
from apiclient import discovery
from oauth2client.client import OAuth2WebServerFlow
from wunderous.config import config
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
SHEETS_OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/drive.file https://www.googleapis.com/auth/spreadsheets https://www.googleapis.com/auth/spreadsheets.readonly'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'credentials.json')
SHEETS_CREDS_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'sheets_credentials.json')
# CONFIG_FILE = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "wunderous.config.json")
sheet_service = None
drive_service = None
# def load_configs():
# client_secret = config['client_secret']
# client_id = config['client_id']
# return client_id, client_secret
def init_drive_service():
global drive_service
if drive_service:
return drive_service
storage = Storage(CREDS_FILE)
credentials = storage.get()
if credentials is None:
# Run through the OAuth flow and retrieve credentials
# client_id, client_secret = load_configs()
flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print('Go to the following link in your browser: ' + authorize_url)
code = input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
storage.put(credentials)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = discovery.build('drive', 'v2', http=http)
return drive_service
def init_sheet_service():
global sheet_service
if sheet_service:
return sheet_service
storage = Storage(SHEETS_CREDS_FILE)
credentials = storage.get()
if credentials is None:
# Run through the OAuth flow and retrieve credentials
# client_id, client_secret = load_configs()
flow = OAuth2WebServerFlow(config['drive']['client_id'], config['drive']['client_secret'], OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print('Go to the following link in your browser: ' + authorize_url)
code = input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
storage.put(credentials)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
sheet_service = discovery.build('sheets', 'v4', http=http)
return sheet_service
def list_files(service):
page_token = None
while True:
param = {}
if page_token:
param['pageToken'] = page_token
files = service.files().list(**param).execute()
for item in files['items']:
yield item
page_token = files.get('nextPageToken')
if not page_token:
break
def _download_file(drive_service, download_url, outfile):
resp, content = drive_service._http.request(download_url)
if resp.status == 200:
with open(outfile, 'wb') as f:
f.write(content)
print("OK")
return
else:
raise Exception("ERROR downloading %s, response code is not 200!" % outfile)
def download_file(outfile, fileid):
drive_service = init_drive_service()
for item in list_files(drive_service):
if fileid == item.get('id'):
if 'downloadUrl' in item:
_download_file(drive_service, item['downloadUrl'], outfile)
return
else:
raise Exception("No download link is found for file: %s" % item['title'])
raise Exception("No file with id: %s is found " % fileid)
def get_sheet_metadata(spreadsheet_id):
sheet_service = init_sheet_service()
sheet_metadata = sheet_service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute()
return sheet_metadata
def get_sheet_values(spreadsheet_id, range_):
sheet_service = init_sheet_service()
request = sheet_service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_,
valueRenderOption='FORMATTED_VALUE',
dateTimeRenderOption='SERIAL_NUMBER')
response = request.execute()
return response
def get_sheet_value(spreadsheet_id, range_):
response = get_sheet_values(spreadsheet_id, range_)
try:
return response['values'][0][0]
except:
return ''
def update_sheet_values(spreadsheet_id, range_, values):
sheet_service = init_sheet_service()
body = {'values': values}
result = sheet_service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, body=body,
valueInputOption='USER_ENTERED').execute()
return result.get('updatedCells')
def append_sheet_values(spreadsheet_id, range_, values):
sheet_service = init_sheet_service()
body = {'values': values}
result = sheet_service.spreadsheets().values().append(spreadsheetId=spreadsheet_id, range=range_, body=body,
valueInputOption='USER_ENTERED').execute()
return result.get('updates').get('updatedCells')
| mit | 7,538,950,005,650,568,000 | 37.174497 | 249 | 0.651371 | false |
Goamaral/SCC | inputWindow.py | 1 | 31922 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'inputWindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
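#
# If inputWindow.ui changes, this module is normally regenerated rather than
# edited by hand, e.g. (assuming the PyQt4 development tools are installed):
#
#     pyuic4 inputWindow.ui -o inputWindow.py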
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(708, 428)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.List = QtGui.QVBoxLayout()
self.List.setObjectName(_fromUtf8("List"))
self.listItem_3 = QtGui.QWidget(self.centralwidget)
self.listItem_3.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_3.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_3.setObjectName(_fromUtf8("listItem_3"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.listItem_3)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.nameLabel_3 = QtGui.QLabel(self.listItem_3)
self.nameLabel_3.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_3.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_3.setFont(font)
self.nameLabel_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_3.setObjectName(_fromUtf8("nameLabel_3"))
self.horizontalLayout_5.addWidget(self.nameLabel_3)
self.nameLabel_27 = QtGui.QLabel(self.listItem_3)
self.nameLabel_27.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_27.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_27.setFont(font)
self.nameLabel_27.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_27.setObjectName(_fromUtf8("nameLabel_27"))
self.horizontalLayout_5.addWidget(self.nameLabel_27)
self.mediaChegadaA = QtGui.QLineEdit(self.listItem_3)
self.mediaChegadaA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setText(_fromUtf8(""))
self.mediaChegadaA.setObjectName(_fromUtf8("mediaChegadaA"))
self.horizontalLayout_5.addWidget(self.mediaChegadaA)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.List.addWidget(self.listItem_3)
self.listItem_6 = QtGui.QWidget(self.centralwidget)
self.listItem_6.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_6.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_6.setObjectName(_fromUtf8("listItem_6"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.listItem_6)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.nameLabel_7 = QtGui.QLabel(self.listItem_6)
self.nameLabel_7.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_7.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_7.setFont(font)
self.nameLabel_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_7.setObjectName(_fromUtf8("nameLabel_7"))
self.horizontalLayout_7.addWidget(self.nameLabel_7)
self.nameLabel_8 = QtGui.QLabel(self.listItem_6)
self.nameLabel_8.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_8.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_8.setFont(font)
self.nameLabel_8.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_8.setObjectName(_fromUtf8("nameLabel_8"))
self.horizontalLayout_7.addWidget(self.nameLabel_8)
self.mediaPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.mediaPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setText(_fromUtf8(""))
self.mediaPerfuracaoA.setObjectName(_fromUtf8("mediaPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.mediaPerfuracaoA)
self.nameLabel_9 = QtGui.QLabel(self.listItem_6)
self.nameLabel_9.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_9.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_9.setFont(font)
self.nameLabel_9.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_9.setObjectName(_fromUtf8("nameLabel_9"))
self.horizontalLayout_7.addWidget(self.nameLabel_9)
self.desvioPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.desvioPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setText(_fromUtf8(""))
self.desvioPerfuracaoA.setObjectName(_fromUtf8("desvioPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.desvioPerfuracaoA)
self.nameLabel_10 = QtGui.QLabel(self.listItem_6)
self.nameLabel_10.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_10.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_10.setFont(font)
self.nameLabel_10.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_10.setObjectName(_fromUtf8("nameLabel_10"))
self.horizontalLayout_7.addWidget(self.nameLabel_10)
self.nMaquinasPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.nMaquinasPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoA.setObjectName(_fromUtf8("nMaquinasPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.nMaquinasPerfuracaoA)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.List.addWidget(self.listItem_6)
self.listItem_7 = QtGui.QWidget(self.centralwidget)
self.listItem_7.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_7.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_7.setObjectName(_fromUtf8("listItem_7"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.listItem_7)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.nameLabel_11 = QtGui.QLabel(self.listItem_7)
self.nameLabel_11.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_11.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_11.setFont(font)
self.nameLabel_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_11.setObjectName(_fromUtf8("nameLabel_11"))
self.horizontalLayout_8.addWidget(self.nameLabel_11)
self.nameLabel_12 = QtGui.QLabel(self.listItem_7)
self.nameLabel_12.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_12.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_12.setFont(font)
self.nameLabel_12.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_12.setObjectName(_fromUtf8("nameLabel_12"))
self.horizontalLayout_8.addWidget(self.nameLabel_12)
self.mediaPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.mediaPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setText(_fromUtf8(""))
self.mediaPolimentoA.setObjectName(_fromUtf8("mediaPolimentoA"))
self.horizontalLayout_8.addWidget(self.mediaPolimentoA)
self.nameLabel_13 = QtGui.QLabel(self.listItem_7)
self.nameLabel_13.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_13.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_13.setFont(font)
self.nameLabel_13.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_13.setObjectName(_fromUtf8("nameLabel_13"))
self.horizontalLayout_8.addWidget(self.nameLabel_13)
self.desvioPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.desvioPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setText(_fromUtf8(""))
self.desvioPolimentoA.setObjectName(_fromUtf8("desvioPolimentoA"))
self.horizontalLayout_8.addWidget(self.desvioPolimentoA)
self.nameLabel_14 = QtGui.QLabel(self.listItem_7)
self.nameLabel_14.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_14.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_14.setFont(font)
self.nameLabel_14.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_14.setObjectName(_fromUtf8("nameLabel_14"))
self.horizontalLayout_8.addWidget(self.nameLabel_14)
self.nMaquinasPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.nMaquinasPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setText(_fromUtf8(""))
self.nMaquinasPolimentoA.setObjectName(_fromUtf8("nMaquinasPolimentoA"))
self.horizontalLayout_8.addWidget(self.nMaquinasPolimentoA)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.List.addWidget(self.listItem_7)
self.line_2 = QtGui.QFrame(self.centralwidget)
self.line_2.setMinimumSize(QtCore.QSize(5, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.line_2.setFont(font)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.List.addWidget(self.line_2)
self.listItem_4 = QtGui.QWidget(self.centralwidget)
self.listItem_4.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_4.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_4.setObjectName(_fromUtf8("listItem_4"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.listItem_4)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.nameLabel_4 = QtGui.QLabel(self.listItem_4)
self.nameLabel_4.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_4.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_4.setFont(font)
self.nameLabel_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_4.setObjectName(_fromUtf8("nameLabel_4"))
self.horizontalLayout_6.addWidget(self.nameLabel_4)
self.nameLabel_31 = QtGui.QLabel(self.listItem_4)
self.nameLabel_31.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_31.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_31.setFont(font)
self.nameLabel_31.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_31.setObjectName(_fromUtf8("nameLabel_31"))
self.horizontalLayout_6.addWidget(self.nameLabel_31)
self.mediaChegadaB = QtGui.QLineEdit(self.listItem_4)
self.mediaChegadaB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setText(_fromUtf8(""))
self.mediaChegadaB.setObjectName(_fromUtf8("mediaChegadaB"))
self.horizontalLayout_6.addWidget(self.mediaChegadaB)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem3)
self.List.addWidget(self.listItem_4)
self.listItem_9 = QtGui.QWidget(self.centralwidget)
self.listItem_9.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_9.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_9.setObjectName(_fromUtf8("listItem_9"))
self.horizontalLayout_13 = QtGui.QHBoxLayout(self.listItem_9)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.nameLabel_36 = QtGui.QLabel(self.listItem_9)
self.nameLabel_36.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_36.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_36.setFont(font)
self.nameLabel_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_36.setObjectName(_fromUtf8("nameLabel_36"))
self.horizontalLayout_13.addWidget(self.nameLabel_36)
self.nameLabel_37 = QtGui.QLabel(self.listItem_9)
self.nameLabel_37.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_37.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_37.setFont(font)
self.nameLabel_37.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_37.setObjectName(_fromUtf8("nameLabel_37"))
self.horizontalLayout_13.addWidget(self.nameLabel_37)
self.mediaPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.mediaPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setText(_fromUtf8(""))
self.mediaPerfuracaoB.setObjectName(_fromUtf8("mediaPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.mediaPerfuracaoB)
self.nameLabel_38 = QtGui.QLabel(self.listItem_9)
self.nameLabel_38.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_38.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_38.setFont(font)
self.nameLabel_38.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_38.setObjectName(_fromUtf8("nameLabel_38"))
self.horizontalLayout_13.addWidget(self.nameLabel_38)
self.desvioPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.desvioPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setText(_fromUtf8(""))
self.desvioPerfuracaoB.setObjectName(_fromUtf8("desvioPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.desvioPerfuracaoB)
self.nameLabel_39 = QtGui.QLabel(self.listItem_9)
self.nameLabel_39.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_39.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_39.setFont(font)
self.nameLabel_39.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_39.setObjectName(_fromUtf8("nameLabel_39"))
self.horizontalLayout_13.addWidget(self.nameLabel_39)
self.nMaquinasPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.nMaquinasPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoB.setObjectName(_fromUtf8("nMaquinasPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.nMaquinasPerfuracaoB)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem4)
self.List.addWidget(self.listItem_9)
self.listItem_8 = QtGui.QWidget(self.centralwidget)
self.listItem_8.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_8.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_8.setObjectName(_fromUtf8("listItem_8"))
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.listItem_8)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.nameLabel_19 = QtGui.QLabel(self.listItem_8)
self.nameLabel_19.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_19.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_19.setFont(font)
self.nameLabel_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_19.setObjectName(_fromUtf8("nameLabel_19"))
self.horizontalLayout_10.addWidget(self.nameLabel_19)
self.nameLabel_20 = QtGui.QLabel(self.listItem_8)
self.nameLabel_20.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_20.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_20.setFont(font)
self.nameLabel_20.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_20.setObjectName(_fromUtf8("nameLabel_20"))
self.horizontalLayout_10.addWidget(self.nameLabel_20)
self.mediaPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.mediaPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setText(_fromUtf8(""))
self.mediaPolimentoB.setObjectName(_fromUtf8("mediaPolimentoB"))
self.horizontalLayout_10.addWidget(self.mediaPolimentoB)
self.nameLabel_21 = QtGui.QLabel(self.listItem_8)
self.nameLabel_21.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_21.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_21.setFont(font)
self.nameLabel_21.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_21.setObjectName(_fromUtf8("nameLabel_21"))
self.horizontalLayout_10.addWidget(self.nameLabel_21)
self.desvioPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.desvioPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setText(_fromUtf8(""))
self.desvioPolimentoB.setObjectName(_fromUtf8("desvioPolimentoB"))
self.horizontalLayout_10.addWidget(self.desvioPolimentoB)
self.nameLabel_22 = QtGui.QLabel(self.listItem_8)
self.nameLabel_22.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_22.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_22.setFont(font)
self.nameLabel_22.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_22.setObjectName(_fromUtf8("nameLabel_22"))
self.horizontalLayout_10.addWidget(self.nameLabel_22)
self.nMaquinasPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.nMaquinasPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setText(_fromUtf8(""))
self.nMaquinasPolimentoB.setObjectName(_fromUtf8("nMaquinasPolimentoB"))
self.horizontalLayout_10.addWidget(self.nMaquinasPolimentoB)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem5)
self.List.addWidget(self.listItem_8)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setMinimumSize(QtCore.QSize(0, 5))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.List.addWidget(self.line)
self.listItem_11 = QtGui.QWidget(self.centralwidget)
self.listItem_11.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_11.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_11.setObjectName(_fromUtf8("listItem_11"))
self.horizontalLayout_12 = QtGui.QHBoxLayout(self.listItem_11)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.nameLabel_23 = QtGui.QLabel(self.listItem_11)
self.nameLabel_23.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_23.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_23.setFont(font)
self.nameLabel_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_23.setObjectName(_fromUtf8("nameLabel_23"))
self.horizontalLayout_12.addWidget(self.nameLabel_23)
self.nameLabel_24 = QtGui.QLabel(self.listItem_11)
self.nameLabel_24.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_24.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_24.setFont(font)
self.nameLabel_24.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_24.setObjectName(_fromUtf8("nameLabel_24"))
self.horizontalLayout_12.addWidget(self.nameLabel_24)
self.mediaEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.mediaEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setText(_fromUtf8(""))
self.mediaEnvernizamento.setObjectName(_fromUtf8("mediaEnvernizamento"))
self.horizontalLayout_12.addWidget(self.mediaEnvernizamento)
self.nameLabel_25 = QtGui.QLabel(self.listItem_11)
self.nameLabel_25.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_25.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_25.setFont(font)
self.nameLabel_25.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_25.setObjectName(_fromUtf8("nameLabel_25"))
self.horizontalLayout_12.addWidget(self.nameLabel_25)
self.desvioEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.desvioEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setText(_fromUtf8(""))
self.desvioEnvernizamento.setObjectName(_fromUtf8("desvioEnvernizamento"))
self.horizontalLayout_12.addWidget(self.desvioEnvernizamento)
self.nameLabel_26 = QtGui.QLabel(self.listItem_11)
self.nameLabel_26.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_26.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_26.setFont(font)
self.nameLabel_26.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_26.setObjectName(_fromUtf8("nameLabel_26"))
self.horizontalLayout_12.addWidget(self.nameLabel_26)
self.nMaquinasEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.nMaquinasEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setText(_fromUtf8(""))
self.nMaquinasEnvernizamento.setObjectName(_fromUtf8("nMaquinasEnvernizamento"))
self.horizontalLayout_12.addWidget(self.nMaquinasEnvernizamento)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem6)
self.List.addWidget(self.listItem_11)
self.verticalLayout_4.addLayout(self.List)
self.footer = QtGui.QWidget(self.centralwidget)
self.footer.setMaximumSize(QtCore.QSize(100000, 50))
self.footer.setObjectName(_fromUtf8("footer"))
self.horizontalLayout = QtGui.QHBoxLayout(self.footer)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.nameLabel_30 = QtGui.QLabel(self.footer)
self.nameLabel_30.setMinimumSize(QtCore.QSize(130, 0))
self.nameLabel_30.setMaximumSize(QtCore.QSize(130, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_30.setFont(font)
self.nameLabel_30.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_30.setObjectName(_fromUtf8("nameLabel_30"))
self.horizontalLayout.addWidget(self.nameLabel_30)
self.tipoLimite = QtGui.QComboBox(self.footer)
self.tipoLimite.setMinimumSize(QtCore.QSize(125, 0))
self.tipoLimite.setMaximumSize(QtCore.QSize(125, 16777215))
self.tipoLimite.setObjectName(_fromUtf8("tipoLimite"))
self.tipoLimite.addItem(_fromUtf8(""))
self.tipoLimite.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.tipoLimite)
self.nameLabel_28 = QtGui.QLabel(self.footer)
self.nameLabel_28.setMinimumSize(QtCore.QSize(50, 0))
self.nameLabel_28.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_28.setFont(font)
self.nameLabel_28.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_28.setObjectName(_fromUtf8("nameLabel_28"))
self.horizontalLayout.addWidget(self.nameLabel_28)
self.valorLimite = QtGui.QLineEdit(self.footer)
self.valorLimite.setMinimumSize(QtCore.QSize(75, 25))
self.valorLimite.setMaximumSize(QtCore.QSize(75, 25))
self.valorLimite.setText(_fromUtf8(""))
self.valorLimite.setObjectName(_fromUtf8("valorLimite"))
self.horizontalLayout.addWidget(self.valorLimite)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem7)
self.nameLabel_29 = QtGui.QLabel(self.footer)
self.nameLabel_29.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_29.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_29.setFont(font)
self.nameLabel_29.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_29.setObjectName(_fromUtf8("nameLabel_29"))
self.horizontalLayout.addWidget(self.nameLabel_29)
self.nRepeticoes = QtGui.QLineEdit(self.footer)
self.nRepeticoes.setMinimumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setMaximumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setText(_fromUtf8(""))
self.nRepeticoes.setObjectName(_fromUtf8("nRepeticoes"))
self.horizontalLayout.addWidget(self.nRepeticoes)
self.botaoSimular = QtGui.QPushButton(self.footer)
self.botaoSimular.setMinimumSize(QtCore.QSize(100, 25))
self.botaoSimular.setMaximumSize(QtCore.QSize(100, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.botaoSimular.setFont(font)
self.botaoSimular.setLayoutDirection(QtCore.Qt.RightToLeft)
self.botaoSimular.setAutoFillBackground(False)
self.botaoSimular.setStyleSheet(_fromUtf8(""))
self.botaoSimular.setFlat(False)
self.botaoSimular.setObjectName(_fromUtf8("botaoSimular"))
self.horizontalLayout.addWidget(self.botaoSimular)
self.verticalLayout_4.addWidget(self.footer)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Descriçao da simulaçao", None))
self.nameLabel_3.setText(_translate("MainWindow", "Peças grandes (A)", None))
self.nameLabel_27.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_7.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_8.setText(_translate("MainWindow", "Media", None))
self.nameLabel_9.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_10.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_11.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_12.setText(_translate("MainWindow", "Media", None))
self.nameLabel_13.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_14.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_4.setText(_translate("MainWindow", "Peças grandes (B)", None))
self.nameLabel_31.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_36.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_37.setText(_translate("MainWindow", "Media", None))
self.nameLabel_38.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_39.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_19.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_20.setText(_translate("MainWindow", "Media", None))
self.nameLabel_21.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_22.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_23.setText(_translate("MainWindow", "Envernizamento", None))
self.nameLabel_24.setText(_translate("MainWindow", "Media", None))
self.nameLabel_25.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_26.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_30.setText(_translate("MainWindow", "Limites da simulacao", None))
self.tipoLimite.setItemText(0, _translate("MainWindow", "Tempo simulacao", None))
self.tipoLimite.setItemText(1, _translate("MainWindow", "Nº Clientes", None))
self.nameLabel_28.setText(_translate("MainWindow", "Valor", None))
self.nameLabel_29.setText(_translate("MainWindow", "Nº Repeticoes", None))
self.botaoSimular.setText(_translate("MainWindow", "Simular", None))
| mit | -5,630,341,833,755,718,000 | 55.576241 | 105 | 0.701934 | false |
toinbis/369old | src/web369/conf/base.py | 1 | 2325 | from pkg_resources import resource_filename
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'web369',
'USER': 'root',
'PASSWORD': '',
}
}
TIME_ZONE = 'Europe/Vilnius'
LANGUAGE_CODE = 'lt'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
STATIC_URL = '/static/'
STATIC_ROOT = resource_filename('web369', '../../var/htdocs/static')
STATICFILES_DIRS = (
resource_filename('web369', 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = resource_filename('web369', '../../var/htdocs/media')
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
SECRET_KEY = 'SBX*YTL!cANetM&uFTf6R5Je(@PX3!rtgo)kgwNT'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'web369.urls.default'
TEMPLATE_DIRS = (
resource_filename('web369', 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'south',
'web369',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
'TIMEOUT': 60,
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# Word count will be updated when new documents are scraped:
LIVE_WORD_COUNT = True
| bsd-3-clause | -2,580,828,059,716,364,300 | 23.734043 | 73 | 0.667097 | false |
smallyear/linuxLearn | salt/salt/client/ssh/state.py | 1 | 6047 | # -*- coding: utf-8 -*-
'''
Create ssh executor system
'''
from __future__ import absolute_import
# Import python libs
import os
import tarfile
import tempfile
import json
import shutil
from contextlib import closing
# Import salt libs
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils
import salt.utils.thin
import salt.utils.url
import salt.roster
import salt.state
import salt.loader
import salt.minion
class SSHState(salt.state.State):
'''
Create a State object which wraps the SSH functions for state operations
'''
def __init__(self, opts, pillar=None, wrapper=None):
self.wrapper = wrapper
super(SSHState, self).__init__(opts, pillar)
def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
def check_refresh(self, data, ret):
'''
Stub out check_refresh
'''
return
def module_refresh(self):
'''
Module refresh is not needed, stub it out
'''
return
class SSHHighState(salt.state.BaseHighState):
'''
Used to compile the highstate on the master
'''
stack = []
def __init__(self, opts, pillar=None, wrapper=None, fsclient=None):
self.client = fsclient
salt.state.BaseHighState.__init__(self, opts)
self.state = SSHState(opts, pillar, wrapper)
self.matcher = salt.minion.Matcher(self.opts)
def load_dynamic(self, matches):
'''
Stub out load_dynamic
'''
return
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs
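# Worked example (hypothetical data, for illustration only): given
# chunks = [{'__env__': 'base', 'file': {'source': 'salt://foo/bar.conf'}}]
# and extras = 'salt://extra/maps.sls', lowstate_file_refs returns
# {'base': [['salt://foo/bar.conf'], ['salt://extra/maps.sls']]} -- one list of
# refs per chunk plus one list per extra ref, keyed by saltenv.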
def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, str):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret
def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
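    # These directories are always included in the tarball so that any custom
    # execution modules, states, grains, renderers, returners, outputters and
    # utils on the master are available to the packaged salt-ssh run.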
with salt.utils.fopen(lowfn, 'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.fopen(pillarfn, 'w+') as fp_:
fp_.write(json.dumps(pillar))
cachedir = os.path.join('salt-ssh', id_)
for saltenv in file_refs:
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0]
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
if files:
for filename in files:
fn = filename[filename.find(short) + len(short):]
if fn.startswith('/'):
fn = fn.strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try: # cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar
| apache-2.0 | 7,802,137,446,918,748,000 | 30.331606 | 79 | 0.539937 | false |
rapidpro/chatpro | chatpro/rooms/models.py | 1 | 2494 | from __future__ import absolute_import, unicode_literals
from chatpro.profiles.tasks import sync_org_contacts
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Room(models.Model):
"""
Corresponds to a RapidPro contact group
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='rooms')
name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
help_text=_("Name of this room"))
users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='rooms',
help_text=_("Users who can chat in this room"))
managers = models.ManyToManyField(User, verbose_name=_("Managers"), related_name='manage_rooms',
help_text=_("Users who can manage contacts in this room"))
is_active = models.BooleanField(default=True, help_text="Whether this room is active")
@classmethod
def create(cls, org, name, uuid):
return cls.objects.create(org=org, name=name, uuid=uuid)
@classmethod
def get_all(cls, org):
return cls.objects.filter(org=org, is_active=True)
@classmethod
def update_room_groups(cls, org, group_uuids):
"""
        Updates an org's chat rooms based on the selected group UUIDs
"""
# de-activate rooms not included
org.rooms.exclude(uuid__in=group_uuids).update(is_active=False)
# fetch group details
groups = org.get_temba_client().get_groups()
group_names = {group.uuid: group.name for group in groups}
for group_uuid in group_uuids:
existing = org.rooms.filter(uuid=group_uuid).first()
if existing:
existing.name = group_names[group_uuid]
existing.is_active = True
existing.save()
else:
cls.create(org, group_names[group_uuid], group_uuid)
sync_org_contacts.delay(org.id)
def get_contacts(self):
return self.contacts.filter(is_active=True)
def get_users(self):
return self.users.filter(is_active=True).select_related('profile')
def get_managers(self):
return self.managers.filter(is_active=True).select_related('profile')
def __unicode__(self):
return self.name
| bsd-3-clause | -7,098,435,995,584,484,000 | 34.628571 | 100 | 0.631917 | false |
JoKnopp/wp-import | test/test_postgresql.py | 1 | 4427 | # -*- coding: UTF-8 -*-
# © Copyright 2009 Wolodja Wentland. All Rights Reserved.
# This file is part of wp-import.
#
# wp-import is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wp-import is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wp-import. If not, see <http://www.gnu.org/licenses/>.
"""Tests for wp_import.postgresql
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import tempfile
from nose.tools import *
import wp_import.utils as wpi_utils
import wp_import.postgresql as wpi_psql
PREFIX = os.path.join(*os.path.split(os.path.dirname(__file__))[:-1])
TEST_DATA_DIR = os.path.join(PREFIX, 'test', 'data')
DOWNLOAD_DIR = os.path.join(TEST_DATA_DIR, 'download')
EXPECTED_STMTS = {
'categorylinks': [
"""INSERT INTO "categorylinks" VALUES """ \
"(130,'Linux','Linux\u5185\u6838','2006-07-25T19:03:22Z')"],
'langlinks': [
"""INSERT INTO "langlinks" VALUES """ \
"(43017,'af','Dante Alighieri')"],
'pagelinks': [
"""INSERT INTO "pagelinks" VALUES (12,0,'P/NP\u554f\u984c')"""],
'redirect': [
"""INSERT INTO "redirect" VALUES (71247,0,'ASCII\u827a\u672f')"""]}
class FakeOptions(object):
pass
def test_insert_statements():
fn_pat = re.compile(
r'''(?P<language>\w+)wiki-(?P<date>\d{8})-(?P<table>[\w_]+).*''')
for dump_path in sorted(wpi_utils.find('*.sql.gz', DOWNLOAD_DIR)):
filename = os.path.basename(dump_path)
mat = fn_pat.match(filename)
stmts = list(wpi_psql.insert_statements(dump_path))
eq_(list(wpi_psql.insert_statements(dump_path)),
EXPECTED_STMTS[mat.group('table')])
def test_categorylink_pipeline():
for file_path in wpi_utils.find('*categorylinks*.sql.gz', DOWNLOAD_DIR):
with wpi_utils.open_compressed(file_path) as cl_file:
eq_(list(wpi_psql.categorylinks_pipeline(cl_file)),
EXPECTED_STMTS['categorylinks'])
def test_psql_quotation():
eq_(list(wpi_psql.psql_quotation(['f `b`', 'baz', 'shrubbery ``'])),
['f "b"', 'baz', 'shrubbery ""'])
def test_timestamp_to_iso_8601():
eq_(list(wpi_psql.timestamp_to_iso_8601([',20080218135752) foo'])),
[",'2008-02-18T13:57:52Z') foo"])
def test_parse_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': '*', 'host': '*', 'port': '*', 'database': '*',
'password': 'GrailQuest'})
tmp_f.write('hostname:port:database:username:password\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': 'username', 'host': 'hostname', 'port': 'port',
'database': 'database',
'password': 'password'})
def test_password_from_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
options = FakeOptions()
options.pg_passfile = tmp_f.name
options.pg_user = 'KingArthur'
options.pg_port = '2342'
options.pg_host = 'Camelot'
# test generic pgpass line
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test specific pgpass line
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test pick most specific
tmp_f.write('Jerusalem:2342:postgres:Brian:Jehova\n')
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.write('*:*:*:*:UnladenSwallow\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
tmp_f.write('*:*:*:*\n')
tmp_f.seek(0)
assert_raises(KeyError, wpi_psql.password_from_pgpass,
options=options)
| gpl-3.0 | -6,150,922,383,525,190,000 | 33.578125 | 76 | 0.615906 | false |
hkemmel/tal | affichage.py | 1 | 2209 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017
@author: manfred.madelaine
"""
import time
def affStart():
msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
listMsg = []
listMsg.append("")
listMsg.append(msg1)
listMsg.append("")
listMsg.append(msg2)
listMsg.append("")
print(affBox(listMsg, 1, 1, len(msg2)))
delai()
def affEnd():
msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
msg = []
msg.append(msg1)
box = affBox(msg, 1, 1, len(msg1)-1)
print(box)
def affMessage(msg):
deb = "\n\t--- "
fin = " ---\n\n"
print(deb + msg + fin)
delai()
def delai():
time.sleep(0.8)
"""
Displays a message inside a box
msg : the message to display
x : horizontal offset
y : vertical offset
L : width of the box
"""
def affBox(msg, x, y, L):
box = ""
    # vertical offset
box += multChaine("\n", y)
indiceLine = 0
    # process one line of the message
for txt in msg:
        # top border
if(indiceLine == 0):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
        # horizontal offset
box += "\n" + multChaine("\t", x)
esp = ""
mult = 1
#message
if(len(txt) < L ):
esp = " "
mult = (L - len(txt)) / 2
box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"
        # bottom border
if(indiceLine == len(msg) - 1 ):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
indiceLine += 1
box+="\n"
return(box)
def affErr():
affMessage("Votre réponse est incorrecte !")
def multChaine(chaine, mult):
i = 0
msg = ""
while i < mult:
msg += chaine
i += 1
return msg
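# Small usage sketch (illustrative values): multChaine("-", 3) returns "---",
# and affBox relies on this helper to repeat padding/border characters.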
| gpl-3.0 | 181,424,775,493,336,640 | 19.342593 | 87 | 0.474954 | false |
LuizGsa21/p4-conference-central | models.py | 1 | 7226 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
import datetime
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty(default='')
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True)
wishList = ndb.KeyProperty(kind='Session', repeated=True)
def toForm(self):
form = ProfileForm(
displayName=self.displayName,
mainEmail=self.mainEmail,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize),
conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend]
)
form.check_initialized()
return form
def toMiniForm(self):
form = ProfileMiniForm(
displayName=self.displayName,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize)
)
form.check_initialized()
return form
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class Conference(ndb.Model):
"""Conference -- Conference object"""
required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate')
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty(required=True)
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty(required=True)
month = ndb.IntegerProperty()
endDate = ndb.DateProperty(required=True)
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
@property
def sessions(self):
return Session.query(ancestor=self.key)
def toForm(self, display_name=''):
form = ConferenceForm(
websafeKey=self.key.urlsafe(),
name=self.name,
description=self.description,
organizerUserId=self.organizerUserId,
topics=self.topics,
city=self.city,
startDate=self.startDate.strftime('%Y-%m-%d'),
month=self.month,
endDate=self.endDate.strftime('%Y-%m-%d'),
maxAttendees=self.maxAttendees,
seatsAvailable=self.seatsAvailable,
organizerDisplayName=display_name
)
form.check_initialized()
return form
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class Session(ndb.Model):
"""Session -- Session object"""
required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StructuredProperty(modelclass=Speaker, required=True)
duration = ndb.IntegerProperty(required=True)
typeOfSession = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True)
startTime = ndb.TimeProperty(required=True)
def toForm(self):
form = SessionForm(
websafeKey=self.key.urlsafe(),
name=self.name,
highlights=self.highlights,
speaker=self.speaker.name,
duration=self.duration,
typeOfSession=self.typeOfSession,
date=self.date.strftime('%Y-%m-%d'),
startTime=self.startTime.strftime('%H:%M')
)
form.check_initialized()
return form
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
websafeKey = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3)
speaker = messages.StringField(4)
duration = messages.IntegerField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForm -- multiple SessionForm outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
| apache-2.0 | 4,306,855,950,322,396,700 | 31.696833 | 98 | 0.687517 | false |
PyBossa/pybossa | pybossa/default_settings.py | 1 | 4813 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
DEBUG = False
# webserver host and port
HOST = '0.0.0.0'
PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PYBOSSA'
TITLE = 'PYBOSSA'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
LOGO = ''
DEFAULT_LOCALE = 'en'
LOCALES = [('en', 'English'), ('es', u'Español'),
('it', 'Italiano'), ('fr', u'Français'),
('ja', u'日本語'), ('el', u'ελληνικά')]
## Default THEME
THEME = 'default'
## Default number of apps per page
APPS_PER_PAGE = 20
## Default allowed extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
UPLOAD_METHOD = 'local'
## Default number of users shown in the leaderboard
LEADERBOARD = 20
## Default configuration for debug toolbar
ENABLE_DEBUG_TOOLBAR = False
# Cache default key prefix
REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Default cache timeouts
# Project cache
AVATAR_TIMEOUT = 30 * 24 * 60 * 60
APP_TIMEOUT = 15 * 60
REGISTERED_USERS_TIMEOUT = 15 * 60
ANON_USERS_TIMEOUT = 5 * 60 * 60
STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT
STATS_APP_TIMEOUT = 12 * 60 * 60
STATS_DRAFT_TIMEOUT = 24 * 60 * 60
N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60
BROWSE_TASKS_TIMEOUT = 3 * 60 * 60
# Category cache
CATEGORY_TIMEOUT = 24 * 60 * 60
# User cache
USER_TIMEOUT = 15 * 60
USER_TOP_TIMEOUT = 24 * 60 * 60
USER_TOTAL_TIMEOUT = 24 * 60 * 60
# Project Presenters
PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Send emails weekly update every
WEEKLY_UPDATE_STATS = 'Sunday'
# Enable Server Sent Events
SSE = False
# Pro user features. False will make the feature available to all regular users,
# while True will make it available only to pro users
PRO_FEATURES = {
'auditlog': True,
'webhooks': True,
'updated_exports': True,
'notify_blog_updates': True,
'project_weekly_report': True,
'autoimporter': True,
'better_stats': True
}
CORS_RESOURCES = {r"/api/*": {"origins": "*",
"allow_headers": ['Content-Type',
'Authorization'],
"max_age": 21600
}}
FAILED_JOBS_RETRIES = 3
FAILED_JOBS_MAILS = 7
FULLTEXTSEARCH_LANGUAGE = 'english'
STRICT_SLASHES = True
# Background jobs default time outs
MINUTE = 60
TIMEOUT = 10 * MINUTE
# OneSignal GCM Sender ID
# DO NOT MODIFY THIS
GCM_SENDER_ID = "482941778795"
# Unpublish inactive projects
UNPUBLISH_PROJECTS = True
# TTL for ZIP files of personal data
TTL_ZIP_SEC_FILES = 3
# Default cryptopan key
CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.'
# Instruct PYBOSSA to generate absolute paths or not for avatars
AVATAR_ABSOLUTE = True
# Spam accounts to avoid
SPAM = []
| agpl-3.0 | 167,789,960,090,039,200 | 28.429448 | 116 | 0.689806 | false |
Naoto-Imamachi/MIRAGE | scripts/module/preparation/phastcons_score_list.py | 1 | 3683 | #!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
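                # BED12 record: columns 11 and 12 carry blockSizes and blockStarts,
                # so scores are collected exon block by exon block (block start =
                # chromStart + blockStart, block length = blockSize); the per-base
                # score list is reversed below for minus-strand features.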
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
print('ERROR: Your BED format file have less than three column.')
print ('BED format file need to have at least three column [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
| mit | -2,538,876,580,107,515,400 | 41.329412 | 203 | 0.524572 | false |
DerekK88/PICwriter | picwriter/components/stripslotconverter.py | 1 | 9317 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class StripSlotConverter(tk.Component):
"""Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths.
Args:
* **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`).
* **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type).
* **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides.
* **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths.
* **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide.
* **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2`
* **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail.
Keyword Args:
* **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose.
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default.
"""
def __init__(
self,
wgt_input,
wgt_output,
length1,
length2,
start_rail_width,
end_strip_width,
d,
input_strip=None,
port=(0, 0),
direction="EAST",
):
tk.Component.__init__(self, "StripSlotConverter", locals())
self.portlist = {}
if (not isinstance(input_strip, bool)) and (input_strip != None):
raise ValueError(
"Invalid input provided for `input_strip`. Please specify a boolean."
)
if input_strip == None:
# Auto-detect based on wgt_input
self.input_strip = (
wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg"
)
else:
# User-override
self.input_strip = input_strip
if self.input_strip:
self.wgt_strip = wgt_input
self.wgt_slot = wgt_output
else:
self.wgt_strip = wgt_output
self.wgt_slot = wgt_input
self.wg_spec = {
"layer": wgt_output.wg_layer,
"datatype": wgt_output.wg_datatype,
}
self.clad_spec = {
"layer": wgt_output.clad_layer,
"datatype": wgt_output.clad_datatype,
}
self.length1 = length1
self.length2 = length2
self.d = d
self.start_rail_width = start_rail_width
self.end_strip_width = end_strip_width
self.port = port
self.direction = direction
self.__build_cell()
self.__build_ports()
""" Translate & rotate the ports corresponding to this specific component object
"""
self._auto_transform_()
def __build_cell(self):
# Sequentially build all the geometric shapes using polygons
# Add strip waveguide taper for region 1
x0, y0 = (0, 0)
pts = [
(x0, y0 - self.wgt_strip.wg_width / 2.0),
(x0, y0 + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
]
strip1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the thin side waveguide for region 1
pts = [
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d),
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
]
thin_strip = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the bottom rail for region 2
pts = [
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
(x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail,
),
]
rail1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the top rail for region 2
pts = [
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail,
),
(x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0),
]
rail2 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add a cladding polygon
pts = [
(x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0,
),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0,
),
(x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0),
]
clad = gdspy.Polygon(
pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype
)
self.add(strip1)
self.add(thin_strip)
self.add(rail1)
self.add(rail2)
self.add(clad)
def __build_ports(self):
# Portlist format:
# example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {"port": (0, 0), "direction": "WEST"}
self.portlist["output"] = {
"port": (self.length1 + self.length2, 0),
"direction": "EAST",
}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7)
wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2)
wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip)
tk.add(top, wg1)
ssc = StripSlotConverter(
wgt_strip,
wgt_slot,
length1=15.0,
length2=15.0,
start_rail_width=0.1,
end_strip_width=0.4,
d=1.0,
**wg1.portlist["output"]
)
tk.add(top, ssc)
(x1, y1) = ssc.portlist["output"]["port"]
wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot)
tk.add(top, wg2)
gdspy.LayoutViewer(cells=top)
# gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
| mit | 7,310,835,208,231,276,000 | 36.268 | 396 | 0.545347 | false |
dzamie/weasyl | weasyl/blocktag.py | 1 | 4024 | # blocktag.py
from error import PostgresError
import define as d
import profile
import searchtag
from libweasyl import ratings
from weasyl.cache import region
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)
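# Illustration (hypothetical values): with blocked_tags = [{'title': 'gore', 'rating': 30}],
# check_list(40, {'gore', 'cats'}, blocked_tags) is True, while
# check_list(10, {'gore'}, blocked_tags) is False, because the content rating is
# below the rating at which the user blocked the tag.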
def suggest(userid, target):
if not target:
return []
return d.execute("SELECT title FROM searchtag"
" WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)"
" ORDER BY title LIMIT 10", [target, userid], options="within")
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def cached_select(userid):
return select(userid)
def insert(userid, tagid=None, title=None, rating=None):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
if tagid:
tag = int(tagid)
try:
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
except PostgresError:
return
elif title:
tag_name = d.get_search_tag(title)
try:
d.engine.execute("""
INSERT INTO blocktag (userid, tagid, rating)
VALUES (
%(user)s,
(SELECT tagid FROM searchtag WHERE title = %(tag_name)s),
%(rating)s
)
""", user=userid, tag_name=tag_name, rating=rating)
except PostgresError:
try:
tag = searchtag.create(title)
except PostgresError:
return
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
cached_select.invalidate(userid)
def remove(userid, tagid=None, title=None):
if tagid:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid])
elif title:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))",
[userid, d.get_search_tag(title)])
cached_select.invalidate(userid)
| apache-2.0 | -8,802,983,478,453,803,000 | 30.193798 | 120 | 0.587227 | false |
EndyKaufman/django-postgres-angularjs-blog | app/manager/migrations/0006_properties.py | 1 | 1170 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0005_add_fields_and_set_defaults'),
]
operations = [
migrations.CreateModel(
name='Properties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=512, unique=True)),
('value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -7,519,078,480,483,792,000 | 39.344828 | 150 | 0.62906 | false |
bert9bert/statsmodels | statsmodels/tsa/statespace/kalman_filter.py | 2 | 86079 | """
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (validate_vector_shape, validate_matrix_shape,
reorder_missing_matrix, reorder_missing_vector)
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_NO_GAIN = 0x10
MEMORY_NO_SMOOTHING = 0x20
MEMORY_NO_STD_FORECAST = 0x40
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |
MEMORY_NO_STD_FORECAST
)
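# Flags can be combined, e.g. MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED keeps
# filtered output while discarding forecasts and predicted states;
# MEMORY_CONSERVE above is the union of all the MEMORY_NO_* flags.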
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
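
    Examples
    --------
    A minimal sketch of direct usage; the higher-level state space model
    classes normally construct and manage this object, and the array passed
    to `bind` below is purely illustrative (assumes `np` is numpy):

    >>> kf = KalmanFilter(k_endog=1, k_states=1)
    >>> kf.filter_univariate = True
    >>> kf.bind(np.array([1., 0.5, 0.25, 0.125]))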
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
FILTER_UNIVARIATE = 0x10
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED = 0x20
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
if self._compatibility_mode and not self.filter_method == 1:
raise NotImplementedError('Only conventional Kalman filtering'
' is available. Consider updating'
' dependencies for more options.')
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
            Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
1
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN = 0x10
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING = 0x20
            Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
        MEMORY_NO_STD_FORECAST = 0x40
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
Note that if using a Scipy version less than 0.16, the options
MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
have no effect.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : integer, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
            Keyword arguments may be used to influence the filter timing
            convention by setting the individual boolean flags
            `timing_init_predicted` or `timing_init_filtered`.
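        Examples
        --------
        An illustrative sketch (the SARIMAX specification is assumed purely
        for demonstration, following the other examples in this class):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.set_filter_timing(alternate_timing=True)
        >>> mod.ssm.filter_timing
        1
        >>> mod.ssm.set_filter_timing(timing_init_predicted=True)
        >>> mod.ssm.filter_timing
        0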
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
if (self._compatibility_mode and
self.filter_timing == TIMING_INIT_FILTERED):
raise NotImplementedError('Only "predicted" Kalman filter'
' timing is available. Consider'
' updating dependencies for more'
' options.')
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
tmp = np.array(kfilter.loglikelihood)
tmp2 = np.array(kfilter.predicted_state)
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
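        Examples
        --------
        A minimal sketch (the model specification below is an assumption for
        illustration; parameters are assigned via `update` so that the system
        matrices are populated before filtering):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> res = mod.ssm.filter()
        >>> fs = res.filtered_state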
"""
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
tmp = np.array(kfilter.loglikelihood)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
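        Examples
        --------
        Illustrative only (the model specification is an assumption; the same
        filtering options as in `KalmanFilter.filter` apply here too):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> llf = mod.ssm.loglike()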
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
return np.sum(kfilter.loglikelihood[loglikelihood_burn:])
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
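        Examples
        --------
        Illustrative only (the model specification is an assumption; the
        output has one entry per observation):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> llf_obs = mod.ssm.loglikeobs()
        >>> llf_obs.shape
        (10,)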
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
# Set any burned observations to have zero likelihood
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
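        Examples
        --------
        A minimal sketch (the model specification is an assumption; the shapes
        follow directly from `nsimulations`, `k_endog` and `k_states`):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> obs, states = mod.ssm.simulate(5)
        >>> obs.shape
        (5, 1)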
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
if not measurement_shocks.shape == (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement'
' shocks. Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
if not state_shocks.shape == (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = np.random.multivariate_normal(
self._initial_state, self._initial_state_cov)
elif self.initialization == 'stationary':
from scipy.linalg import solve_discrete_lyapunov
# (I - T)^{-1} c = x => (I - T) x = c
initial_state_mean = np.linalg.solve(
np.eye(self.k_states) - self['transition', :, :, 0],
self['state_intercept', :, 0])
R = self['selection', :, :, 0]
Q = self['state_cov', :, :, 0]
selected_state_cov = R.dot(Q).dot(R.T)
initial_state_cov = solve_discrete_lyapunov(
self['transition', :, :, 0], selected_state_cov)
initial_state = np.random.multivariate_normal(
initial_state_mean, initial_state_cov)
elif self.initialization == 'approximate_diffuse':
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t]) +
measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
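        Examples
        --------
        An illustrative sketch (the model specification is an assumption; the
        output shape follows from the `steps` argument and `k_endog`):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> irfs = mod.ssm.impulse_responses(steps=5)
        >>> irfs.shape
        (6, 1)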
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
exception = ('Impulse response functions for models with'
' time-varying %s matrix requires an updated'
' time-varying matrix for any periods beyond those in'
' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], steps)
if mat.ndim < 3 or not mat.shape[2] == steps:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
self.k_posdef, **model_kwargs)
model.initialize_approximate_diffuse()
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
# Since simulate returns the zero-th period, we need to simulate
# steps + 1 periods and exclude the zero-th observation.
steps += 1
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
irf = irf[1:]
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
measurement equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
kalman_gain : array
The Kalman gain at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov',
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not self._compatibility_mode and not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
if not self._compatibility_mode:
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
else:
self._kalman_gain = None
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
            # Copy the provided arrays (which are from the Kalman filter
            # dataset) into new variables
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
self.forecasts = np.zeros((self.k_endog, self.nobs))
self.forecasts_error = np.zeros((self.k_endog, self.nobs))
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs))
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
                    # (i.e. the non-missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
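        As an illustrative sketch of this recovery step for a single period
        (`F_t` and `v_t` below are stand-in values, not model attributes):
        >>> import numpy as np
        >>> from scipy import linalg
        >>> F_t = np.array([[4., 2.], [2., 3.]])
        >>> v_t = np.array([1., 2.])
        >>> L_t, _ = linalg.cho_factor(F_t, lower=True)
        >>> v_t_standardized = linalg.solve_triangular(L_t, v_t, lower=True)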
"""
if self._standardized_forecasts_error is None:
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t], trans=1))
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
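        Examples
        --------
        An illustrative sketch (the model specification and the prediction
        range are assumptions; `res` is the `FilterResults` instance):
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> params = mod.update(mod.start_params)
        >>> res = mod.ssm.filter()
        >>> pred = res.predict(start=0, end=12)
        >>> pred.forecasts.shape
        (1, 12)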
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the
# Kalman filter through the entire range [0, end]
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
# Construct the design and observation intercept and covariance
# matrices for start-npadded:end. If not time-varying in the original
# model, then they will be copied over if none are provided in
# `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'filter_timing': self.filter_timing,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
# Note: this doesn't use self, and can either be a static method or
# moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_representation(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
The provided ranges must be conformable, meaning that it must be that
`end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
| bsd-3-clause | 3,883,965,358,522,059,000 | 41.340876 | 79 | 0.594628 | false |
rjw57/cubbie | migrations/versions/316bb58e84f_add_user_identities.py | 1 | 1110 | """add user_identities
Revision ID: 316bb58e84f
Revises: 38c8ec357e0
Create Date: 2015-03-11 01:40:12.157458
"""
# revision identifiers, used by Alembic.
revision = '316bb58e84f'
down_revision = '38c8ec357e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_identities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', sa.Text(), nullable=False),
sa.Column('provider_user_id', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities')
op.drop_table('user_identities')
### end Alembic commands ###
| mit | 7,870,348,524,913,182,000 | 30.714286 | 130 | 0.684685 | false |
cwgreene/Nanostructure-Simulator | utils/plot_trajectories.py | 1 | 1140 | import os
import sys
import re
import pylab
def parse_trajectory_line(line):
trajectory = []
for x,y in re.findall("\(([0-9.]+), ([0-9.]+)\)",line):
trajectory.append((float(x),float(y)))
return trajectory
def generate_trajectories(file):
	#get rid of the first two lines
file.readline()
file.readline()
#parse each line
for line in file:
yield parse_trajectory_line(line)
def open_trajectory_file(n):
for filename in os.listdir("results"):
if re.match(str(n)+"traj",filename):
return open("results/"+filename)
raise "File not found"
def display_trajectories(n):
input =""
file = open_trajectory_file(n)
trajectory_gen = generate_trajectories(file)
trajectory = trajectory_gen.next()
interactive = True
i = 0
while input != 'q':
first = map(lambda x: x[0],trajectory)
second = map(lambda x: x[1],trajectory)
pylab.plot(first,second)
if interactive:
input = raw_input()
if input == "go":
i += 1
interactive=False
if i %100 == 0:
print i
raw_input()
try:
trajectory=trajectory_gen.next()
		except StopIteration:
print "Done"
break
if __name__=="__main__":
display_trajectories(sys.argv[1])
| mit | -5,705,216,141,029,246,000 | 20.923077 | 56 | 0.669298 | false |
jonathansick/androcmd | scripts/phat_baseline_test.py | 1 | 3612 | #!/usr/bin/env python
# encoding: utf-8
"""
Grid computation of dust attenuation for old vs. young stellar populations.
2015-05-12 - Created by Jonathan Sick
"""
import argparse
from androcmd.phatpipeline import PhatCatalog
from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline
def main():
args = parse_args()
if args.pipeline == 'solarz':
# Use the single-Z solar pipeline
Pipeline = SolarZPipeline
elif args.pipeline == 'threez':
# Use the three-metallicity track pipeline
Pipeline = ThreeZPipeline
isoc = dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang')
pipeline = Pipeline(brick=23,
root_dir=args.model_name,
isoc_args=isoc)
if args.fit is not None:
dataset = PhatCatalog(args.brick)
pipeline.fit(args.fit, [args.fit], dataset)
if args.plot_hess is not None:
from androcmd.baselineexp import plot_fit_hess_grid
dataset = PhatCatalog(args.brick)
plot_fit_hess_grid(args.plot_hess, pipeline, dataset)
if args.plot_diff is not None:
from androcmd.baselineexp import plot_diff_hess_grid
dataset = PhatCatalog(args.brick)
plot_diff_hess_grid(args.plot_diff, pipeline, dataset)
if args.plot_sfh is not None:
from androcmd.baselineexp import sfh_comparison_plot
dataset = PhatCatalog(args.brick)
sfh_comparison_plot(args.plot_sfh, pipeline, dataset)
if args.plot_zsfh is not None:
from androcmd.baselineexp import plot_sfh_metallicity_trends
dataset = PhatCatalog(args.brick)
for fit_key in args.plot_zsfh:
plot_path = "{model}_b{brick:d}_zsfh_{key}".format(
model=args.model_name, brick=args.brick, key=fit_key)
plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key)
if args.chi_table is not None:
from androcmd.baselineexp import tabulate_fit_chi
dataset = PhatCatalog(args.brick)
tabulate_fit_chi(args.chi_table, pipeline, dataset)
if args.plot_isoc is not None:
from androcmd.baselineexp import plot_isocs, plot_isocs_lewis
dataset = PhatCatalog(args.brick)
plot_isocs(args.plot_isoc, pipeline, dataset)
plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset)
if args.plot_lock is not None:
from androcmd.baselineexp import plot_lockfile
plot_lockfile(args.plot_lock, pipeline)
def parse_args():
parser = argparse.ArgumentParser(
description="Model a brick with differential old/young dust.")
parser.add_argument('model_name')
parser.add_argument('brick', type=int)
parser.add_argument('--fit',
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'],
default=None)
parser.add_argument('--pipeline',
choices=['solarz', 'threez'],
default='solarz')
parser.add_argument('--plot-hess', default=None)
parser.add_argument('--plot-diff', default=None)
parser.add_argument('--plot-sfh', default=None)
parser.add_argument('--chi-table', default=None)
parser.add_argument('--plot-zsfh', nargs='*', default=None,
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'])
parser.add_argument('--plot-isoc', default=None)
parser.add_argument('--plot-lock', default=None)
return parser.parse_args()
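# Example invocation (model name, brick number and option values are hypothetical):
#   python phat_baseline_test.py my_model 23 --pipeline threez --fit lewis --plot-hess hess_plot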
if __name__ == '__main__':
main()
| mit | -6,061,255,170,724,512,000 | 35.484848 | 78 | 0.623477 | false |
lhellebr/spacewalk | backend/server/rhnLib.py | 1 | 8211 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import hashlib
import string
import base64
import posixpath
from spacewalk.common.rhnLib import parseRPMName
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
# architecture work
from rhnMapping import check_package_arch
def computeSignature(*fields):
# Init the hash
m = hashlib.new('sha256')
for i in fields:
# use str(i) since some of the fields may be non-string
m.update(str(i))
return base64.encodestring(m.digest()).rstrip()
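# For illustration (field values are made up): computeSignature('rhn', 42, None)
# feeds str() of each field, in order, into sha256 and returns the base64-encoded
# digest with trailing whitespace stripped.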
# 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a]
def parseRPMFilename(pkgFilename):
"""
IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string)
Understood rules:
        o Name can have nearly any character and is terminated by a -
          (the name itself may contain - as well).
o Version cannot have a -, but ends in one.
o Release should be an actual number, and can't have any -'s.
o Release can include the Epoch, e.g.: 2:4 (4 is the epoch)
o Epoch: Can include anything except a - and the : seperator???
XXX: Is epoch info above correct?
OUT: [n,e,v,r, arch].
"""
if type(pkgFilename) != type(''):
raise rhnFault(21, str(pkgFilename)) # Invalid arg.
pkgFilename = os.path.basename(pkgFilename)
# Check that this is a package NAME (with arch.rpm) and strip
# that crap off.
pkg = string.split(pkgFilename, '.')
# 'rpm' at end?
if string.lower(pkg[-1]) not in ['rpm', 'deb']:
raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename)
# Valid architecture next?
if check_package_arch(pkg[-2]) is None:
raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2])
_arch = pkg[-2]
# Nuke that arch.rpm.
pkg = string.join(pkg[:-2], '.')
ret = list(parseRPMName(pkg))
if ret:
ret.append(_arch)
return ret
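# Illustrative example (hypothetical filename); per the docstring the result is
# ordered [name, epoch, version, release, arch]:
#   parseRPMFilename('bash-4.2.46-34.el7.x86_64.rpm')
#   # -> ['bash', <epoch or ''>, '4.2.46', '34.el7', 'x86_64']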
# XXX TBD where to place this function - it has to be accessible from several
# places
def normalize_server_arch(arch):
log_debug(4, 'server arch', arch)
if arch is None:
return ''
arch = str(arch)
if '-' in arch:
# Already normalized
return arch
# Fix the arch if need be
suffix = '-redhat-linux'
arch = arch + suffix
return arch
class InvalidAction(Exception):
""" An error class to signal when we can not handle an action """
pass
class EmptyAction(Exception):
""" An error class that signals that we encountered an internal error
trying to handle an action through no fault of the client
"""
pass
class ShadowAction(Exception):
""" An error class for actions that should not get to the client """
pass
def transpose_to_hash(arr, column_names):
""" Handy function to transpose an array from row-based to column-based,
with named columns.
"""
result = []
for c in column_names:
result.append([])
colnum = len(column_names)
for r in arr:
if len(r) != colnum:
raise Exception(
"Mismatching number of columns: expected %s, got %s; %s" % (
colnum, len(r), r))
for i in range(len(r)):
result[i].append(r[i])
# Now build the hash labeled with the column names
rh = {}
for i in range(len(column_names)):
rh[column_names[i]] = result[i]
return rh
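# Small worked example of the transposition above:
#   transpose_to_hash([(1, 'a'), (2, 'b')], ['id', 'label'])
#   # -> {'id': [1, 2], 'label': ['a', 'b']}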
def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None,
package_type='rpm', checksum_type=None, checksum=None):
""" Computes a package path, optionally prepending a prefix
The path will look like
<prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch
<prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch
"""
name, epoch, version, release, pkgarch = nevra
# dirarch and pkgarch are special-cased for source rpms
if source:
dirarch = 'SRPMS'
else:
dirarch = pkgarch
if org_id in ['', None]:
org = "NULL"
else:
org = org_id
if not omit_epoch and epoch not in [None, '']:
version = str(epoch) + ':' + version
# normpath sanitizes the path (removing duplicated / and such)
template = os.path.normpath(prepend +
"/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
return template % (org, checksum[:3], name, version, release, dirarch, checksum,
name, nevra[2], release, pkgarch, package_type)
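# For a hypothetical nevra ('foo', '1', '2.0', '3', 'x86_64') with org_id=7 and a
# checksum starting with 'abc', the template above yields a path of the form:
#   <prepend>/7/abc/foo/1:2.0-3/x86_64/<checksum>/foo-2.0-3.x86_64.rpm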
# bug #161989
# It seems that our software was written specifically for rpms in far too many
# ways. Here's a little bit of a hack function that will return the package path
# (as in from get_package_path) but without the filename appended.
# This enables us to append an arbitrary file name that is not restricted to the
# form: name-version-release.arch.type
def get_package_path_without_package_name(nevra, org_id, prepend="",
checksum_type=None, checksum=None):
"""return a package path without the package name appended"""
return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend,
checksum_type=checksum_type, checksum=checksum))
class CallableObj:
""" Generic callable object """
def __init__(self, name, func):
self.func = func
self.name = name
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
def make_evr(nvre, source=False):
""" IN: 'e:name-version-release' or 'name-version-release:e'
OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch }
"""
if ":" in nvre:
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
nvr, epoch = epoch, nvr
else:
nvr, epoch = nvre, ""
nvr_parts = nvr.rsplit("-", 2)
if len(nvr_parts) != 3:
raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.")
result = dict(zip(["name", "version", "release"], nvr_parts))
result["epoch"] = epoch
if source and result["release"].endswith(".src"):
result["release"] = result["release"][:-4]
return result
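# Both accepted spellings produce the same mapping, e.g. (made-up values):
#   make_evr('3:bar-1.0-2')  # -> {'name': 'bar', 'version': '1.0', 'release': '2', 'epoch': '3'}
#   make_evr('bar-1.0-2:3')  # -> same result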
def _is_secure_path(path):
path = posixpath.normpath(path)
return not (path.startswith('/') or path.startswith('../'))
def get_crash_path(org_id, system_id, crash):
"""For a given org_id, system_id and crash, return relative path to a crash directory."""
path = os.path.join('systems', org_id, system_id, 'crashes', crash)
if _is_secure_path(path):
return path
else:
return None
def get_crashfile_path(org_id, system_id, crash, filename):
"""For a given org_id, system_id, crash and filename, return relative path to a crash file."""
path = os.path.join(get_crash_path(org_id, system_id, crash), filename)
if _is_secure_path(path):
return path
else:
return None
def get_action_path(org_id, system_id, action_id):
"""For a given org_id, system_id, and action_id, return relative path to a store directory."""
path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id))
if _is_secure_path(path):
return path
def get_actionfile_path(org_id, system_id, action_id, filename):
"""For a given org_id, system_id, action_id, and file, return relative path to a file."""
path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename))
if _is_secure_path(path):
return path
| gpl-2.0 | 8,887,653,560,759,876,000 | 30.580769 | 98 | 0.629765 | false |
m-r-hunt/invaders | enemies.py | 1 | 6646 | # Invaders
# Copyright (C) 2013 Maximilian Hunt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, random, pygame, projectiles, score_counter
class EnemySprite(pygame.sprite.Sprite):
# Class for one enemy invader.
def __init__(self, image, position, bullet_group):
# image: relative path to an image pygame can load
# position: (x, y) coordinates on screen
# bullet_group: pygame.sprite.Group to put fired bullets in
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.position = position
self.rect = self.image.get_rect()
self.rect.center = position
self.bullet_group = bullet_group
def update(self, dv, score, collisions):
# Update this enemy. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to increment on death
# collisions: a dictionary of collisions, possibly containing this object
# Handle any collisions given
if self in collisions:
death = False
for bullet in collisions[self]:
if (bullet.origin != self):
bullet.kill()
death = True
if (death == True):
score.increment()
self.kill()
# Update position
self.position = (self.position[0] + dv[0], self.position[1] + dv[1])
self.rect.center = self.position
def y(self):
# Return height (y coordinate).
return self.position[1]
def fire(self):
# (Possibly) fire a bullet down.
if (random.randrange(100) < 2):
bounds = (0-100, 800+100, 0-100, 600+100)
bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self)
self.bullet_group.add(bullet)
class EnemyColumn(pygame.sprite.Group):
# Class for one column in a formation of enemies.
# Exists so we can easily fire only the lowest enemy in each column
# Remembers its own x coordinate, everything else happens inside the actual enemies
def __init__(self, x_position):
# x_position: integer x coordinate
pygame.sprite.Group.__init__(self)
self.x_position = x_position
def update(self, dv, score, collisions):
# Update this column. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to pass to contained EnemySprites
# collisions: a dictionary of collisions to pass to contained EnemySprites
# Return (x, y), x of this column and y of lowest contained Sprite.
self.x_position += dv[0]
# Update contained sprites
for i in self.sprites():
i.update(dv, score, collisions)
# Compute biggest y, ask that EnemySprite to fire.
max_y = 0
if (len(self) != 0):
for i in self.sprites():
if (i.y() > max_y):
max_y = i.y()
bottom_enemy = i
bottom_enemy.fire()
return self.x_position, max_y
class EnemyFormation(pygame.sprite.Group):
# Class for a whole formation of enemies.
# Contains both EnemyColumns and EnemySprites
# Magic numbers: Base speed stepped horizontally or vertically each frame.
H_STEP = 2
V_STEP = 10
def __init__(self, topleft, layout, bounds, bullet_group):
pygame.sprite.Group.__init__(self)
self.columns = []
columns, rows = layout
# Generate all the enemies and columns.
for i in range(0, columns):
column_x = topleft[0] + i*64
enemy_column = EnemyColumn(topleft[0] + i*64)
for j in range(0, rows):
new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group)
enemy_column.add(new_enemy)
self.add(new_enemy)
self.columns.append(enemy_column)
# Direction: +1 for right, -1 for left (i.e. +-ve x direction)
self.current_direction = +1
self.left_bound, self.right_bound, self.bottom_bound = bounds
self.total = columns * rows
def update(self, score, collisions):
# Update this formation. Should be called once per frame.
# score: a Score to pass to contained EnemyColumns
# collisions: a dictionary of collisions to pass to contained EnemyColumns
# Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting.
# Second is True if this is because it's now empty, False if it has reached the bottom of the screen.
direction_change = too_low = False
# Compute factor to move faster when we have fewer remaining members.
scale = int(float(self.total)/float(len(self)))
# Update columns
for i in self.columns:
x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions)
# Remove empty columns
if (len(i.sprites()) == 0):
self.columns.remove(i)
# Notice if we've gone too low
elif (y > self.bottom_bound):
too_low = True
# Remember to change direction when we reach screen edges
elif (x < self.left_bound or x > self.right_bound):
direction_change = True
# Indicate we're empty
if (len(self.columns) == 0):
return False, True
# Indicate we reached the bottom of the screen.
elif too_low:
return False, False
# Drop down and change direction
elif direction_change:
self.current_direction *= -1
for i in self.columns:
i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, [])
# If we made it here, everything's fine.
return True, True | gpl-2.0 | -4,189,515,760,736,269,300 | 41.06962 | 124 | 0.614354 | false |
thaihungle/deepexp | rare-mann/mimic_gen.py | 1 | 5981 | import numpy as np
import os
import random
import pickle
class MimicDataLoader(object):
def __init__(self, data_folder, batch_size=1, max_sequence=10, max_iter=None, split = 0.75, train_keep=1):
super(MimicDataLoader, self).__init__()
self.data_folder = data_folder
self.batch_size = batch_size
self.num_step = max_sequence
self.max_iter = max_iter
self.num_iter = 0
self.input_map=pickle.load(open(data_folder+'/dig_map.pkl','rb'))
self.ouput_map = pickle.load(open(data_folder + '/proc_map.pkl', 'rb'))
self.all_input = pickle.load(open(data_folder+'/dig_input.pkl','rb'))
self.all_output = pickle.load(open(data_folder + '/proc_output.pkl', 'rb'))
self.output_size = self.all_output.shape[1]
if len(np.shape(self.all_output))>1:
self.all_output = np.argmax(self.all_output, axis=1)
print(self.all_output[:10])
print(self.all_output.shape)
self.num_samples=self.all_input.shape[0]
print('num samples {}'.format(self.num_samples))
lindex=list(range(self.num_samples))
# random.shuffle(lindex)
self.train_data_indexes = lindex[:int(self.num_samples*split*train_keep)]
self.test_data_indexes = lindex[int(self.num_samples*split):]
self.is_training=True
self.data_offset=0
self.input_size=self.all_input.shape[1]
print('num train samples: {}'.format(len(self.train_data_indexes)))
print('train index: {} ...'.format(self.train_data_indexes[:10]))
print('num test samples: {}'.format(len(self.test_data_indexes)))
print('test index: {} ...'.format(self.test_data_indexes[:10]))
print('num classes: {}'.format(self.output_size))
print('num steps per episode: {}'.format(self.num_step))
print('batch size: {}'.format(self.batch_size))
def fetch_all(self):
train_x=[]
train_y=[]
test_x=[]
test_y=[]
for ind in self.train_data_indexes:
train_x.append(self.all_input[ind])
train_y.append(self.all_output[ind])
for ind in self.test_data_indexes:
test_x.append(self.all_input[ind])
test_y.append(self.all_output[ind])
return np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
def fetch_batch(self, is_training=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
indexes = np.zeros((self.batch_size, self.num_step), dtype=np.int32)
for i in range(self.batch_size):
indexes[i, :] = np.random.choice(len(list_index), self.num_step, replace=False)
# print('-------------')
# print(indexes[:10])
all_inputs=[]
all_outputs=[]
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
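    # Shape note (derived from the loop above): fetch_batch returns two lists of
    # length num_step; entry s holds a (batch_size, input_size) float32 array of
    # inputs and a (batch_size,) int32 array of class indices for episode step s.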
def fetch_batch_full(self, is_training, is_rand=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
num_t = len(list_index)
indexes = np.zeros((self.batch_size, num_t),dtype=np.int32)
for i in range(self.batch_size):
if is_rand:
indexes[i, :] = np.random.choice(len(list_index), num_t, replace=False)
else:
indexes[i, :] = np.asarray(list(range(len(list_index))))
# indexes = np.zeros((self.batch_size, num_t), dtype=np.int32)
# for i in range(self.batch_size):
# indexes[i,:]=np.arange(num_t)
all_inputs=[]
all_outputs=[]
for s in range(num_t):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
# indexes just have shape (batch,)
def predict_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
def predict_online_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
| mit | 2,495,038,114,839,712,000 | 38.609272 | 110 | 0.585521 | false |
boppreh/keyboard | setup.py | 1 | 1333 | """
Usage instructions:
- If you are installing: `python setup.py install`
- If you are developing: `python setup.py sdist --format=zip bdist_wheel --universal bdist_wininst && twine check dist/*`
"""
import keyboard
from setuptools import setup
setup(
name='keyboard',
version=keyboard.version,
author='BoppreH',
author_email='[email protected]',
packages=['keyboard'],
url='https://github.com/boppreh/keyboard',
license='MIT',
description='Hook and simulate keyboard events on Windows and Linux',
keywords = 'keyboard hook simulate hotkey',
# Wheel creation breaks with Windows newlines.
# https://github.com/pypa/setuptools/issues/1126
long_description=keyboard.__doc__.replace('\r\n', '\n'),
long_description_content_type='text/markdown',
install_requires=["pyobjc; sys_platform=='darwin'"], # OSX-specific dependency
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| mit | 5,545,354,428,995,627,000 | 34.078947 | 121 | 0.651913 | false |
2Minutes/davos-dev | davos/core/utils.py | 1 | 7692 |
import re
import sys
import os
import os.path as osp
from fnmatch import fnmatch
from pytd.gui.dialogs import promptDialog
from pytd.util.logutils import logMsg
from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller
from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin
from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent
from pytd.util.strutils import padded
_VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)')
def getConfigModule(sProjectName):
try:
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
sConfigModule = sConfPkg + '.' + sProjectName
modobj = importModule(sConfigModule)
except ImportError:
raise ImportError("No config module named '{}'".format(sConfigModule))
return modobj
def versionFromName(sFileName):
vers = _VERS_SPLIT_REXP.findall(sFileName)
return int(vers[-1].strip('v')) if vers else None
def mkVersionSuffix(v):
if not isinstance(v, int):
raise TypeError("argument must be of type <int>. Got {}.".format(type(v)))
return "".join(('-v', padded(v)))
def findVersionFields(s):
return _VERS_SPLIT_REXP.findall(s)
def promptForComment(**kwargs):
sComment = ""
bOk = False
result = promptDialog(title='Please...',
message='Leave a comment: ',
button=['OK', 'Cancel'],
defaultButton='OK',
cancelButton='Cancel',
dismissString='Cancel',
scrollableField=True,
**kwargs)
if result == 'Cancel':
logMsg("Cancelled !" , warning=True)
elif result == 'OK':
sComment = promptDialog(query=True, text=True)
bOk = True
return sComment, bOk
def projectNameFromPath(p):
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
pkg = importModule(sConfPkg)
sPkgDirPath = os.path.dirname(pkg.__file__)
sDirList = pathSplitDirs(p)
for sFilename in os.listdir(sPkgDirPath):
bIgnored = False
for sPatrn in ("__*", ".*", "*.pyc"):
if fnmatch(sFilename, sPatrn):
bIgnored = True
break
if bIgnored:
continue
sModName = os.path.splitext(sFilename)[0]
m = importModule(sConfPkg + '.' + sModName)
sProjDir = m.project.dir_name
if sProjDir in sDirList:
return sModName
return ""
def splitStep(sTaskName):
return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName)
def damasServerPort():
return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443"
def loadPrefs():
global DAVOS_PREFS
try:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
DAVOS_PREFS = jsonRead(p)
except EnvironmentError:
DAVOS_PREFS = {}
return DAVOS_PREFS
def savePrefs():
global DAVOS_PREFS
if DAVOS_PREFS:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
jsonWrite(p, DAVOS_PREFS)
def setPref(in_sKey, value):
global DAVOS_PREFS
if "|" not in in_sKey:
DAVOS_PREFS[in_sKey] = value
return
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
sPrevKey = ""
prevPrefs = None
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
prevPrefs[sPrevKey] = {}
currPrefs = prevPrefs[sPrevKey]
if i == iLastKey:
currPrefs[sKey] = value
return
if sKey not in currPrefs:
currPrefs[sKey] = {}
prevPrefs = currPrefs
sPrevKey = sKey
currPrefs = currPrefs[sKey]
def getPref(in_sKey, default=None):
global DAVOS_PREFS
if "|" not in in_sKey:
return DAVOS_PREFS.get(in_sKey, default)
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
k = "|".join(sKeyList[:(i + 1)])
logMsg("Not a pref dictionary: '{}'.".format(k), warning=True)
return default
if i == iLastKey:
return currPrefs.get(sKey, default)
if sKey in currPrefs:
currPrefs = currPrefs[sKey]
else:
logMsg("No such pref: '{}'.".format(in_sKey), warning=True)
return default
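# Example of the nested "|" key convention handled above (key names are made up):
#   setPref("ui|browser|zoom", 1.5)   # stores DAVOS_PREFS["ui"]["browser"]["zoom"] = 1.5
#   getPref("ui|browser|zoom", 1.0)   # -> 1.5, or the 1.0 default if the key is missing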
_ICON_DIR_PATH = ""
def mkIconPath(sRelPath):
global _ICON_DIR_PATH
if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)):
p = sys.modules["davos"].__file__
p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon"))
_ICON_DIR_PATH = p
return pathJoin(_ICON_DIR_PATH, sRelPath)
def writePackContent(sPackDirPath, dirStat=None):
sPackDirPath = pathNorm(sPackDirPath)
if not dirStat:
dirStat = os.stat(sPackDirPath)
sJsonPath = mkPackFilePath(sPackDirPath)
iMtime = 0
if not osp.exists(sJsonPath):
iMtime = dirStat.st_mtime
iAtime = dirStat.st_atime
try:
open(sJsonPath, 'a+b').close() # create json file so it is listed by parseDirContent()
dirContent = parseDirContent(sPackDirPath)
jsonWrite(sJsonPath, dirContent, sort_keys=True)
finally:
if iMtime:
os.utime(sPackDirPath, (iAtime, iMtime))
return dirContent
def readPackContent(sPackDirPath, fail=True):
try:
dirContent = jsonRead(mkPackFilePath(sPackDirPath))
except EnvironmentError as e:
if fail:
raise
logMsg(toStr(e), warning=True)
dirContent = parseDirContent(sPackDirPath)
return dirContent
def mkPackFilePath(sPackDirPath):
return pathJoin(sPackDirPath, "_package.json")
_ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I)
def assertPack(p, dirStat=None):
if not dirStat:
dirStat = os.stat(pathNorm(p))
if isPack(p, fail=True, dirStat=dirStat):
return dirStat
return None
def belowPack(p):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
return True if _belowPack(p) else _belowOldPack(p)
else:
return _belowPack(p)
def isPack(p, fail=False, dirStat=None):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
bPackPath = True if _isPack(p) else _isOldPack(p)
else:
bPackPath = _isPack(p)
if not bPackPath:
if fail:
sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'."
.format(osp.basename(p)))
raise EnvironmentError(sMsg)
else:
return False
if dirStat and not isDirStat(dirStat):
if fail:
raise EnvironmentError("Package path NOT a directory: '{}'".format(p))
else:
return False
return True
def _belowPack(p):
p = osp.dirname(p)
for sDirName in pathSplitDirs(p):
if _isPack(sDirName):
return True
return False
def _isPack(p):
sBaseName = osp.basename(p) if "/" in p else p
if "_" not in sBaseName:
return False
sPrefix = sBaseName.split("_", 1)[0]
if not sPrefix:
return False
return (sPrefix.lower() + "_") in ("pkg_", "lyr_")
def _belowOldPack(p):
p = osp.dirname(p)
if "_pkg/" in p.lower():
return True
if _ISPACK_REXP.match(p):
return True
return False
def _isOldPack(p):
sName = osp.basename(p)
if sName.lower().endswith("_pkg"):
return True
if _ISPACK_REXP.match(sName):
return True
return False
| gpl-3.0 | -7,329,362,235,480,504,000 | 23.341772 | 94 | 0.597634 | false |
solarsail/aerosol-tools | clustatlib/clucsv.py | 1 | 3752 | import numpy as np
import os
import os.path
class csvbuilder:
def __init__(self, cs):
self.cs = cs
if not os.path.isdir('csv'):
os.mkdir('csv')
def month_type_csv(self, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.month_type_stat(site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "month," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(12):
content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]])))
content = '\n'.join(content)
with open("csv/month_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def year_type_csv(self, start_year, end_year, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.year_type_stat(start_year, end_year, site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "year," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(start_year, end_year+1):
content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]])))
content = '\n'.join(content)
with open("csv/year_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_csv(self):
header = "type,count,percentage%"
all = self.cs.type_stat()
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def site_type_csv(self):
all, types = self.cs.site_type_stat()
header = ",".join(["type{},%".format(t) for t in range(1, types+1)])
header = "site," + header
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/site_type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_stat_csv(self):
header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity"
list1 = self.cs.type_means()
list2 = self.cs.type_stddev()
l = []
for i in range(len(list1)):
l.append(list1[i])
stddevline = list(list2[i])
stddevline[0] = "stddev"
l.append(stddevline)
content = '\n'.join([','.join([str(field) for field in row]) for row in l])
with open("csv/type_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def distances_csv(self):
clus, dist_mat = self.cs.all_distances()
header = "," + ",".join([str(cid) for cid in clus])
lines = []
first = 1
cur = 0
for clu in clus:
lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1]))
cur += len(clus) - first + 1
first += 1
content = '\n'.join(lines)
with open("csv/distance_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content))) | gpl-3.0 | -5,397,643,248,379,671,000 | 41.647727 | 203 | 0.539179 | false |
HazyResearch/metal | metal/logging/writer.py | 1 | 4223 | import copy
import json
import os
from collections import defaultdict
from subprocess import check_output
from time import strftime
from metal.utils import recursive_transform
class LogWriter(object):
"""Class for writing simple JSON logs at end of runs, with interface for
storing per-iter data as well.
Config contains:
log_dir: (str) The path to the base log directory, or defaults to
current working directory.
run_dir: (str) The name of the sub-directory, or defaults to the date,
strftime("%Y_%m_%d").
run_name: (str) The name of the run + the time, or defaults to the time,
strftime("%H_%M_%S).
writer_metrics: (list) An optional whitelist of metrics to write,
ignoring all others. (If None, write all available metrics).
Log is saved to 'log_dir/run_dir/{run_name}_H_M_S.json'
"""
def __init__(
self,
log_dir=None,
run_dir=None,
run_name=None,
writer_metrics=[],
verbose=True,
**kwargs,
):
start_date = strftime("%Y_%m_%d")
start_time = strftime("%H_%M_%S")
# Set logging subdirectory + make sure exists
log_dir = log_dir or os.getcwd()
run_dir = run_dir or start_date
if run_name is not None:
run_name = f"{run_name}_{start_time}"
else:
run_name = start_time
self.log_subdir = os.path.join(log_dir, run_dir, run_name)
if not os.path.exists(self.log_subdir):
os.makedirs(self.log_subdir)
# Save other settings
self.writer_metrics = writer_metrics
self.verbose = verbose
# Initialize log
# Note we have a separate section for during-run metrics
commit = check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
self.log_dict = {
"start_date": start_date,
"start_time": start_time,
"commit": str(commit),
"config": None,
"run_log": defaultdict(list),
}
def add_scalar(self, name, val, i):
# Note: Does not handle deduplication of (name, val) entries w same i
        if not self.writer_metrics or name in self.writer_metrics:
if val is not None:
val = float(val)
self.log_dict["run_log"][name].append((i, val))
return True
else:
return False
def write(self, config=None, metrics=None):
        self.write_log()
if config is not None:
self.write_config(config)
if metrics is not None:
self.write_metrics(metrics)
def write_log(self):
"""Dump log output to file"""
log_path = os.path.join(self.log_subdir, "log.json")
if self.verbose:
print(f"Writing log to {log_path}")
with open(log_path, "w") as f:
json.dump(self.log_dict, f, indent=1)
def write_config(self, config, config_name="config"):
"""Dump config dict to file"""
config_path = os.path.join(self.log_subdir, f"{config_name}.json")
if self.verbose:
print(f"Writing config to {config_path}")
with open(config_path, "w") as f:
config = self._sanitize_config(config)
json.dump(config, f, indent=1)
def write_metrics(self, metrics):
metrics_path = os.path.join(self.log_subdir, "metrics.json")
if self.verbose:
print(f"Writing metrics to {metrics_path}")
with open(metrics_path, "w") as f:
json.dump(metrics, f, indent=1)
def close(self):
pass
def _sanitize_config(self, config):
config = copy.deepcopy(config)
# Replace individual functions
is_func = lambda x: callable(x)
replace_with_name = lambda f: str(f)
config = recursive_transform(config, is_func, replace_with_name)
# Replace lists of functions
is_func_list = lambda x: isinstance(x, list) and all(is_func(f) for f in x)
replace_with_names = lambda x: [replace_with_name(f) for f in x]
config = recursive_transform(config, is_func_list, replace_with_names)
return config
| apache-2.0 | -3,689,918,546,375,237,000 | 34.191667 | 83 | 0.582998 | false |
hawkeyexp/plugin.video.netflix | resources/lib/services/nfsession/session/base.py | 1 | 2055 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Initialize the netflix session
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
class SessionBase(object):
"""Initialize the netflix session"""
session = None
"""The requests.session object to handle communication to Netflix"""
verify_ssl = True
"""Use SSL verification when performing requests"""
# Functions from derived classes to allow perform particular operations in parent classes
external_func_activate_profile = None # (set by nfsession_op.py)
def __init__(self):
self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification'))
self._init_session()
def _init_session(self):
"""Initialize the session to use for all future connections"""
try:
self.session.close()
LOG.info('Session closed')
except AttributeError:
pass
from requests import session
self.session = session()
self.session.max_redirects = 10 # Too much redirects should means some problem
self.session.headers.update({
'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True),
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'www.netflix.com'
})
LOG.info('Initialized new session')
@property
def auth_url(self):
"""Access rights to make HTTP requests on an endpoint"""
return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION)
@auth_url.setter
def auth_url(self, value):
G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
| mit | 8,894,260,233,918,521,000 | 33.830508 | 93 | 0.66618 | false |
playerNaN/NaNPyGameEngine | engine.py | 1 | 5921 | import pygame
import sys
import os
from collections import namedtuple
import time
import resourcemanager
ColorList = namedtuple("ColorList", "black white red green blue")
colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF))
PyListener = namedtuple("PyListener", "condition effect")
PyEventListener = namedtuple("PyEventListener","events condition effect")
class Pyengine:
def __init__(self,size):
pygame.init()
self.__size = size
self.__fps = 60
self.__bg = colors.white
self.__fg = colors.black
self.__on_update = []
self.__on_draw = []
self.__keys_down = {}
self.__listeners = []
self.__event_handlers = []
self.__mouse_down = {}
self.__display = None
self.__screen_centerX = size[0]/2
self.__scaleX = 1.0
self.__scaleY = 1.0
self.__screen_centerY = size[1]/2
self.__clock = pygame.time.Clock()
self.__buffer_surface = None
self.__resource_manager = resourcemanager.ResourceManager()
self.__animators = {}
def add_animator(self,name,animator):
self.__animators[name] = animator
def remove_animator(self,name):
del self.__animators[name]
def get_animator(self,name):
return self.__animators[name]
def set_scale_x(self,x):
self.__scaleX = x
def get_scale_x(self):
return self.__scaleX
def set_scale_y(self,y):
self.__scaleY = y
def get_scale_y(self):
return self.__scaleY
def set_scale(self,s):
self.__scaleX = s[0]
self.__scaleY = s[1]
def get_scale(self):
return (self.__scaleX,self.__scaleY)
def set_fg(self,fg):
self.__fg = fg
def get_fg(self):
return self.__fg
def set_bg(self,bg):
self.__bg = bg
def get_bg(self):
return self.__bg
def get_display(self):
        return self.__display
def set_screen_center_x(self,x):
self.__screen_centerX = x
def get_screen_center_x(self):
return self.__screen_centerX
def set_screen_center_y(self,y):
self.__screen_centerY = y
def get_screen_center_y(self):
return self.__screen_centerY
def set_screen_center(self,pos):
self.__screen_centerX = pos[0]
self.__screen_centerY = pos[1]
def get_screen_center(self):
return (self.__screen_centerX,self.__screen_centerY)
def get_buffer_surface(self):
return self.__buffer_surface
def get_resource_manager(self):
return self.__resource_manager
def update_all_animators(self):
ms = self.__clock.get_time()
for i in self.__animators:
self.__animators[i].update(ms)
def draw_all_animators(self):
for i in self.__animators:
self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position())
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.exit()
elif event.type == pygame.KEYDOWN:
self.__keys_down[event.key] = True
elif event.type == pygame.KEYUP:
self.__keys_down[event.key] = False
elif event.type == pygame.MOUSEBUTTONDOWN:
                self.__mouse_down[event.button] = True
elif event.type == pygame.MOUSEBUTTONUP:
                self.__mouse_down[event.button] = False
for handler in self.__event_handlers:
if event.type in handler.events and handler.condition(self,event):
handler.effect(self,event)
def draw_image(self,name,pos):
self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos)
def is_key_down(self,key):
if not key in self.__keys_down:
return False
return self.__keys_down[key]
def is_mouse_button_down(self,button):
if not button in self.__mouse_down:
return False
return self.__mouse_down[button]
def run(self):
screen = pygame.display.set_mode(self.__size)
self.__display = screen
oldTime = time.time()
while True:
spf = 1.0 / self.__fps
self.handle_events()
self.update()
self.draw(screen)
self.__clock.tick(self.__fps)
def exit(self):
pygame.display.quit()
pygame.quit()
sys.exit()
def update(self):
self.update_all_animators()
for l in self.__listeners:
if l.condition(self):
l.effect(self)
def draw(self,display):
self.__buffer_surface = pygame.Surface(display.get_size())
display.fill(colors.red)
self.__buffer_surface.fill(self.__bg)
for od in self.__on_draw:
od(self,self.__buffer_surface)
self.draw_all_animators()
src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
top = self.__screen_centerY - src_size[1] / 2
left = self.__screen_centerX - src_size[0] / 2
cropped = pygame.Surface(src_size)
cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
cropped = pygame.transform.scale(cropped,self.__size)
display.blit(cropped,(0,0))
pygame.display.update((0,0,self.__size[0],self.__size[1]))
def add_draw_listener(self,f):
self.__on_draw.append(f)
def add_listener(self,condition,effect):
self.__listeners.append(PyListener(condition,effect))
def add_on_update(self,effect):
        self.add_listener(lambda s:True,effect)
def add_event_listener(self,events,condition,effect):
self.__event_handlers.append(PyEventListener(events,condition,effect))
def set_fps(self,fps):
self.__fps = fps
def get_fps(self):
return self.__fps
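# Example wiring (callback bodies are hypothetical, for illustration only):
#   engine = Pyengine((800, 600))
#   engine.add_event_listener([pygame.KEYDOWN],
#                             lambda eng, ev: ev.key == pygame.K_SPACE,
#                             lambda eng, ev: eng.set_bg(colors.blue))
#   engine.run()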
| unlicense | -5,850,025,484,649,098,000 | 32.647727 | 103 | 0.575578 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | 1 | 21363 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetRollingUpgradesOperations:
"""VirtualMachineScaleSetRollingUpgradesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _cancel_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
async def begin_cancel(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
async def _start_os_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_os_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def begin_start_os_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def _start_extension_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_extension_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_extension_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def begin_start_extension_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
the latest available extension version. Instances which are already running the latest
extension versions are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_extension_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_extension_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get_latest.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
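# Usage sketch (an assumption, not part of the generated SDK file: assumes an
# authenticated async ComputeManagementClient named `compute_client`, and that
# the operations group attribute is `virtual_machine_scale_set_rolling_upgrades`):
#
# poller = await compute_client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade(
#     resource_group_name="my-rg",
#     vm_scale_set_name="my-vmss",
# )
# await poller.result()  # waits for the long-running operation to finish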
| mit | 8,239,307,016,554,352,000 | 49.503546 | 234 | 0.654356 | false |
muminoff/savollar | savollar/pipelines.py | 1 | 2093 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don"t forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
from elasticsearch import Elasticsearch
from uuid import uuid1
from savollar.models import SavolModel
class ElasticSearchIndexPipeline(object):
def process_item(self, item, spider):
es = Elasticsearch([
{"host": settings["ELASTICSEARCH_HOST"]},
])
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
es.index(
index=settings["ELASTICSEARCH_INDEX"],
doc_type="info",
id=str(uuid1()),
body=dict(item)
)
log.msg("Item indexed to ElasticSearch database %s:%s" %
(settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]),
level=log.DEBUG, spider=spider)
return item
class CassandraExportPipleline(object):
def process_item(self, item, spider):
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
model = SavolModel()
model.title = item["title"]
model.question = item["question"]
model.answer = item["answer"]
model.author = item["author"]
model.permalink = item["permalink"]
model.year = int(item["year"])
model.month = int(item["month"])
model.date = int(item["date"])
model.tags = item["title"].split()
model.save()
log.msg("Item exported to Cassandra database %s/%s" %
(settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]),
level=log.DEBUG, spider=spider)
return item
| apache-2.0 | 5,381,455,535,540,653,000 | 33.883333 | 85 | 0.565695 | false |
shanot/imp | modules/rmf/examples/link.py | 2 | 1236 | ## \example rmf/link.py
# This example is like module/rmf/pdb.py except that instead of creating a
# new hierarchy from the rmf file, it simply links the existing hierarchy
# to the file. This mechanism can be used for loading multiple
# conformations for scoring or other analysis without having to set up
# restraints and things each time.
from __future__ import print_function
import IMP.atom
import IMP.rmf
import RMF
import sys
IMP.setup_from_argv(sys.argv, "link")
m = IMP.Model()
# Create a new IMP.atom.Hierarchy from the contents of the pdb file
h = IMP.atom.read_pdb(IMP.rmf.get_example_path("simple.pdb"), m)
tfn = "link.rmf"
print("File name is", tfn)
# open the file, clearing any existing contents
rh = RMF.create_rmf_file(tfn)
# add the hierarchy to the file
IMP.rmf.add_hierarchies(rh, [h])
# add the current configuration to the file as frame 0
IMP.rmf.save_frame(rh)
# close the file
del rh
# reopen it, don't clear the file when opening it
rh = RMF.open_rmf_file_read_only(tfn)
# link to the existing pdb hierarchy
IMP.rmf.link_hierarchies(rh, [h])
# load the same coordinates in, ok, that is not very exciting
IMP.rmf.load_frame(rh, RMF.FrameID(0))
print("Try running rmf_display or rmf_show on", tfn)
| gpl-3.0 | 2,193,594,142,939,475,700 | 25.869565 | 74 | 0.7411 | false |
karesansui/karesansui | bin/restart_network.py | 1 | 4392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException
from karesansui.lib.const import NETWORK_IFCONFIG_COMMAND, NETWORK_BRCTL_COMMAND
from karesansui.lib.utils import load_locale
from karesansui.lib.utils import execute_command
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-n', '--name', dest='name', help=_('Network name'))
optp.add_option('-f', '--force', dest='force', action="store_true", help=_('Do everything to bring up network'))
return optp.parse_args()
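# Example invocations (a sketch; the network name "default" is hypothetical):
# restart_network.py -n default
# restart_network.py -n default --force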
def chkopts(opts):
if not opts.name:
raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
class RestartNetwork(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
conn = KaresansuiVirtConnection(readonly=False)
try:
active_networks = conn.list_active_network()
inactive_networks = conn.list_inactive_network()
if not (opts.name in active_networks or opts.name in inactive_networks):
raise KssCommandException('Could not find the specified network. - net=%s' % (opts.name))
self.up_progress(10)
try:
conn.stop_network(opts.name)
except KaresansuiVirtException, e:
if opts.force is not True:
raise KssCommandException('Could not stop the specified network. - net=%s' % (opts.name))
self.up_progress(20)
try:
conn.start_network(opts.name)
except KaresansuiVirtException, e:
if opts.force is not True:
raise KssCommandException('Could not start the specified network. - net=%s' % (opts.name))
# try to bring down existing bridge
kvn = conn.search_kvn_networks(opts.name)[0]
try:
bridge_name = kvn.get_info()['bridge']['name']
except KeyError:
pass
ret, res = execute_command([NETWORK_IFCONFIG_COMMAND, bridge_name, 'down'])
ret, res = execute_command([NETWORK_BRCTL_COMMAND, 'delbr', bridge_name])
# try again
conn.start_network(opts.name)
self.up_progress(10)
if not (opts.name in conn.list_active_network()):
raise KssCommandException('Failed to start network. - net=%s' % (opts.name))
self.logger.info('Restarted network. - net=%s' % (opts.name))
print >>sys.stdout, _('Restarted network. - net=%s') % (opts.name)
return True
finally:
conn.close()
if __name__ == "__main__":
target = RestartNetwork()
sys.exit(target.run())
| mit | 2,224,646,137,374,195,000 | 36.538462 | 116 | 0.645492 | false |
qnzhou/ThingiverseCrawler | thingiverse_crawler.py | 1 | 9320 | #!/usr/bin/env python
import argparse
import datetime
import os
import os.path
import requests
import re
import time
import urllib
import urlparse
from subprocess import check_call
def utc_mktime(utc_tuple):
"""Returns number of seconds elapsed since epoch
Note that no timezone is taken into consideration.
utc tuple must be: (year, month, day, hour, minute, second)
"""
if len(utc_tuple) == 6:
utc_tuple += (0, 0, 0)
return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0))
def datetime_to_timestamp(dt):
"""Converts a datetime object to UTC timestamp"""
return int(utc_mktime(dt.timetuple()))
def parse_thing_ids(text):
pattern = "thing:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
def parse_file_ids(text):
pattern = "download:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
known_licenses = [
("Creative Commons - Attribution",
re.compile("http://creativecommons.org/licenses/by/\d(.\d)?/")),
("Creative Commons - Attribution - Share Alike",
re.compile("http://creativecommons.org/licenses/by-sa/\d(.\d)?/")),
("Creative Commons - Attribution - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nd/\d(.\d)?/")),
("Creative Commons - Attribution - Non-Commercial",
re.compile("http://creativecommons.org/licenses/by-nc/\d(.\d)?/")),
("Attribution - Non-Commercial - Share Alike",
re.compile("http://creativecommons.org/licenses/by-nc-sa/\d(.\d)?/")),
("Attribution - Non-Commercial - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nc-nd/\d(.\d)?/")),
("Creative Commons - Public Domain Dedication",
re.compile("http://creativecommons.org/publicdomain/zero/\d(.\d)?/")),
("GNU - GPL",
re.compile("http://creativecommons.org/licenses/GPL/\d(.\d)?/")),
("GNU - LGPL",
re.compile("http://creativecommons.org/licenses/LGPL/\d(.\d)?/")),
("BSD License",
re.compile("http://creativecommons.org/licenses/BSD/")),
("Nokia",
re.compile("http://www.developer.nokia.com/Terms_and_conditions/3d-printing.xhtml")),
("Public Domain",
re.compile("http://creativecommons.org/licenses/publicdomain/")),
]
def parse_license(text):
for name, pattern in known_licenses:
if pattern.search(text):
return name
return "unknown_license"
def crawl_thing_ids(N, end_date=None):
""" This method extract N things that were uploaded to thingiverse.com
before end_date. If end_date is None, use today's date.
"""
baseurl = "http://www.thingiverse.com/search/recent/things/page:{}?q=&start_date=&stop_date={}&search_mode=advanced&description=&username=&tags=&license="
end_date = datetime_to_timestamp(end_date)
thing_ids = set()
for i in range(N/12 + 1):
url = baseurl.format(i, end_date)
r = requests.get(url)
assert(r.status_code==200)
thing_ids.update(parse_thing_ids(r.text))
if len(thing_ids) > N:
break
# Sleep a bit to avoid being mistaken as DoS.
time.sleep(0.5)
return thing_ids
def crawl_things(N, output_dir, term=None, category=None, source=None, organize=False):
#baseurl = "http://www.thingiverse.com/newest/page:{}"
#baseurl = "http://www.thingiverse.com/explore/popular/page:{}"
key = None
if term is None:
assert(source is not None);
url_prefix= "http://www.thingiverse.com/explore/{}/".format(source);
if category is None:
baseurl = url_prefix + "page:{}"
else:
baseurl = url_prefix + urllib.quote_plus(category) + "/page:{}"
key = category
else:
baseurl = "http://www.thingiverse.com/search/page:{}?type=things&q=" + urllib.quote_plus(term)
key = term
thing_ids = set()
file_ids = set()
records = []
num_files = 0
page = 0
previous_path = ''
while True:
url = baseurl.format(page+1)
contents = get_url(url)
page += 1
# If the previous url ends up being the same as the old one, we should stop as there are no more pages
current_path = urlparse.urlparse(contents.url).path
if previous_path == current_path:
return records
else:
previous_path = current_path
for thing_id in parse_thing_ids(contents.text):
if thing_id in thing_ids:
continue
print("thing id: {}".format(thing_id))
thing_ids.add(thing_id)
license, thing_files = get_thing(thing_id)
for file_id in thing_files:
if file_id in file_ids:
continue
file_ids.add(file_id)
print(" file id: {}".format(file_id))
result = download_file(file_id, thing_id, output_dir, organize)
if result is None: continue
filename, link = result
if filename is not None:
records.append((thing_id, file_id, filename, license, link))
if N is not None and len(records) >= N:
return records
# Sleep a bit to avoid being mistaken as DoS.
time.sleep(0.5)
save_records(records, key)
def get_thing(thing_id):
base_url = "http://www.thingiverse.com/{}:{}"
file_ids = []
url = base_url.format("thing", thing_id)
contents = get_url(url).text
license = parse_license(contents)
return license, parse_file_ids(contents)
def get_url(url, time_out=600):
r = requests.get(url)
sleep_time = 1.0
while r.status_code != 200:
print("sleep {}s".format(sleep_time))
print(url)
time.sleep(sleep_time)
r = requests.get(url)
sleep_time += 2
if (sleep_time > time_out):
# We have slept for over 10 minutes, the page probably does
# not exist.
break
if r.status_code != 200:
print("failed to retrieve {}".format(url))
else:
return r
# return r.text
def get_download_link(file_id):
base_url = "https://www.thingiverse.com/{}:{}"
url = base_url.format("download", file_id)
r = requests.head(url)
link = r.headers.get("Location", None)
if link is not None:
__, ext = os.path.splitext(link)
if ext.lower() not in [".stl", ".obj", ".ply", ".off"]:
return None
return link
def download_file(file_id, thing_id, output_dir, organize):
link = get_download_link(file_id)
if link is None:
return None
__, ext = os.path.splitext(link)
output_file = "{}{}".format(file_id, ext.lower())
if organize:
output_file = os.path.join(str(thing_id), output_file)
output_file = os.path.join(output_dir, output_file)
command = "wget -q --tries=20 --waitretry 20 -O {} {}".format(output_file, link)
#check_call(command.split())
return output_file, link
def save_records(records, key=None):
# Enforce kebab case file name
output_name = re.sub('(\w) (\w)', r'\1-\2', key).lower()+"-" if key else ""
output_name += "summary"
with open(output_name+".csv", 'w') as fout:
fout.write("thing_id, file_id, file, license, link\n")
for entry in records:
fout.write(",".join([str(val) for val in entry]) + "\n")
def parse_args():
parser = argparse.ArgumentParser(
description="Crawl data from thingiverse",
epilog="Written by Qingnan Zhou <qnzhou at gmail dot com> Modified by Mike Gleason")
parser.add_argument("--output-dir", "-o", help="output directories",
default=".")
parser.add_argument("--number", "-n", type=int,
help="how many files to crawl", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--search-term", "-s", type=str, default=None,
help="term to search for")
group.add_argument("--category", "-c", type=str, default=None,
help="catergory to search for")
parser.add_argument('--organize', dest='organized', default=False, action='store_true',
help="organize files by their main category")
parser.add_argument("--source", choices=("newest", "featured", "popular",
"verified", "made-things", "derivatives", "customizable",
"random-things", "firehose"), default="featured");
return parser
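# Example invocations (sketches; search terms and output paths are hypothetical):
# python thingiverse_crawler.py -n 100 -o ./meshes
# python thingiverse_crawler.py -s "gear" -o ./meshes --organize
# python thingiverse_crawler.py -c "art" --source popular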
def main():
parser = parse_args()
args = parser.parse_args()
if args.number is None and (args.search_term is None and args.category is None):
parser.error('Number or Search/Category Term required')
output_dir = args.output_dir
number = args.number
records = crawl_things(
args.number,
output_dir,
args.search_term,
args.category,
args.source,
args.organized)
if args.search_term:
save_records(records, args.search_term)
elif args.category:
save_records(records, args.category)
else:
save_records(records)
if __name__ == "__main__":
main()
| mit | -6,989,201,447,378,768,000 | 33.64684 | 158 | 0.593777 | false |
PyBossa/pybossa | pybossa/model/counter.py | 2 | 1787 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.dialects.postgresql import TIMESTAMP
from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp
class Counter(db.Model, DomainObject):
'''A Counter lists the number of task runs for a given Task.'''
__tablename__ = 'counter'
#: Counter.ID
id = Column(Integer, primary_key=True)
#: UTC timestamp when the counter was created.
created = Column(TIMESTAMP, default=make_timestamp)
#: Project.ID that this counter is associated with.
project_id = Column(Integer, ForeignKey('project.id',
ondelete='CASCADE'),
nullable=False)
#: Task.ID that this counter is associated with.
task_id = Column(Integer, ForeignKey('task.id',
ondelete='CASCADE'),
nullable=False)
#: Number of task_runs for this task.
n_task_runs = Column(Integer, default=0, nullable=False)
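# Minimal usage sketch (an assumption, not part of this module; assumes an
# existing project and task and the usual Flask-SQLAlchemy session):
#
# counter = Counter(project_id=project.id, task_id=task.id, n_task_runs=0)
# db.session.add(counter)
# db.session.commit()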
| agpl-3.0 | -4,575,830,606,666,470,400 | 39.613636 | 77 | 0.684947 | false |
marbu/pylatest | tests/xdocutils/test_utils.py | 1 | 6663 | # -*- coding: utf8 -*-
"""
Tests of helper functions from pylatest.xdocutils.utils module.
"""
# Copyright (C) 2018 Martin Bukatovič <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from docutils.core import publish_doctree
import pytest
from pylatest.xdocutils.core import pylatest_publish_parts
from pylatest.xdocutils.readers import NoDocInfoReader
from pylatest.xdocutils.utils import get_field_list
from pylatest.xdocutils.utils import get_testcase_id
from pylatest.xdocutils.utils import get_testcase_requirements
def _publish(source):
"""
Parse rst source string into doctree.
"""
doctree = publish_doctree(
source=source,
reader=NoDocInfoReader(),
parser_name='restructuredtext',)
return doctree
def test_get_field_list_null(empty_doctree):
assert get_field_list(empty_doctree) == None
def test_get_field_list_missing():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
There is no field list.
Description
===========
Nothing here as well.
'''))
assert get_field_list(doctree) == None
def test_get_field_list_present():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:id: FOO-122
:author: [email protected]
:component: foo
'''))
assert get_field_list(doctree) is not None
def test_get_testcase_id_null(empty_doctree):
assert get_testcase_id(empty_doctree) == None
def test_get_testcase_id():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:id: FOO-122
:author: [email protected]
:component: foo
'''))
assert get_testcase_id(doctree) == "FOO-122"
#
# requirements
#
def test_get_testcase_requirements_null(empty_doctree):
assert get_testcase_requirements(empty_doctree) == []
REQUIREMENT_FIELD_NAMES = ["requirement", "requirements"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}: FOO-212
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_empty(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
'''.format(field_name)))
assert get_testcase_requirements(doctree) == []
def test_get_testcase_requirements_many():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:requirement: FOO-212
:requirement: FOO-232
:component: foo
'''))
assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_single(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
- FOO-212
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
- FOO-212
- FOO-232
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_someemptyitems(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
-
- FOO-132
-
:requirement: FOO-130
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-132", "FOO-130"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_onlyemptyitems(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
-
-
'''.format(field_name)))
assert get_testcase_requirements(doctree) == []
def test_get_testcase_requirements_many_list_many():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:requirement: FOO-012
:requirement: FOO-032
:requirements:
- FOO-212
- FOO-232
'''))
assert get_testcase_requirements(doctree) == [
"FOO-012", "FOO-032", "FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_url_link(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:requirement: https://example.com
'''.format(field_name)))
results = get_testcase_requirements(doctree)
assert len(results) == 1
# check that we get actual rst node for a link (reference node)
assert results[0].tagname == "reference"
assert results[0].astext() == "https://example.com"
def test_get_testcase_requirements_many_list_url_link():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:requirements:
- https://example.com/foo
- https://example.com/bar
'''))
results = get_testcase_requirements(doctree)
assert len(results) == 2
# check that we get actual rst node for a link (reference node)
assert results[0].tagname == "reference"
assert results[1].tagname == "reference"
# and expected content
assert results[0].astext() == "https://example.com/foo"
assert results[1].astext() == "https://example.com/bar"
| gpl-3.0 | 7,024,286,174,276,592,000 | 25.12549 | 72 | 0.655509 | false |
gogogo/gogogo-hk | gogogo/models/property.py | 1 | 3233 | from google.appengine.ext import db
from django import forms
from django.utils.translation import ugettext_lazy as _
class TransitTypeProperty(db.IntegerProperty):
"""
Transit Type Property - Storage of transit type
"""
def __init__ (self,*args,**kwargs):
kwargs["choices"] = range(0,8)
db.IntegerProperty.__init__(self,*args,**kwargs)
def validate(self, value):
if isinstance(value,basestring):
value = int(value)
return super(TransitTypeProperty, self).validate(value)
def get_form_field(self, **kwargs):
attrs = {
'form_class': forms.ChoiceField,
'choices' : TransitTypeProperty.get_choices()
}
attrs.update(kwargs)
return super(TransitTypeProperty, self).get_form_field(**attrs)
def get_choices():
ret = [ (i,TransitTypeProperty.get_type_name(i)) for i in range(0,8)]
return ret
get_choices = staticmethod(get_choices)
def get_basic_type_name_list():
"""
Return a list of basic type name
"""
ret = [TransitTypeProperty.get_type_name(i) for i in range(0,8)]
return ret
get_basic_type_name_list = staticmethod(get_basic_type_name_list)
def get_type_name(type):
if type == 0:
return _("Tram, Streetcar, Light rail")
elif type == 1:
return _("Subway, Metro") #Any underground rail system within a metropolitan area
elif type == 2:
return _("Rail") #Used for intercity or long-distance travel.
elif type == 3:
return _("Bus")
elif type == 4:
return _("Ferry")
elif type == 5:
return _("Cable car")
elif type == 6:
return _("Gondola, Suspended cable car")
elif type == 7:
return _("Funicular")
else:
return ""
get_type_name = staticmethod(get_type_name)
class PaymentMethodProperty(db.IntegerProperty):
"""
Payment Method
"""
def __init__ (self,*args,**kwargs):
kwargs["choices"] = range(0,2)
if "default" not in kwargs:
kwargs["default"] = 0
db.IntegerProperty.__init__(self,*args,**kwargs)
def validate(self, value):
if isinstance(value,basestring):
value = int(value)
return super(PaymentMethodProperty, self).validate(value)
def get_form_field(self, **kwargs):
attrs = {
'form_class': forms.ChoiceField,
'choices' : PaymentMethodProperty.get_choices()
}
attrs.update(kwargs)
return super(PaymentMethodProperty, self).get_form_field(**attrs)
def get_choices():
ret = [ (i,PaymentMethodProperty.get_type_name(i)) for i in range(0,2)]
return ret
get_choices = staticmethod(get_choices)
def get_type_name(type):
if type == 0:
return _("Fare is paid on board")
elif type == 1:
return _("Fare must be paid before boarding")
get_type_name = staticmethod(get_type_name)
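# Minimal usage sketch (the Route model below is an assumption, not part of
# this module):
#
# class Route(db.Model):
#     transit_type = TransitTypeProperty()
#     payment_method = PaymentMethodProperty()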
| agpl-3.0 | 2,126,706,793,343,167,200 | 30.086538 | 93 | 0.554903 | false |
bmazin/ARCONS-pipeline | examples/Pal2014-J0337/hTestLimit.py | 1 | 8356 | #Filename: hTestLimit.py
#Author: Matt Strader
#
#This script opens a list of observed photon phases, runs an H-test on them,
#and compares the result against simulated pulse profiles to estimate a
#limiting pulsed fraction (and limiting magnitude).
import numpy as np
import tables
import numexpr
import matplotlib.pyplot as plt
import multiprocessing
import functools
import time
from kuiper.kuiper import kuiper,kuiper_FPP
from kuiper.htest import h_test,h_fpp,h_test2
from pulsarUtils import nSigma,plotPulseProfile
from histMetrics import kuiperFpp,hTestFpp
from inverseTransformSampling import inverseTransformSampler
def hTestTrial(iTrial,nPhotons,photonPulseFraction,pulseModel,pulseModelQueryPoints):
np.random.seed(int((time.time()+iTrial)*1e6))
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons))
nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons))
simPulsePhotons = modelSampler(nPulsePhotons)
#background photons come from a uniform distribution
simBackgroundPhotons = np.random.random(nBackgroundPhotons)
simPhases = np.append(simPulsePhotons,simBackgroundPhotons)
simHDict = h_test2(simPhases)
simH,simM,simPval,simFourierCoeffs = simHDict['H'],simHDict['M'],simHDict['fpp'],simHDict['cs']
print '{} - H,M,fpp:'.format(iTrial),simH,simM,simPval
return {'H':simH,'M':simM,'fpp':simPval}
if __name__=='__main__':
path = '/Scratch/dataProcessing/J0337/masterPhotons3.h5'
wvlStart = 4000.
wvlEnd = 5500.
bLoadFromPl = True
nPhaseBins = 20
hTestPath = '/Scratch/dataProcessing/J0337/hTestResults_withProfiles_{}-{}.npz'.format(wvlStart,wvlEnd)
phaseBinEdges = np.linspace(0.,1.,nPhaseBins+1)
if bLoadFromPl:
photFile = tables.openFile(path,'r')
photTable = photFile.root.photons.photTable
phases = photTable.readWhere('(wvlStart < wavelength) & (wavelength < wvlEnd)')['phase']
photFile.close()
print 'cut wavelengths to range ({},{})'.format(wvlStart,wvlEnd)
nPhotons = len(phases)
print nPhotons,'real photons read'
observedProfile,_ = np.histogram(phases,bins=phaseBinEdges)
observedProfile = 1.0*observedProfile
observedProfileErrors = np.sqrt(observedProfile)
#Do H-test
hDict = h_test2(phases)
H,M,pval,fourierCoeffs = hDict['H'],hDict['M'],hDict['fpp'],hDict['cs']
print 'h-test on real data'
print 'H,M,fpp:',H,M,pval
print nSigma(1-pval),'sigmas'
#h_test2 calculates all fourierCoeffs out to 20, but for the fourier model, we only want the ones out to order M, which optimizes the Zm^2 metric
truncatedFourierCoeffs = fourierCoeffs[0:M]
print 'fourier coeffs:',truncatedFourierCoeffs
#for the model, we want the negative modes as well as positve, so add them
modelFourierCoeffs = np.concatenate([truncatedFourierCoeffs[::-1],[1.],np.conj(truncatedFourierCoeffs)])
#make array of mode numbers
modes = np.arange(-len(truncatedFourierCoeffs),len(truncatedFourierCoeffs)+1)
#save so next time we can set bLoadFromPl=False
np.savez(hTestPath,H=H,M=M,pval=pval,fourierCoeffs=fourierCoeffs,nPhotons=nPhotons,wvlRange=(wvlStart,wvlEnd),modelFourierCoeffs=modelFourierCoeffs,modes=modes,observedProfile=observedProfile,observedProfileErrors=observedProfileErrors,phaseBinEdges=phaseBinEdges)
else:
#Load values from previous run, when we had bLoadFromPl=True
hTestDict = np.load(hTestPath)
H,M,pval,fourierCoeffs,nPhotons,modelFourierCoeffs,modes = hTestDict['H'],hTestDict['M'],hTestDict['pval'],hTestDict['fourierCoeffs'],hTestDict['nPhotons'],hTestDict['modelFourierCoeffs'],hTestDict['modes']
observedProfile,observedProfileErrors,phaseBinEdges = hTestDict['observedProfile'],hTestDict['observedProfileErrors'],hTestDict['phaseBinEdges']
print 'h-test on real data'
print 'H,M,fpp:',H,M,pval
print nSigma(1-pval),'sigmas'
#Plot the observed profile
fig,ax = plt.subplots(1,1)
plotPulseProfile(phaseBinEdges,observedProfile,profileErrors=observedProfileErrors,color='k',plotDoublePulse=False,label='observed',ax=ax)
ax.set_ylabel('counts')
ax.set_xlabel('phase')
ax.set_title('Observed Folded Light Curve {}-{} nm'.format(wvlStart/10.,wvlEnd/10.))
#make as set of x points for the pulse model we'll make
#Do NOT include x=0, or the inverted function will have a jump that causes an excess of samples
#at phase=0
nSmoothPlotPoints=1000
pulseModelQueryPoints = np.linspace(1./nSmoothPlotPoints,1,nSmoothPlotPoints)
def modelProfile(thetas):
return np.sum( modelFourierCoeffs * np.exp(2.j*np.pi*modes*thetas[:,np.newaxis]),axis=1)
lightCurveModel = np.abs(modelProfile(pulseModelQueryPoints))
#for this test we only want the model to be the pulsed component. We will add a DC offset later
pulseModel = lightCurveModel - np.min(lightCurveModel)
#initialPhotonPulseFraction = 1.*np.sum(pulseModel) / np.sum(lightCurveModel)
photonPulseFraction=15400./nPhotons #skip to previously determined answer
print 'photon fraction',photonPulseFraction
#get samples with distribution of the modelProfile
#modelSampler = inverseTransformSampler(pdf=lightCurveModel,queryPoints=pulseModelQueryPoints)
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nTrials = 1
#for each trial run the h test on a set of photon phases with our model profile, and with the pulse fraction specified
#we want to make a distribution of H values for this pulse fraction, model, and number of photons
#make a function that only takes the trial number (as an identifier)
mappableHTestTrial = functools.partial(hTestTrial,pulseModel=pulseModel,
pulseModelQueryPoints=pulseModelQueryPoints,nPhotons=nPhotons,
photonPulseFraction=photonPulseFraction)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-3)#leave a few processors for other people
outDicts = pool.map(mappableHTestTrial,np.arange(nTrials))
simHs = np.array([out['H'] for out in outDicts])
simPvals = np.array([out['fpp'] for out in outDicts])
#save the resulting list of H vals
np.savez('sim3-h-{}.npz'.format(nTrials),simHs=simHs,simPvals=simPvals,pval=pval,H=H,photonPulseFraction=photonPulseFraction,nPhotons=nPhotons)
#make a model profile once more for a plot
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons))
nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons))
simPulsePhotons = modelSampler(nPulsePhotons)
#background photons come from a uniform distribution
simBackgroundPhotons = np.random.random(nBackgroundPhotons)
#put them together for the full profile
simPhases = np.append(simPulsePhotons,simBackgroundPhotons)
#make a binned phase profile to plot
simProfile,_ = np.histogram(simPhases,bins=phaseBinEdges)
simProfileErrors = np.sqrt(simProfile)#assume Poisson errors
meanLevel = np.mean(simProfile)
fig,ax = plt.subplots(1,1)
ax.plot(pulseModelQueryPoints,meanLevel*lightCurveModel,color='r')
plotPulseProfile(phaseBinEdges,simProfile,profileErrors=simProfileErrors,color='b',plotDoublePulse=False,label='sim',ax=ax)
ax.set_title('Simulated profile')
#
#plt.show()
print '{} trials'.format(len(simHs))
print 'observed fpp:',pval
frac = 1.*np.sum(simPvals<pval)/len(simPvals)
print 'fraction of trials with H below observed fpp:',frac
#hHist,hBinEdges = np.histogram(simHs,bins=100,density=True)
fppHist,fppBinEdges = np.histogram(simPvals,bins=100,density=True)
if nTrials > 1:
fig,ax = plt.subplots(1,1)
ax.plot(fppBinEdges[0:-1],fppHist,drawstyle='steps-post',color='k')
ax.axvline(pval,color='r')
ax.set_xlabel('fpp')
ax.set_ylabel('frequency')
ax.set_title('Distribution of H for model profile')
magG = 17.93
sineMagDiff = -2.5*np.log10(photonPulseFraction)
print 'SDSS magnitude g: {:.2f}'.format(magG)
print 'magnitude difference: {:.2f}'.format(sineMagDiff)
print 'limiting g mag: {:.2f}'.format(magG+sineMagDiff)
plt.show()
| gpl-2.0 | -5,455,971,314,414,658,000 | 44.413043 | 272 | 0.728698 | false |
Saevon/webdnd | shared/utils/debug_toolbars.py | 1 | 1502 | import django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
import sys
class VersionDebugPanel(DebugPanel):
'''
Panel that displays the Django version.
'''
name = 'Version'
has_content = True
def nav_title(self):
return _('Versions')
def nav_subtitle(self):
return 'Django %s' % django.get_version()
def url(self):
return ''
def title(self):
return _('Versions')
def content(self):
versions = {}
versions['Web D&D'] = settings.VERSION
versions['Syncrae'] = settings.SYNCRAE_VERSION
context = self.context.copy()
context.update({
'versions': versions,
'paths': sys.path,
})
return render_to_string('debug_toolbar/panels/versions.html', context)
class SyncraeSpyDebugPanel(DebugPanel):
'''
Panel that shows Syncrae Messages
'''
name = 'Syncrae'
has_content = True
def nav_title(self):
return _('Syncrae')
def nav_subtitle(self):
return ''
def url(self):
return ''
def title(self):
return _('Syncrae')
def content(self):
return render_to_string('debug_syncrae.html', self.context)
class DividerDebugPanel(DebugPanel):
name = 'Divider'
has_content = False
def nav_title(self):
return ' '
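# To enable these panels, django-debug-toolbar's settings would list them by
# dotted path (a sketch; the module path is inferred from this file's location):
#
# DEBUG_TOOLBAR_PANELS = (
#     'shared.utils.debug_toolbars.VersionDebugPanel',
#     'shared.utils.debug_toolbars.SyncraeSpyDebugPanel',
#     'shared.utils.debug_toolbars.DividerDebugPanel',
# )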
| mit | -4,841,625,695,873,232,000 | 18.25641 | 78 | 0.608522 | false |
stefco/geco_data | geco_irig_plot.py | 1 | 5662 | #!/usr/bin/env python
# (c) Stefan Countryman, 2016-2017
DESC="""Plot an IRIG-B signal read from stdin. Assumes that the timeseries
is a sequence of newline-delimited float literals."""
FAST_CHANNEL_BITRATE = 16384 # for IRIG-B, DuoTone, etc.
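# Example invocation (a sketch; the input file name is hypothetical):
# cat one_second_of_irigb.txt | ./geco_irig_plot.py --detector L1 -O irigb.png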
# THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT.
# Quits immediately on --help or -h flags to skip slow imports when you just
# want to read the help documentation.
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=DESC)
# TODO: make this -i and --ifo instead of detector.
parser.add_argument("--detector",
help=("the detector; used in the title of the output "
"plot"))
parser.add_argument("-O", "--outfile",
help="the filename of the generated plot")
parser.add_argument("-T", "--timeseries",
help="copy from stdin to stdout while reading",
action="store_true")
parser.add_argument("-A", "--actualtime",
help=("actual time signal was recorded "
"(appears in title)"))
args = parser.parse_args()
# Force matplotlib to not use any Xwindows backend. NECESSARY ON CLUSTER.
import matplotlib
matplotlib.use('Agg')
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import geco_irig_decode
def read_timeseries_stdin(num_lines, cat_to_stdout=False):
"""Read in newline-delimited numerical data from stdin; don't read more
than a second worth of data. If cat_to_stdout is True, print data that
has been read in back to stdout (useful for piped commands)."""
timeseries = np.zeros(num_lines)
line = ""
i = 0
while i < num_lines:
line = float(sys.stdin.readline())
timeseries[i] = line
if cat_to_stdout:
print(line)
i += 1
return timeseries
def irigb_decoded_title(timeseries, IFO=None, actual_time=None):
"""Get a title for an IRIG-B timeseries plot that includes the decoded
time in the timeseries itself."""
# get the detector name
if IFO is None:
detector_suffix = ""
else:
detector_suffix = " at " + IFO
# get the actual time of recording, if provided
if actual_time is None:
actual_time_str = ""
else:
actual_time_str = "\nActual Time: {}".format(actual_time)
# add title and so on
try:
decoded_time = geco_irig_decode.get_date_from_timeseries(timeseries)
decoded_time_str = decoded_time.strftime('%a %b %d %X %Y')
except ValueError as e:
decoded_time_str = "COULD NOT DECODE TIME"
fmt = "One Second of IRIG-B Signal{}\nDecoded Time: {}{}"
return fmt.format(detector_suffix, decoded_time_str, actual_time_str)
def irigb_output_filename(outfile=None):
"""Get the output filename for an IRIG-B plot."""
if outfile is None:
output_filename = "irigb-plot-made-at-" + str(time.time()) + ".png"
else:
output_filename = outfile
# append .png if not already there
if output_filename.split(".")[-1] != "png":
output_filename += ".png"
return output_filename
def plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
output_filename=None, overlay=False, linewidth=1):
"""Plot a timeseries and produce num_subdivs subplots that show equal-sized
subdivisions of the full timeseries data to show details (good for
high-bitrate timeseries). If you want to keep plotting data to the same
figure, set 'overlay=True', and the current figure will be plotted to."""
bitrate = int(len(timeseries) / float(dt))
times = np.linspace(0, 1, num=bitrate, endpoint=False)
# find max and min values in timeseries; use these to set plot boundaries
yrange = timeseries.max() - timeseries.min()
ymax = timeseries.max() + 0.1*yrange
ymin = timeseries.min() - 0.1*yrange
if not overlay:
plt.figure()
# print("making plot")
plt.gcf().set_figwidth(7)
plt.gcf().set_figheight(4+1.2*num_subdivs) # ~1.2in height per zoomed plot
# plot the full second on the first row; lines should be black ('k' option).
plt.subplot(num_subdivs + 1, 1, 1)
plt.ylim(ymin, ymax)
plt.plot(times, timeseries, 'k', linewidth=linewidth)
plt.tick_params(axis='y', labelsize='small')
# make num_subdivs subplots to better show the full second
for i in range(num_subdivs):
# print("making plot " + str(i))
plt.subplot(num_subdivs+1, 1, i+2)
plt.ylim(ymin, ymax)
plt.xlim(float(i)/num_subdivs, (float(i)+1)/num_subdivs)
start = bitrate*i // num_subdivs
end = bitrate*(i+1) // num_subdivs
plt.plot(times[start:end], timeseries[start:end], 'k',
linewidth=linewidth)
plt.tick_params(axis='y', labelsize='small')
plt.suptitle(title)
plt.xlabel("Time since start of second [$s$]")
# print("saving plot")
plt.subplots_adjust(left=0.125, right=0.9, bottom=0.1, top=0.9, wspace=0.2,
hspace=0.5)
if not (output_filename is None):
plt.savefig(output_filename)
return plt
if __name__ == '__main__':
timeseries = read_timeseries_stdin(FAST_CHANNEL_BITRATE,
cat_to_stdout=args.timeseries)
title = irigb_decoded_title(timeseries, args.detector, args.actualtime)
output_filename = irigb_output_filename(args.outfile)
plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
output_filename=output_filename)
| mit | 2,641,587,764,302,819,300 | 40.028986 | 80 | 0.628753 | false |
TNick/pyl2extra | pyl2extra/datasets/images.py | 1 | 13590 | """
Dataset for images and related functionality.
This module does not have dependencies inside pyl2extra package, so you
can just copy-paste it inside your source tree.
To use this dataset prepare a .csv file with targets (integers or real numbers)
on first column and file paths on the second column:
.. code::
0,file1.png
1,file2.png
Image file paths are relative to current directory (``os.getcwd()``). The
images need not be square and can be in any format recognized by the
``Image`` module. Internally, the images are converted to RGB and are made
square for you.
Use it in a .yaml file like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128
}
The ``image_size`` can be skipped, in which case the size of the images is
derived from first image that is provided.
By default the class assumes a classification problem (targets are integers).
If you need to uset it in a regression problem create it like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128,
regression: True
}
As the dataset simply wraps the ``DenseDesignMatrix``, parameters like
``rng`` (random number generator), ``preprocessor`` and ``fit_preprocessor``
can be used and will be passed to ``DenseDesignMatrix`` superclass.
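A dataset can also be constructed directly from Python objects instead of a
.csv file. A minimal sketch (the file names and targets below are
hypothetical):
.. code::
    from pyl2extra.datasets.images import Images
    dataset = Images(source={'img/cat.png': 0, 'img/dog.png': 1},
                     image_size=128, classes=2)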
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "[email protected]"
import csv
import numpy
import os
from PIL import Image
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
import theano
class Images(DenseDesignMatrix):
"""
A pylearn2 dataset that loads the images from a list or csv file.
Please note that - if you use this dataset and your model has a
final Softmax layer you should construct it like so (YAML syntax):
.. code::
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
irange: .0,
n_classes: %(classes)d,
binary_target_dim: 1
}
where ``classes`` is the same number of classes passed to ``Images``
constructor. ``binary_target_dim`` is important and failing to set it
constructs the wrong architecture, causing errors like:
ValueError: VectorSpace(dim=1, dtype=float32) with total dimension 1
can't format a batch into VectorSpace(dim=X, dtype=float32) because
its total dimension is X.
Parameters
----------
source: OrderedDict, dict, str, tuple, list
This argument provides the input images and (optionally)
associated categories. The meaning of the argument depends
on the data type:
- if ``source`` is a string, it is interpreted to be the
path towards a csv file; the file must NOT have a header,
first column must contain the targets (classes or values) and
second column must contain the paths for the image files;
- if ``source`` is a dictionary, the keys must be the
paths for image files, ``Image`` instances or numpy arrays and
the values must be the classes or values (None or empty
string if this instance does not provide the labels);
- a tuple or list must have exactly one or two
members: first one must be a list or tuple of image paths or
Images or numpy arrays, while second one (optional)
has the targets (classes as integers or real values).
image_size: int, optional
The size of the images in the final dataset. All images
will be resized to be ``image_size`` x ``image_size``
pixels.
classes: int, optional
If this is a classification problem the parameter should be
used to indicate the total number of classes and targets are
expected to be integers in the range ``[0; classes-1]``.
If this is a regression problem the parameter should be ``None`` and
targets are expected to be real numbers.
rng: object, optional
A random number generator used for picking random \
indices into the design matrix when choosing minibatches.
preprocessor: Preprocessor, optional
Preprocessor to apply to images.
fit_preprocessor: bool, optional
Whether preprocessor can fit parameters when applied to training
data.
"""
def __init__(self, source, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
#: preserve original argument for future reference
self.source = source
#: Number of classes (None for regression)
self.classes = classes
# all images are loaded in ``ind`` variable
ind = _init_input(source)
# DenseDesignMatrix expects us to provide a numpy array
# we choose to have number of examples on first axis ('b'),
# then rows and columns of the image, then the channels
# always 3 in our case
self.axes = ('b', 0, 1, 'c')
if image_size is None:
dense_x = None
else:
dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3),
dtype='uint8')
categories = []
has_targets = False
for i, (img, ctg) in enumerate(ind):
if isinstance(img, Image.Image):
img = numpy.array(img)
width = img.shape[1]
height = img.shape[0]
largest = max(width, height)
if image_size is None:
# if the user did not specify an image size we determine
# the size using the first image that we encounter; this is
# useful if all images are already of required size,
# for example
image_size = largest
dense_x = numpy.zeros(shape=(len(ind), image_size,
image_size, 3),
dtype='uint8')
imgin = img
# do we need to enlarge / shrink the image?
elif largest != image_size:
wpercent = image_size / float(largest)
width = int(width * wpercent)
height = int(height * wpercent)
largest = max(width, height)
# inefficient? could use scipy.ndimage.zoom.
img_tmp = Image.fromarray(img)
img_tmp = img_tmp.resize((width, height), Image.ANTIALIAS)
imgin = numpy.array(img_tmp)
else:
imgin = img
delta_x = (largest - width) / 2
delta_y = (largest - height) / 2
delta_x2 = delta_x + width
delta_y2 = delta_y + height
#print delta_x, delta_y, delta_x2, delta_y2, width, height
dense_x[i, delta_y:delta_y2, delta_x:delta_x2, :] = imgin
categories.append(ctg)
if ctg != '':
has_targets = True
dense_x = numpy.cast[theano.config.floatX](dense_x)
# if we have categories / values convert them to proper format
if has_targets:
if classes is None:
# in regression we expect real values
dense_y = numpy.empty(shape=(len(ind), 1),
dtype=theano.config.floatX)
for i, ctg in enumerate(categories):
dense_y[i, 0] = float(ctg)
else:
# in classification we expect integers
dense_y = numpy.empty(shape=(len(ind), 1), dtype=int)
for i, ctg in enumerate(categories):
dense_y[i, 0] = int(ctg)
else:
dense_y = None
if rng is None:
rng = DenseDesignMatrix._default_seed
# everything else is handled by the DenseDesignMatrix superclass
super(Images, self).__init__(topo_view=dense_x,
y=dense_y,
axes=self.axes,
view_converter=None,
preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor,
X_labels=None,
y_labels=classes if has_targets else None)
def _init_input(source):
"""
Homogenize sources.
"""
if isinstance(source, basestring):
# this is a csv file that we're going to read
result = _load_list(_load_csv(source))
elif isinstance(source, dict):
# keys are file names, values are classes
result = _load_list(source.items())
elif isinstance(source, (list, tuple)):
# one item lists the files, the other lists the classes
if len(source) == 1:
result = _load_list([(src, None) for src in source[0]])
elif len(source) == 2:
if len(source[0]) == len(source[1]):
result = _load_list(zip(source[0], source[1]))
else:
raise ValueError("Lists/tuples provded to Images class "
"constructor are expected to have "
"same length (%d != %d)" %
(len(source[0]), len(source[1])))
else:
raise ValueError("Lists/tuples provided to Images class "
"constructor are expected to have one "
"(images only) or two members (images"
" and classes); the input has %d members." %
len(source))
else:
raise ValueError("Images class expects for its `source` argument "
"a file path (string), a dictionary of "
"file:class pairs, or a pair of lists (tuples); "
"%s is not supported" % str(source.__class__))
return result
def _load_csv(csv_path):
"""
Internal function for loading the content from a .csv file.
Parameters
----------
csv_path: str
The path towards the .csv file to read.
Returns
-------
result: list of tuples
The method creates a list of tuples that should be passed to
`_load_list()`.
"""
# we're going to accumulate files and categories here
result = []
# compute absolute path of the source csv file
csv_path = os.path.abspath(csv_path)
with open(csv_path, 'rt') as fhand:
# the reader is flexible, allowing delimiters
# other than comma; quotation can also be customized
csvr = csv.reader(fhand,
delimiter=',',
quotechar='"')
# the reader will give us a list for each row of
# the source file
for row in csvr:
# we're going to skip empty rows without warning
if len(row) == 0:
continue
# we could skip the header here, if present; we
# could even detect the column index from its
# name; but we try to keep things simple
# class/value is always first, file path second
result.append((row[1], row[0]))
return result
def _load_list(srclist):
"""
Internal function for loading the content from a list.
Image files are converted to `numpy.ndarray`;
empty classes are normalized to a string of length 0.
Parameters
----------
srclist: list of tuples
A list of tuples, with first entry in tuple being
a string, an Image or `numpy.ndarray` instances and
second being classes (None for no class).
Returns
-------
result: list of tuples
The method creates a list of tuples, with first entry in tuple being
`numpy.ndarray` instances and second being targets (None for no
target) - integer classes (classification) or real values
(regression).
"""
# we're going to accumulate Images and categories here
result = []
for img, cls in srclist:
if isinstance(img, basestring):
imgin = Image.open(img)
elif isinstance(img, numpy.ndarray):
imgin = Image.fromarray(img)
elif isinstance(img, Image.Image):
imgin = img
elif Image.isImageType(img):
imgin = img
else:
raise ValueError("Valid input for images are strings (a "
"path towards a file), pil images "
"and numpy arrays; %s is not supported" %
str(img.__class__))
if cls is None:
cls = ''
imgin = imgin.convert('RGB')
result.append((numpy.array(imgin), cls))
return result
def one_image(image, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
"""
Convenience function that creates an Images dataset from a single image.
Parameters
----------
image: string, image or numpy.ndarray
The image to use as source.
See :class:`Images` for a description of other parameters.
"""
return Images(source=((image,),),
image_size=image_size, classes=classes,
rng=rng, preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor)
| bsd-3-clause | 5,339,343,264,149,101,000 | 36.960894 | 79 | 0.576306 | false |
mahak/cloudify-cli | cloudify_cli/commands/users.py | 1 | 9023 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from .. import env
from ..cli import cfy
from ..table import print_data, print_single
from ..utils import handle_client_error
USER_COLUMNS = ['username', 'groups', 'role', 'group_system_roles', 'active',
'last_login_at', 'is_locked']
GET_DATA_COLUMNS = ['user_tenants', 'group_tenants']
NO_GET_DATA_COLUMNS = ['tenants']
USER_LABELS = {'role': 'system wide role',
'group_system_roles': 'system wide roles via groups'}
def _format_user(user):
user_tenants = dict(
(str(tenant), str(user.user_tenants[tenant]))
for tenant in user.user_tenants
)
group_tenants = dict(
(str(tenant),
dict(
(str(role),
[str(group) for group in user.group_tenants[tenant][role]])
for role in user.group_tenants[tenant]
))
for tenant in user.group_tenants
)
user['user_tenants'] = str(user_tenants)[1:-1]
user['group_tenants'] = str(group_tenants)[1:-1]
return user
def _format_group_system_roles(user):
group_system_roles = dict(
(str(role),
[str(user_group) for user_group in user['group_system_roles'][role]])
for role in user['group_system_roles']
)
user['group_system_roles'] = str(group_system_roles).strip('{}')
return user
@cfy.group(name='users')
@cfy.options.common_options
def users():
"""Handle Cloudify users
"""
if not env.is_initialized():
env.raise_uninitialized()
@users.command(name='list', short_help='List users [manager only]')
@cfy.options.sort_by('username')
@cfy.options.descending
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by,
descending,
get_data,
search,
pagination_offset,
pagination_size,
logger,
client):
"""List all users
"""
logger.info('Listing all users...')
users_list = client.users.list(
sort=sort_by,
is_descending=descending,
_get_data=get_data,
_search=search,
_offset=pagination_offset,
_size=pagination_size
)
total = users_list.metadata.pagination.total
# copy list
columns = [] + USER_COLUMNS
users_list = [_format_group_system_roles(user) for user in users_list]
if get_data:
users_list = [_format_user(user) for user in users_list]
columns += GET_DATA_COLUMNS
else:
columns += NO_GET_DATA_COLUMNS
print_data(columns, users_list, 'Users:', labels=USER_LABELS)
logger.info('Showing {0} of {1} users'.format(len(users_list), total))
@users.command(name='create', short_help='Create a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.options.security_role
@cfy.options.password
@cfy.options.tenant_name(required=False)
@cfy.options.user_tenant_role(required=False,
options_flags=['-l', '--user-tenant-role'])
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=False)
@cfy.pass_logger
def create(username,
security_role,
password,
tenant_name,
user_tenant_role,
logger,
client):
"""Create a new user on the manager
`USERNAME` is the username of the user
"""
client.users.create(username, password, security_role)
logger.info('User `{0}` created with `{1}` security role'.format(
username, security_role))
if tenant_name and user_tenant_role:
client.tenants.add_user(username, tenant_name, user_tenant_role)
logger.info(
'User `{0}` added successfully to tenant `{1}` with `{2}` role'
.format(username, tenant_name, user_tenant_role))
@users.command(name='set-password',
short_help='Set a new password for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.password
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_password(username, password, logger, client):
"""Set a new password for a user
`USERNAME` is the username of the user
"""
logger.info('Setting new password for user {0}...'.format(username))
client.users.set_password(username, password)
logger.info('New password set')
@users.command(name='set-role',
short_help='Set a new role for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.security_role
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_role(username, security_role, logger, client):
"""Set a new role for a user
`USERNAME` is the username of the user
"""
logger.info('Setting new role for user {0}...'.format(username))
client.users.set_role(username, security_role)
logger.info('New role `{0}` set'.format(security_role))
@users.command(name='get',
short_help='Get details for a single user [manager only]')
@cfy.argument(
'username', callback=cfy.validate_name, default=env.get_username())
@cfy.options.common_options
@cfy.options.get_data
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(username, get_data, logger, client):
"""Get details for a single user
`USERNAME` is the username of the user. (default: current user)
"""
logger.info('Getting info for user `{0}`...'.format(username))
if username == env.get_username():
user_details = client.users.get_self(_get_data=get_data)
else:
user_details = client.users.get(username, _get_data=get_data)
# copy list
columns = [] + USER_COLUMNS
if get_data:
_format_user(user_details)
columns += GET_DATA_COLUMNS
else:
columns += NO_GET_DATA_COLUMNS
print_single(columns,
user_details,
'Requested user info:',
labels=USER_LABELS)
@users.command(name='delete',
short_help='Delete a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(username, logger, client):
"""Delete a user
`USERNAME` is the username of the user
"""
logger.info('Deleting user `{0}`...'.format(username))
client.users.delete(username)
logger.info('User removed')
@users.command(name='activate',
short_help='Make an inactive user active [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def activate(username, logger, client):
"""Activate a user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already active'.format(username)
logger.info('Activating user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.activate(username)
logger.info('User activated')
@users.command(name='deactivate',
short_help='Make an active user inactive [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def deactivate(username, logger, client):
"""Deactivate a user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already inactive'.format(username)
logger.info('Deactivating user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.deactivate(username)
logger.info('User deactivated')
@users.command(name='unlock',
short_help='Unlock a locked user [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def unlock(username, logger, client):
"""Unlock a locked user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already unlocked'.format(username)
logger.info('Unlocking user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.unlock(username)
logger.info('User unlocked')
| apache-2.0 | -221,964,532,561,751,600 | 30.883392 | 78 | 0.654217 | false |
openstack/heat | heat/engine/update.py | 1 | 12695 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource as resource_objects
LOG = logging.getLogger(__name__)
class StackUpdate(object):
"""A Task to perform the update of an existing stack to a new template."""
def __init__(self, existing_stack, new_stack, previous_stack,
rollback=False):
"""Initialise with the existing stack and the new stack."""
self.existing_stack = existing_stack
self.new_stack = new_stack
self.previous_stack = previous_stack
self.rollback = rollback
self.existing_snippets = dict((n, r.frozen_definition())
for n, r in self.existing_stack.items()
if n in self.new_stack)
def __repr__(self):
if self.rollback:
return '%s Rollback' % str(self.existing_stack)
else:
return '%s Update' % str(self.existing_stack)
def __call__(self):
"""Return a co-routine that updates the stack."""
cleanup_prev = scheduler.DependencyTaskGroup(
self.previous_stack.dependencies,
self._remove_backup_resource,
reverse=True)
def get_error_wait_time(resource):
return resource.cancel_grace_period()
updater = scheduler.DependencyTaskGroup(
self.dependencies(),
self._resource_update,
error_wait_time=get_error_wait_time)
if not self.rollback:
yield from cleanup_prev()
try:
yield from updater()
finally:
self.previous_stack.reset_dependencies()
def _resource_update(self, res):
if res.name in self.new_stack and self.new_stack[res.name] is res:
return self._process_new_resource_update(res)
else:
return self._process_existing_resource_update(res)
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s", prev_res.name)
yield from prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
resource_objects.Resource.exchange_stacks(existing_res.stack.context,
existing_res.id, prev_res.id)
prev_stack, existing_stack = prev_res.stack, existing_res.stack
prev_stack.add_resource(existing_res)
existing_stack.add_resource(prev_res)
def _create_resource(self, new_res):
res_name = new_res.name
# Clean up previous resource
if res_name in self.previous_stack:
prev_res = self.previous_stack[res_name]
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s", res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
LOG.debug("Deleting backup Resource %s", res_name)
yield from prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s", res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
self.existing_stack.add_resource(new_res)
        # Save the new resource definition to the backup stack if it is not
        # already present in the backup stack template; this allows all
        # dependencies of the existing resource to be resolved after it was
        # copied to the backup stack
if (res_name not in
self.previous_stack.t[self.previous_stack.t.RESOURCES]):
LOG.debug("Storing definition of new Resource %s", res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
yield from new_res.create()
self._update_resource_data(new_res)
def _check_replace_restricted(self, res):
registry = res.stack.env.registry
restricted_actions = registry.get_rsrc_restricted_actions(res.name)
existing_res = self.existing_stack[res.name]
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, existing_res,
existing_res.UPDATE)
existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
str(ex))
raise failure
def _update_resource_data(self, resource):
# Use the *new* template to determine the attrs to cache
node_data = resource.node_data(self.new_stack.defn)
stk_defn.update_resource_data(self.existing_stack.defn,
resource.name, node_data)
# Also update the new stack's definition with the data, so that
# following resources can calculate dep_attr values correctly (e.g. if
# the actual attribute name in a get_attr function also comes from a
# get_attr function.)
stk_defn.update_resource_data(self.new_stack.defn,
resource.name, node_data)
def _process_new_resource_update(self, new_res):
res_name = new_res.name
if res_name in self.existing_stack:
existing_res = self.existing_stack[res_name]
is_substituted = existing_res.check_is_substituted(type(new_res))
if type(existing_res) is type(new_res) or is_substituted:
try:
yield from self._update_in_place(existing_res,
new_res,
is_substituted)
except resource.UpdateReplace:
pass
else:
                # Save the updated resource definition to the backup stack
                # because it keeps the backup stack resources
                # synchronized
LOG.debug("Storing definition of updated Resource %s",
res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
self.existing_stack.t.add_resource(new_res.t)
self.existing_stack.t.store(self.existing_stack.context)
LOG.info("Resource %(res_name)s for stack "
"%(stack_name)s updated",
{'res_name': res_name,
'stack_name': self.existing_stack.name})
self._update_resource_data(existing_res)
return
else:
self._check_replace_restricted(new_res)
yield from self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res, is_substituted=False):
existing_snippet = self.existing_snippets[existing_res.name]
prev_res = self.previous_stack.get(new_res.name)
# Note the new resource snippet is resolved in the context
# of the existing stack (which is the stack being updated)
# but with the template of the new stack (in case the update
# is switching template implementations)
new_snippet = new_res.t.reparse(self.existing_stack.defn,
self.new_stack.t)
if is_substituted:
substitute = type(new_res)(existing_res.name,
existing_res.t,
existing_res.stack)
existing_res.stack.resources[existing_res.name] = substitute
existing_res = substitute
existing_res.converge = self.new_stack.converge
yield from existing_res.update(new_snippet, existing_snippet,
prev_resource=prev_res)
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
backup_res = self.previous_stack[res_name]
yield from self._remove_backup_resource(backup_res)
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
if new_res.state == (new_res.INIT, new_res.COMPLETE):
# Already updated in-place
return
if existing_res.stack is not self.previous_stack:
yield from existing_res.destroy()
if res_name not in self.new_stack:
self.existing_stack.remove_resource(res_name)
def dependencies(self):
"""Return the Dependencies graph for the update.
Returns a Dependencies object representing the dependencies between
update operations to move from an existing stack definition to a new
one.
"""
existing_deps = self.existing_stack.dependencies
new_deps = self.new_stack.dependencies
def edges():
# Create/update the new stack's resources in create order
for e in new_deps.graph().edges():
yield e
# Destroy/cleanup the old stack's resources in delete order
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
for name, res in self.existing_stack.items():
if name in self.new_stack:
yield (res, self.new_stack[name])
return dependencies.Dependencies(edges())
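    # Illustrative sketch (hypothetical resources): if the new template has B
    # depending on A, and the old stack still holds a resource C, the merged
    # graph contains the create-order edge (B, A), the reversed cleanup edges
    # for C, and an (old, new) edge per common name, so an old resource is
    # only cleaned up after its replacement has been handled.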
def preview(self):
upd_keys = set(self.new_stack.resources.keys())
cur_keys = set(self.existing_stack.resources.keys())
common_keys = cur_keys.intersection(upd_keys)
deleted_keys = cur_keys.difference(upd_keys)
added_keys = upd_keys.difference(cur_keys)
updated_keys = []
replaced_keys = []
for key in common_keys:
current_res = self.existing_stack.resources[key]
updated_res = self.new_stack.resources[key]
current_props = current_res.frozen_definition().properties(
current_res.properties_schema, current_res.context)
updated_props = updated_res.frozen_definition().properties(
updated_res.properties_schema, updated_res.context)
# type comparison must match that in _process_new_resource_update
if type(current_res) is not type(updated_res):
replaced_keys.append(key)
continue
try:
if current_res.preview_update(updated_res.frozen_definition(),
current_res.frozen_definition(),
updated_props, current_props,
None):
updated_keys.append(key)
except resource.UpdateReplace:
replaced_keys.append(key)
return {
'unchanged': list(set(common_keys).difference(
set(updated_keys + replaced_keys))),
'updated': updated_keys,
'replaced': replaced_keys,
'added': list(added_keys),
'deleted': list(deleted_keys),
}
| apache-2.0 | 3,472,610,170,852,946,400 | 41.316667 | 79 | 0.579992 | false |
diefenbach/django-lfs | lfs/marketing/models.py | 1 | 1821 | # django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
# lfs imports
from lfs.catalog.models import Product
from lfs.order.models import Order
class Topseller(models.Model):
"""Selected products are in any case among topsellers.
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
class Meta:
ordering = ["position"]
app_label = 'marketing'
def __str__(self):
return u"%s (%s)" % (self.product.name, self.position)
class ProductSales(models.Model):
"""Stores totals sales per product.
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
sales = models.IntegerField(_(u"sales"), default=0)
class Meta:
app_label = 'marketing'
class FeaturedProduct(models.Model):
"""Featured products are manually selected by the shop owner
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
active = models.BooleanField(_(u"Active"), default=True)
class Meta:
ordering = ["position"]
app_label = 'marketing'
def __str__(self):
return u"%s (%s)" % (self.product.name, self.position)
class OrderRatingMail(models.Model):
"""Saves whether and when a rating mail has been send for an order.
"""
order = models.ForeignKey(Order, models.CASCADE, verbose_name=_(u"Order"))
send_date = models.DateTimeField(auto_now=True)
def __str__(self):
return u"%s (%s)" % (self.order.id, self.send_date.strftime(ugettext('DATE_FORMAT')))
class Meta:
app_label = 'marketing'
| bsd-3-clause | -2,391,349,366,419,636,700 | 29.864407 | 93 | 0.667216 | false |
rain2o/collective.pfg.skiplogic | setup.py | 1 | 1050 | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='collective.pfg.skiplogic',
version=version,
description="Adds skip logic capabilities to ploneformgen forms",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='',
author='',
author_email='',
url='http://svn.plone.org/svn/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective', 'collective.pfg'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| gpl-2.0 | -539,731,365,731,175,900 | 28.166667 | 72 | 0.569524 | false |
benregn/itu-courses | itu/pipelines.py | 1 | 1027 | import pymongo
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
class MongoDBPipeline(object):
def __init__(self):
connection = pymongo.Connection(
settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
db = connection[settings['MONGODB_DB']]
self.collection = db[settings['MONGODB_COLLECTION']]
def process_item(self, item, spider):
valid = True
for data in item:
# here we only check if the data is not null
# but we could do any crazy validation we want
            if not item[data]:
valid = False
raise DropItem(
"Missing %s course from %s" % (data, item['url']))
if valid:
self.collection.insert(dict(item))
log.msg("Item written to MongoDB database %s/%s" %
(settings['MONGODB_DB'], settings['MONGODB_COLLECTION']),
level=log.DEBUG, spider=spider)
return item
| mit | 7,761,366,228,736,481,000 | 33.233333 | 77 | 0.581305 | false |
robertnishihara/ray | streaming/python/tests/test_word_count.py | 1 | 1689 | import os
import ray
from ray.streaming import StreamingContext
def test_word_count():
ray.init(_load_code_from_local=True)
ctx = StreamingContext.Builder() \
.build()
ctx.read_text_file(__file__) \
.set_parallelism(1) \
.flat_map(lambda x: x.split()) \
.map(lambda x: (x, 1)) \
.key_by(lambda x: x[0]) \
.reduce(lambda old_value, new_value:
(old_value[0], old_value[1] + new_value[1])) \
.filter(lambda x: "ray" not in x) \
.sink(lambda x: print("result", x))
ctx.submit("word_count")
import time
time.sleep(3)
ray.shutdown()
def test_simple_word_count():
ray.init(_load_code_from_local=True)
ctx = StreamingContext.Builder() \
.build()
sink_file = "/tmp/ray_streaming_test_simple_word_count.txt"
if os.path.exists(sink_file):
os.remove(sink_file)
def sink_func(x):
with open(sink_file, "a") as f:
line = "{}:{},".format(x[0], x[1])
print("sink_func", line)
f.write(line)
ctx.from_values("a", "b", "c") \
.set_parallelism(1) \
.flat_map(lambda x: [x, x]) \
.map(lambda x: (x, 1)) \
.key_by(lambda x: x[0]) \
.reduce(lambda old_value, new_value:
(old_value[0], old_value[1] + new_value[1])) \
.sink(sink_func)
ctx.submit("word_count")
import time
time.sleep(3)
ray.shutdown()
with open(sink_file, "r") as f:
result = f.read()
assert "a:2" in result
assert "b:2" in result
assert "c:2" in result
if __name__ == "__main__":
test_word_count()
test_simple_word_count()
| apache-2.0 | -5,655,306,037,818,127,000 | 27.15 | 63 | 0.536412 | false |
letolab/airy | airy/utils/cache.py | 1 | 9676 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from airy.core.conf import settings
from airy.core.cache import get_cache
from airy.utils.encoding import smart_str, iri_to_uri
from airy.utils.http import http_date
from airy.utils.hashcompat import md5_constructor
from airy.utils.translation import get_language
from airy.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
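# Illustrative usage (assumed values): patch_cache_control(response,
# max_age=3600, public=True) produces a header such as
# "Cache-Control: max-age=3600, public" -- True-valued keywords become bare
# directives and underscores in keyword names become hyphens.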
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
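# Illustrative usage (assumed header): with "Cache-Control: max-age=600, public"
# on the response, get_max_age(response) returns 600; a missing header or a
# non-integer max-age value yields None.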
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
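# Illustrative usage (assumed values): if response['Vary'] is "Cookie",
# patch_vary_headers(response, ('Accept-Language', 'cookie')) leaves it as
# "Cookie, Accept-Language" -- matching is case-insensitive and the existing
# order is preserved.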
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If enabled, returns the cache key ending with a locale."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, request.method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
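# Illustrative flow (assumed header values): for a response carrying
# "Vary: Cookie, Accept-Language", learn_cache_key() stores
# ['HTTP_COOKIE', 'HTTP_ACCEPT_LANGUAGE'] under the header cache key, so a
# later get_cache_key() call can hash exactly those request headers into the
# page cache key.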
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| bsd-2-clause | -6,054,072,846,640,393,000 | 39.655462 | 88 | 0.671042 | false |
znick/anytask | anytask/users/models.py | 1 | 9320 | # -*- coding: utf-8 -*-
import logging
import os
from courses.models import Course
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from groups.models import Group
from mail.models import Message
from users.model_user_status import UserStatus
from years.common import get_current_year
from anytask.storage import OverwriteStorage
logger = logging.getLogger('django.request')
def get_upload_path(instance, filename):
return os.path.join('images', 'user_%d' % instance.user.id, filename)
class UserProfile(models.Model):
user = models.OneToOneField(User, db_index=True, null=False, blank=False, unique=True, related_name='profile')
middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True, related_name='users_by_status')
avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
storage=OverwriteStorage())
birth_date = models.DateField(blank=True, null=True)
info = models.TextField(default="", blank=True, null=True)
phone = models.CharField(max_length=128, null=True, blank=True)
city_of_residence = models.CharField(max_length=191, null=True, blank=True)
university = models.CharField(max_length=191, null=True, blank=True)
university_in_process = models.BooleanField(null=False, blank=False, default=False)
university_class = models.CharField(max_length=191, null=True, blank=True)
university_department = models.CharField(max_length=191, null=True, blank=True)
university_year_end = models.CharField(max_length=191, null=True, blank=True)
additional_info = models.TextField(null=True, blank=True)
unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)
unread_messages = models.ManyToManyField(Message, blank=True, related_name='unread_messages')
deleted_messages = models.ManyToManyField(Message, blank=True, related_name='deleted_messages')
send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='send_notify_messages')
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=False)
ya_uid = models.IntegerField(null=True, blank=True)
ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_uid = models.CharField(max_length=191, null=True, blank=True)
ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_uid = models.CharField(max_length=191, null=True, blank=True)
ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
telegram_uid = models.IntegerField(default=None, null=True, blank=True)
notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)
language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)
time_zone = models.TextField(null=False, blank=False, default='Europe/Moscow')
location = models.TextField(null=True, blank=True, default="")
def is_current_year_student(self):
return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0
def __unicode__(self):
return unicode(self.user)
def is_active(self):
for status in self.user_status.all():
if status.tag == 'not_active' or status.tag == 'academic':
return False
return True
def set_status(self, new_status):
if not isinstance(new_status, UserStatus):
new_status = UserStatus.objects.get(id=new_status)
if new_status.type:
self.user_status.remove(*self.user_status.filter(type=new_status.type))
self.user_status.add(new_status)
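    # Illustrative behaviour (hypothetical statuses): if a profile already has
    # a status of type 'activity' and set_status() is called with another
    # status of that same type, the old one is removed first, so a user keeps
    # at most one status per typed group.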
def get_unread_count(self):
return self.unread_messages.exclude(id__in=self.deleted_messages.all()).count()
def can_sync_contest(self):
for course in Course.objects.filter(is_active=True):
if course.get_user_group(self.user) and course.send_to_contest_from_users:
return True
return False
class UserProfileLog(models.Model):
user = models.ForeignKey(User, db_index=True, null=False, blank=False, related_name='profiles_logs_by_user')
middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True)
avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
storage=OverwriteStorage())
birth_date = models.DateField(blank=True, null=True)
info = models.TextField(default="", blank=True, null=True)
phone = models.CharField(max_length=128, null=True, blank=True)
city_of_residence = models.CharField(max_length=191, null=True, blank=True)
university = models.CharField(max_length=191, null=True, blank=True)
university_in_process = models.BooleanField(null=False, blank=False, default=False)
university_class = models.CharField(max_length=50, null=True, blank=True)
university_department = models.CharField(max_length=191, null=True, blank=True)
university_year_end = models.CharField(max_length=20, null=True, blank=True)
additional_info = models.TextField(null=True, blank=True)
unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)
unread_messages = models.ManyToManyField(Message, blank=True, related_name='log_unread_messages')
deleted_messages = models.ManyToManyField(Message, blank=True, related_name='log_deleted_messages')
send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='log_send_notify_messages')
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=True)
ya_uid = models.IntegerField(null=True, blank=True)
ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_uid = models.IntegerField(null=True, blank=True)
ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_uid = models.IntegerField(null=True, blank=True)
ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
telegram_uid = models.IntegerField(default=None, null=True, blank=True)
notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)
language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
def is_current_year_student(self):
return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0
def __unicode__(self):
return unicode(self.user)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
| mit | -698,361,928,609,399,700 | 49.378378 | 114 | 0.720172 | false |
QQuick/Transcrypt | transcrypt/modules/org/transcrypt/autotester/__init__.py | 1 | 12645 | # First run a test from the command prompt, generating an HTML file.
# The output of the test is stored in a DIV.
# Also the script is automatically included in the HTML file.
# Loading the HTML file will run the script.
# This will compare the output of the script running in the browser to the output in the DIV.
# If those two match, the test reports OK, else it reports failure.
from org.transcrypt.stubs.browser import __main__, __envir__, __pragma__
from org.transcrypt.autotester.html import HTMLGenerator, DataConverter, JSTesterUI, itemsAreEqual
# Don't import __envir__ from __base__ since it will overwrite __buildin__.__envir__ in the browser
# Import from stubs will be skipped in the browser
# ... The ice is a bit thin here
__pragma__ ('nokwargs')
import itertools
def getFileLocation(ancestor):
""" This function needs to crawl up the stack
and find out where the ancestor caller of
this function was in the source code of either the
python or javascript, depending on environment.
@param ancestor the ancestor of this function that
we want to capture file information about.
@return string indicating the file position and line number
"""
if __envir__.executor_name == __envir__.transpiler_name: # js
s = None
__pragma__('js', '{}',
'''
var e = new Error();
if ( ! e.stack ) {
console.log("MAJOR ISSUE: Browser Error lacks Stack");
} else {
s = e.stack;
}
''')
# Now we will process the stack to find the grandparent
# calling function
# @note - I'm explicitly not including a 're' module
# dependency here
frames = None
__pragma__('js', '{}',
'''
var linereg = new RegExp("\\n\\r|\\n", "g");
frames = s.toString().split(linereg);
''')
if ( frames is None or (len(frames) < 2)):
__pragma__('js', '{}', 'console.log("Failed to Split Stack");')
return("UNKNOWN:???")
# @note - if the call stack in transcrypts javascript
# translation changes then this index may need to change
# @todo - need more work here to determine this because
# this is fragile
gpFrame = frames[(ancestor*2 + 1)]
# This regex splits the string coming from the javascript
# stacktrace so that we can connect the file and line number
# runTests (http://localhost:8080/run/autotest.js:3159:8)
# func URL filename lineno:colno
# Group 1 = function
# Group 2 & 3 = protocol and hostname
# Group 4 = Path on this host (filename is at the end)
# Group 5 = lineno
# Group 6 = column number in file
frameReg = r"([^(]*)\(?([^:]*:)\/{2,3}([^:/]*:?)([^:]*):(\d+):(\d+)"
m = None
__pragma__('js', '{}',
'''
var r = new RegExp(frameReg);
m = r.exec(gpFrame);
''')
if m:
filepath = m[4]
# Split the filepath and take the last element
# to the get filename
pathParts = filepath.split("/")
filename = pathParts[len(pathParts)-1]
lineno = m[5]
return( "{}:{}".format(filename, lineno) )
else:
__pragma__('js', '{}', 'console.log("Failed to Match Frame", gpFrame);')
return("UNKNOWN:???")
#ELSE
# Needed because Transcrypt imports are compile time
__pragma__("skip")
from inspect import getframeinfo, stack
s = stack()
caller = getframeinfo(s[ancestor][0])
# Trim the file name path so that we don't get
# a lot of unnecessary content
filepath = caller.filename
# @todo - this is a hack - we should use os.path
pathParts = filepath.split('/')
filename = "/".join(pathParts[-2:])
return( "%s:%d" % (filename, caller.lineno))
__pragma__ ('noskip')
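# Illustrative result (hypothetical caller): getFileLocation(ancestor=2) returns
# a string such as "autotest/test_module.py:42" -- the caller's file name and
# line number -- in both the CPython run and the transcribed JavaScript run.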
class AutoTester:
""" Main testing class for comparing CPython to Transcrypt. This
class is primarily used by calling the "check" method to confirm that
the result is the same in both environments and "done" when all checks
for a particular module have been completed.
"""
def __init__ (self, symbols = []):
self.symbols = symbols
# refDict/testDict contains the test results
# of each testlet identified by name as the key
self._currTestlet = "UNKNOWN"
self.testDict = {}
self.refDict = {}
if __envir__.executor_name == __envir__.transpiler_name:
self.ui = JSTesterUI()
else:
self.ui = None
def sortedRepr (self, any):
        # When using sets or dicts, use elements or keys
# of one type, in sort order
def tryGetNumKey (key):
if type (key) == str: # Try to interpret key as numerical, see comment with repr function in __builtins__
try:
return int (key)
except:
try:
return float (key)
except:
return key
else:
return key
if type (any) == dict:
return '{' + ', '.join ([
'{}: {}'.format (repr (key), repr (any [key]))
for index, key in enumerate (sorted ([tryGetNumKey (key) for key in any.keys ()], key = lambda aKey: str (aKey)))
]) + '}'
elif type (any) == set:
if len (any):
return '{' + ', '.join (sorted ([str (item) for item in list (any)])) + '}'
else:
return repr (any)
elif type (any) == range:
return repr (list (any))
else:
return repr (any)
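    # Illustrative outputs (hypothetical values): sortedRepr({'b': 1, 'a': 2})
    # gives "{'a': 2, 'b': 1}" and sortedRepr({3, 1, 2}) gives "{1, 2, 3}", so
    # dict/set ordering differences between CPython and the browser cannot
    # cause spurious mismatches in the recorded test output.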
__pragma__('kwargs')
def check (self, *args, ancestor = 2):
""" Given a set of values from either the python or transcrypt
environments, we log the position of the check call in the test
and representative values of the passed arguments for later
comparison.
"""
position=getFileLocation(ancestor)
# N.B. stubs.browser provides a special sorting repr
item = ' '.join ([self.sortedRepr (arg) for arg in args])
if __envir__.executor_name == __envir__.transpiler_name:
self.testDict[self._currTestlet].append((position,item))
else:
self.refDict[self._currTestlet].append((position,item))
__pragma__('nokwargs')
def expectException(self, func):
""" This method attempts to call the passed method and
checks to see whether an exception was generated.
@return string indicating "no exception" or "exception"
"""
try:
func()
return("no exception")
except Exception as exc:
return("exception")
def throwToError(self, func):
""" This function invokes the passed function and then
converts an exception to an error response so that
the unit test can continue even in the case where an
exception may or may not occur.
"""
try:
return(func())
except Exception as exc:
return (None, "!!!{}".format(str(exc)))
def checkEval(self, func):
""" Check the result of the passed function which is
invoked without arguments. If this function throws an
exception, that exception is caught and converted to an error
        which can be compared against the result. This allows the
        user to control for exceptions that may or may not be generated
in the unit tests
"""
ret = self.throwToError(func)
self.check(ret, ancestor = 3)
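    # Illustrative usage inside a testlet (hypothetical expression):
    # autoTester.checkEval(lambda: 1 // 0) does not abort the run; the raised
    # exception is converted by throwToError() into an error marker prefixed
    # with "!!!", which is then recorded via check() and compared between the
    # CPython and Transcrypt runs.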
def checkPad(self, val, count):
""" This method is to help manage flow control in unit tests and
keep all unit tests aligned
"""
for i in range(0, count):
self.check(val)
def _getTotalErrorCnt(self, testData, refData):
""" This method determines the total number of non-matching
values in the test and reference data for a particular module.
"""
errCount = 0
for i,(refPos, refItem) in enumerate(refData):
try:
testPos,testItem = testData[i]
if not itemsAreEqual (testItem, refItem):
errCount+=1
except:
errCount+=1
return(errCount)
def compare (self):
# Load the python reference data from the hidden HTML div
dc = DataConverter()
self.refDict = dc.getPythonResults()
totalErrors = 0
sKeys = sorted(self.refDict.keys())
for key in sKeys:
refData = self.refDict[key]
try:
testData = self.testDict[key]
if ( testData is None ):
raise KeyError("No Test Data Module: {}".format(key))
except KeyError:
# No Test Data found for this key - we will populate with
# errors for all ref data
self.ui.appendSeqRowName(key, len(refData))
for i,(refPos, refItem) in enumerate(refData):
self.ui.appendTableResult(key, None, None, refPos, refItem, False)
continue
            # now that we have testData, let's determine the total number of
# errors for this test module. This will allow us to both set
# the num of errors in the test module header row and set the
# rows to the appropriate initial collapsed/expanded state.
errCount= self._getTotalErrorCnt(testData, refData)
collapse = (errCount == 0)
self.ui.appendSeqRowName(key, errCount)
# Now we will populate the table with all the rows
            # of data for the comparison
for i,(refPos, refItem) in enumerate(refData):
try:
# This will throw if testData's length is
# shorter than refData's
testPos,testItem = testData[i]
except:
testPos = None
testItem = None
self.ui.appendTableResult(
key, testPos, testItem, refPos, refItem, collapse
)
totalErrors += errCount
self.ui.setOutputStatus( totalErrors == 0 )
def _cleanName(self, name):
""" Clean the passed name of characters that won't be allowed
in CSS class or HTML id strings.
"""
# Convert testletName to replace any of the characters that
# are not acceptable in a CSS class or HTML id - this is to
# make our lives easier
# @note - I'm SPECIFICALLY not using a regex here because the
# regex engine module is still under dev and could possibly
# have issues
ret = name
invalidChars = [
'~', '!', '@', '$', '%',
'^', '&', '*', '(', ')',
'+', '=', ',', '.', '/',
"'", ';', ':', '"', '?',
'>', '<', '[', ']', '\\',
'{', '}', '|', '`', '#',
" ",
]
for ch in invalidChars:
ret = ret.replace(ch, "_")
return(ret)
def run (self, testlet, testletName):
testletName = self._cleanName(testletName)
self._currTestlet = testletName
if __envir__.executor_name == __envir__.transpiler_name:
self.testDict[self._currTestlet] = []
else:
self.refDict[self._currTestlet] = []
try:
testlet.run (self)
except Exception as exc:
if ( self.ui is not None ):
self.ui.setOutputStatus(False)
self.ui.showException(testletName, exc)
else:
# Error - No UI yet, reraise specific exception to enable finding out why
raise
def done (self):
if __envir__.executor_name == __envir__.transpiler_name:
self.compare ()
else:
fnameBase = __main__.__file__.replace ('\\', '/')
hg = HTMLGenerator(fnameBase)
hg.generate_html(self.refDict)
| apache-2.0 | -3,752,604,880,992,353,000 | 37.515625 | 129 | 0.535152 | false |
walterbender/Pippy | pippy_app.py | 2 | 59457 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007,2008,2009 Chris Ball, based on Collabora's
# "hellomesh" demo.
#
# Copyright (C) 2013,14 Walter Bender
# Copyright (C) 2013,14 Ignacio Rodriguez
# Copyright (C) 2013 Jorge Gomez
# Copyright (C) 2013,14 Sai Vineet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Pippy Activity: A simple Python programming activity ."""
import re
import os
import subprocess
from random import uniform
import locale
import json
import sys
from shutil import copy2
from signal import SIGTERM
from gettext import gettext as _
import uuid
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi import require_version
require_version('Gdk', '3.0')
require_version('Gtk', '3.0')
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Pango
try:
require_version('Vte', '2.91')
except:
require_version('Vte', '2.90')
from gi.repository import Vte
from gi.repository import GObject
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
from sugar3.datastore import datastore
from sugar3.activity import activity as activity
from sugar3.activity.widgets import EditToolbar
from sugar3.activity.widgets import StopButton
from sugar3.activity.activity import get_bundle_path
from sugar3.graphics.alert import Alert
from sugar3.graphics.alert import ConfirmationAlert
from sugar3.graphics.alert import NotifyAlert
from sugar3.graphics.icon import Icon
from sugar3.graphics.objectchooser import ObjectChooser
from sugar3.graphics.toggletoolbutton import ToggleToolButton
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.activity.widgets import ActivityToolbarButton
from jarabe.view.customizebundle import generate_unique_id
from activity import ViewSourceActivity
from activity import TARGET_TYPE_TEXT
from collabwrapper import CollabWrapper
from filedialog import FileDialog
from icondialog import IconDialog
from notebook import SourceNotebook, tab_object
from toolbars import DevelopViewToolbar
import sound_check
import logging
text_buffer = None
# magic prefix to use utf-8 source encoding
PYTHON_PREFIX = '''#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
# Force category names into Pootle
DEFAULT_CATEGORIES = [_('graphics'), _('math'), _('python'), _('sound'),
_('string'), _('tutorials')]
_logger = logging.getLogger('pippy-activity')
DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='{modulename}',
version='1.0',
py_modules=[
{filenames}
],
)
""" # This is .format()'ed with the list of the file names.
def _has_new_vte_api():
try:
return (Vte.MAJOR_VERSION >= 0 and
Vte.MINOR_VERSION >= 38)
except:
# Really old versions of Vte don't have VERSION
return False
def _find_object_id(activity_id, mimetype='text/x-python'):
''' Round-about way of accessing self._jobject.object_id '''
dsobjects, nobjects = datastore.find({'mime_type': [mimetype]})
for dsobject in dsobjects:
if 'activity_id' in dsobject.metadata and \
dsobject.metadata['activity_id'] == activity_id:
return dsobject.object_id
return None
class PippyActivity(ViewSourceActivity):
'''Pippy Activity as specified in activity.info'''
def __init__(self, handle):
self._pippy_instance = self
self.session_data = [] # Used to manage saving
self._loaded_session = [] # Used to manage tabs
self._py_file_loaded_from_journal = False
self._py_object_id = None
self._dialog = None
sys.path.append(os.path.join(self.get_activity_root(), 'Library'))
ViewSourceActivity.__init__(self, handle)
self._collab = CollabWrapper(self)
self._collab.message.connect(self.__message_cb)
self.set_canvas(self.initialize_display())
self.after_init()
self.connect("notify::active", self.__active_cb)
self._collab.setup()
def focus():
""" Enforce focus for the text view once. """
widget = self.get_toplevel().get_focus()
textview = self._source_tabs.get_text_view()
if widget is None and textview is not None:
textview.grab_focus()
return True
return False
GLib.timeout_add(100, focus)
def initialize_display(self):
'''Build activity toolbar with title input, share button and export
buttons
'''
toolbar_box = ToolbarBox()
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, 0)
self.set_toolbar_box(toolbar_box)
activity_button.show()
toolbar_box.show()
activity_toolbar = activity_button.page
separator = Gtk.SeparatorToolItem()
activity_toolbar.insert(separator, -1)
separator.show()
button = ToolButton('pippy-import-doc')
button.set_tooltip(_('Import Python file to new tab'))
button.connect('clicked', self._import_py_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-export-doc')
button.set_tooltip(_('Export as Pippy document'))
button.connect('clicked', self._export_document_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-export-library')
button.set_tooltip(_('Save this file to the Pippy library'))
button.connect('clicked', self._save_as_library)
activity_toolbar.insert(button, -1)
if not self._library_writable():
button.set_sensitive(False)
button.show()
button = ToolButton('pippy-export-example')
button.set_tooltip(_('Export as new Pippy example'))
button.connect('clicked', self._export_example_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-create-bundle')
button.set_tooltip(_('Create a Sugar activity bundle'))
button.connect('clicked', self._create_bundle_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-create-distutils')
# TRANS: A distutils package is used to distribute Python modules
button.set_tooltip(_('Export as a distutils package'))
button.connect('clicked', self._export_distutils_cb)
activity_toolbar.insert(button, -1)
button.show()
self._edit_toolbar = EditToolbar()
button = ToolbarButton()
button.set_page(self._edit_toolbar)
button.props.icon_name = 'toolbar-edit'
button.props.label = _('Edit')
self.get_toolbar_box().toolbar.insert(button, -1)
button.show()
self._edit_toolbar.show()
self._edit_toolbar.undo.connect('clicked', self.__undobutton_cb)
self._edit_toolbar.redo.connect('clicked', self.__redobutton_cb)
self._edit_toolbar.copy.connect('clicked', self.__copybutton_cb)
self._edit_toolbar.paste.connect('clicked', self.__pastebutton_cb)
view_btn = ToolbarButton()
view_toolbar = DevelopViewToolbar(self)
view_btn.props.page = view_toolbar
view_btn.props.icon_name = 'toolbar-view'
view_btn.props.label = _('View')
view_toolbar.connect('font-size-changed',
self._font_size_changed_cb)
self.get_toolbar_box().toolbar.insert(view_btn, -1)
self.view_toolbar = view_toolbar
view_toolbar.show()
actions_toolbar = self.get_toolbar_box().toolbar
self._toggle_output = ToggleToolButton('tray-show')
self._toggle_output.set_tooltip(_('Show output panel'))
self._toggle_output.connect('toggled', self._toggle_output_cb)
actions_toolbar.insert(self._toggle_output, -1)
self._toggle_output.show()
self._inverted_colors = ToggleToolButton(icon_name='dark-theme')
self._inverted_colors.set_tooltip(_('Inverted Colors'))
self._inverted_colors.set_accelerator('<Ctrl><Shift>I')
self._inverted_colors.connect(
'toggled', self.__inverted_colors_toggled_cb)
actions_toolbar.insert(self._inverted_colors, -1)
self._inverted_colors.show()
icons_path = os.path.join(get_bundle_path(), 'icons')
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'run_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'run_color.svg'))
icon_color.show()
button = ToolButton(label=_('Run!'))
button.props.accelerator = _('<alt>r')
button.set_icon_widget(icon_bw)
button.set_tooltip(_('Run!'))
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.connect('clicked', self._go_button_cb)
actions_toolbar.insert(button, -1)
button.show()
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'stopit_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'stopit_color.svg'))
icon_color.show()
button = ToolButton(label=_('Stop'))
button.props.accelerator = _('<alt>s')
button.set_icon_widget(icon_bw)
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.connect('clicked', self._stop_button_cb)
button.set_tooltip(_('Stop'))
actions_toolbar.insert(button, -1)
button.show()
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'eraser_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'eraser_color.svg'))
icon_color.show()
button = ToolButton(label=_('Clear output panel'))
button.props.accelerator = _('<alt>c')
button.set_icon_widget(icon_bw)
button.connect('clicked', self._clear_button_cb)
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.set_tooltip(_('Clear output panel'))
actions_toolbar.insert(button, -1)
button.show()
activity_toolbar.show()
separator = Gtk.SeparatorToolItem()
self.get_toolbar_box().toolbar.insert(separator, -1)
separator.show()
button = ToolButton('pippy-openoff')
button.set_tooltip(_('Open an example'))
button.connect('clicked', self._load_example_cb)
self.get_toolbar_box().toolbar.insert(button, -1)
button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
self.get_toolbar_box().toolbar.insert(separator, -1)
separator.show()
stop = StopButton(self)
self.get_toolbar_box().toolbar.insert(stop, -1)
stop.show()
vpane = Gtk.Paned.new(orientation=Gtk.Orientation.VERTICAL)
vpane.set_position(400) # setting initial position
self.paths = []
try:
if sound_check.finddir():
TAMTAM_AVAILABLE = True
else:
TAMTAM_AVAILABLE = False
except sound_check.SoundLibraryNotFoundError:
TAMTAM_AVAILABLE = False
data_path = os.path.join(get_bundle_path(), 'data')
# get default language from locale
locale_lang = locale.getdefaultlocale()[0]
if locale_lang is None:
lang = 'en'
else:
lang = locale_lang.split('_')[0]
_logger.debug(locale.getdefaultlocale())
_logger.debug(lang)
# construct the path for both
lang_path = os.path.join(data_path, lang)
en_lang_path = os.path.join(data_path, 'en')
# get all folders in lang examples
all_folders = []
if os.path.exists(lang_path):
for d in sorted(os.listdir(lang_path)):
all_folders.append(d)
# get all folders in English examples
for d in sorted(os.listdir(en_lang_path)):
# check if folder isn't already in list
if d not in all_folders:
all_folders.append(d)
for folder in all_folders:
# Skip sound folders if TAMTAM is not installed
if folder == 'sound' and not TAMTAM_AVAILABLE:
continue
direntry = {}
# check if dir exists in pref language, if exists, add it
if os.path.exists(os.path.join(lang_path, folder)):
direntry = {
'name': _(folder.capitalize()),
'path': os.path.join(lang_path, folder) + '/'}
# if not try to see if it's in default English path
elif os.path.exists(os.path.join(en_lang_path, folder)):
direntry = {
'name': _(folder.capitalize()),
'path': os.path.join(en_lang_path, folder) + '/'}
self.paths.append([direntry['name'], direntry['path']])
# Adding local examples
data_path = os.path.join(get_bundle_path(), 'data')
self.paths.append([_('My examples'), data_path])
self._source_tabs = SourceNotebook(self, self._collab)
self._source_tabs.connect('tab-added', self._add_source_cb)
self._source_tabs.connect('tab-renamed', self._rename_source_cb)
self._source_tabs.connect('tab-closed', self._close_source_cb)
if self._loaded_session:
for name, content, path in self._loaded_session:
self._source_tabs.add_tab(name, content, path)
else:
self.session_data.append(None)
self._source_tabs.add_tab() # New instance, ergo empty tab
vpane.add1(self._source_tabs)
self._source_tabs.show()
self._outbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self._vte = Vte.Terminal()
self._vte.set_encoding('utf-8')
self._vte.set_size(30, 5)
self._vte.set_scrollback_lines(-1)
self._vte_set_colors('#000000', '#E7E7E7')
self._child_exited_handler = None
self._vte.connect('child_exited', self._child_exited_cb)
self._vte.connect('drag_data_received', self._vte_drop_cb)
self._outbox.pack_start(self._vte, True, True, 0)
outsb = Gtk.Scrollbar(orientation=Gtk.Orientation.VERTICAL)
outsb.set_adjustment(self._vte.get_vadjustment())
outsb.show()
self._outbox.pack_start(outsb, False, False, 0)
self._load_config()
vpane.add2(self._outbox)
self._outbox.show()
vpane.show()
return vpane
    def _vte_set_colors(self, fg, bg):
        # fg is the foreground (text) color, bg the background color.
        # XXX support both Vte APIs
        if _has_new_vte_api():
            foreground = Gdk.RGBA()
            foreground.parse(fg)
            background = Gdk.RGBA()
            background.parse(bg)
        else:
            foreground = Gdk.color_parse(fg)
            background = Gdk.color_parse(bg)
        self._vte.set_colors(foreground, background, [])
def after_init(self):
self._outbox.hide()
def _font_size_changed_cb(self, widget, size):
self._source_tabs.set_font_size(size)
self._vte.set_font(
Pango.FontDescription('Monospace {}'.format(size)))
def _store_config(self):
font_size = self._source_tabs.get_font_size()
_config_file_path = os.path.join(
activity.get_activity_root(), 'data',
'config.json')
with open(_config_file_path, "w") as f:
f.write(json.dumps(font_size))
def _load_config(self):
_config_file_path = os.path.join(
activity.get_activity_root(), 'data',
'config.json')
if not os.path.isfile(_config_file_path):
return
with open(_config_file_path, "r") as f:
font_size = json.loads(f.read())
self.view_toolbar.set_font_size(font_size)
self._vte.set_font(
Pango.FontDescription('Monospace {}'.format(font_size)))
def __active_cb(self, widget, event):
_logger.debug('__active_cb %r', self.props.active)
if self.props.active:
self.resume()
else:
self.pause()
def do_visibility_notify_event(self, event):
_logger.debug('do_visibility_notify_event %r', event.get_state())
if event.get_state() == Gdk.VisibilityState.FULLY_OBSCURED:
self.pause()
else:
self.resume()
def pause(self):
# FIXME: We had resume, but no pause?
pass
def resume(self):
if self._dialog is not None:
self._dialog.set_keep_above(True)
def _toggle_output_cb(self, button):
shown = button.get_active()
if shown:
self._outbox.show_all()
self._toggle_output.set_tooltip(_('Hide output panel'))
self._toggle_output.set_icon_name('tray-hide')
else:
self._outbox.hide()
self._toggle_output.set_tooltip(_('Show output panel'))
self._toggle_output.set_icon_name('tray-show')
def __inverted_colors_toggled_cb(self, button):
if button.props.active:
self._vte_set_colors('#E7E7E7', '#000000')
self._source_tabs.set_dark()
button.set_icon_name('light-theme')
button.set_tooltip(_('Normal Colors'))
else:
self._vte_set_colors('#000000', '#E7E7E7')
self._source_tabs.set_light()
button.set_icon_name('dark-theme')
button.set_tooltip(_('Inverted Colors'))
def _load_example_cb(self, widget):
widget.set_icon_name('pippy-openon')
self._dialog = FileDialog(self.paths, self, widget)
self._dialog.show()
self._dialog.run()
path = self._dialog.get_path()
if path:
self._select_func_cb(path)
def _add_source_cb(self, button, force=False, editor_id=None):
if self._collab._leader or force:
if editor_id is None:
editor_id = str(uuid.uuid1())
self._source_tabs.add_tab(editor_id=editor_id)
self.session_data.append(None)
self._source_tabs.get_nth_page(-1).show_all()
self._source_tabs.get_text_view().grab_focus()
if self._collab._leader:
self._collab.post(dict(
action='add-source',
editor_id=editor_id))
else:
# The leader must do it first so that they can set
# up the text buffer
self._collab.post(dict(action='add-source-request'))
# Check if dark mode enabled, apply it
if self._inverted_colors.props.active:
self._source_tabs.set_dark()
def _rename_source_cb(self, notebook, page, name):
_logger.debug('_rename_source_cb %r %r' % (page, name))
self._collab.post(dict(action='rename-source', page=page, name=name))
def _close_source_cb(self, notebook, page):
_logger.debug('_close_source_cb %r' % (page))
self._collab.post(dict(action='close-source', page=page))
def __message_cb(self, collab, buddy, msg):
action = msg.get('action')
if action == 'add-source-request' and self._collab._leader:
self._add_source_cb(None, force=True)
elif action == 'add-source':
self._add_source_cb(
None, force=True, editor_id=msg.get('editor_id'))
elif action == 'rename-source':
page = msg.get('page')
name = msg.get('name')
_logger.debug('__message_cb rename-source %r %r' % (page, name))
self._source_tabs.rename_tab(page, name)
elif action == 'close-source':
page = msg.get('page')
_logger.debug('__message_cb close-source %r' % (page))
self._source_tabs.close_tab(page)
def _vte_drop_cb(self, widget, context, x, y, selection, targetType, time):
if targetType == TARGET_TYPE_TEXT:
self._vte.feed_child(selection.data)
def get_data(self):
return self._source_tabs.get_all_data()
def set_data(self, data):
# Remove initial new/blank thing
self.session_data = []
self._loaded_session = []
try:
self._source_tabs.remove_page(0)
tab_object.pop(0)
self._source_tabs.last_tab = 0
except IndexError:
pass
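        # data holds parallel lists (names, contents, paths, modified flags,
        # editor ids); zip(*data) regroups them into one tuple per tab.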
list_ = list(zip(*data))
for name, code, path, modified, editor_id in list_:
self._source_tabs.add_tab(
label=name, editor_id=editor_id)
self.session_data.append(None) # maybe?
def _selection_cb(self, value):
self.save()
_logger.debug('clicked! %s' % value['path'])
_file = open(value['path'], 'r')
lines = _file.readlines()
self._add_source_cb(None)
text_buffer = self._source_tabs.get_text_buffer()
text_buffer.set_text(''.join(lines))
text_buffer.set_modified(False)
self._pippy_instance.metadata['title'] = value['name']
self._stop_button_cb(None)
self._reset_vte()
self._source_tabs.set_current_label(value['name'])
self._source_tabs.set_current_path(value['path'])
self._source_tabs.get_text_view().grab_focus()
def _select_func_cb(self, path):
values = {}
values['name'] = os.path.basename(path)
values['path'] = path
self._selection_cb(values)
def _timer_cb(self, button, icons):
button.set_icon_widget(icons['bw'])
button.show_all()
return False
def _flash_cb(self, button, icons):
button.set_icon_widget(icons['color'])
button.show_all()
GObject.timeout_add(400, self._timer_cb, button, icons)
def _clear_button_cb(self, button):
self.save()
self._stop_button_cb(None)
self._reset_vte()
self._source_tabs.get_text_view().grab_focus()
def _write_all_buffers(self, tmp_dir):
data = self._source_tabs.get_all_data()
zipdata = list(zip(data[0], data[1]))
for name, content in zipdata:
name = self._source_tabs.purify_name(name)
with open(os.path.join(tmp_dir, name), 'w') as f:
# Write utf-8 coding prefix if there's not one already
if re.match(r'coding[:=]\s*([-\w.]+)',
'\n'.join(content.splitlines()[:2])) is None:
f.write(PYTHON_PREFIX)
f.write(content)
def _reset_vte(self):
self._vte.grab_focus()
self._vte.feed(b'\x1B[H\x1B[J\x1B[0;39m')
    def __undobutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if text_buffer.can_undo():
text_buffer.undo()
def __redobutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if text_buffer.can_redo():
text_buffer.redo()
def __copybutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if self._vte.get_has_selection():
self._vte.copy_clipboard()
elif text_buffer.get_has_selection():
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
text_buffer.copy_clipboard(clipboard)
def __pastebutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
text_buffer.paste_clipboard(clipboard, None, True)
def _go_button_cb(self, button):
self._stop_button_cb(button) # Try stopping old code first.
self._reset_vte()
# FIXME: We're losing an odd race here
# Gtk.main_iteration(block=False)
if self._toggle_output.get_active() is False:
self._outbox.show_all()
self._toggle_output.set_active(True)
pippy_tmp_dir = '%s/tmp/' % self.get_activity_root()
self._write_all_buffers(pippy_tmp_dir)
current_file = os.path.join(
pippy_tmp_dir,
self._source_tabs.get_current_file_name())
# Write activity.py here too, to support pippy-based activities.
copy2('%s/activity.py' % get_bundle_path(),
'%s/tmp/activity.py' % self.get_activity_root())
# XXX Support both Vte APIs
if _has_new_vte_api():
vte_run = self._vte.spawn_sync
else:
vte_run = self._vte.fork_command_full
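        # Run the saved buffer with python3 inside the embedded terminal; the
        # bundled 'library' directory is prepended to PYTHONPATH.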
self._pid = vte_run(
Vte.PtyFlags.DEFAULT,
get_bundle_path(),
['/bin/sh', '-c', 'python3 %s; sleep 1' % current_file,
'PYTHONPATH=%s/library:%s' % (get_bundle_path(),
os.getenv('PYTHONPATH', ''))],
['PYTHONPATH=%s/library:%s' % (get_bundle_path(),
os.getenv('PYTHONPATH', ''))],
GLib.SpawnFlags.DO_NOT_REAP_CHILD,
None,
None,)
def _stop_button_cb(self, button):
try:
if self._pid is not None:
os.kill(self._pid[1], SIGTERM)
except:
pass # Process must already be dead.
def _library_writable(self):
return os.access(os.path.join(get_bundle_path(), 'library'), os.W_OK)
def _save_as_library(self, button):
library_dir = os.path.join(get_bundle_path(), 'library')
file_name = self._source_tabs.get_current_file_name()
text_buffer = self._source_tabs.get_text_buffer()
content = text_buffer.get_text(
*text_buffer.get_bounds(),
include_hidden_chars=True)
if not os.path.isdir(library_dir):
os.mkdir(library_dir)
with open(os.path.join(library_dir, file_name), 'w') as f:
f.write(content)
success = True
if success:
alert = NotifyAlert(5)
alert.props.title = _('Python File added to Library')
IMPORT_MESSAGE = _('The file you selected has been added'
' to the library. Use "import {importname}"'
                               ' to import the library.')
alert.props.msg = IMPORT_MESSAGE.format(importname=file_name[:-3])
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
def _export_document_cb(self, __):
self.copy()
alert = NotifyAlert()
alert.props.title = _('Saved')
alert.props.msg = _('The document has been saved to journal.')
alert.connect('response', lambda x, i: self.remove_alert(x))
self.add_alert(alert)
def _remove_alert_cb(self, alert, response_id):
self.remove_alert(alert)
def _import_py_cb(self, button):
chooser = ObjectChooser()
result = chooser.run()
if result is Gtk.ResponseType.ACCEPT:
dsitem = chooser.get_selected_object()
if dsitem.metadata['mime_type'] != 'text/x-python':
alert = NotifyAlert(5)
alert.props.title = _('Error importing Python file')
alert.props.msg = _('The file you selected is not a '
'Python file.')
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
elif dsitem.object_id in self.session_data:
alert = NotifyAlert(5)
alert.props.title = _('Error importing Python file')
alert.props.msg = _('The file you selected is already '
'open')
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
else:
name = dsitem.metadata['title']
file_path = dsitem.get_file_path()
content = open(file_path, 'r').read()
self._source_tabs.add_tab(name, content, None)
self._source_tabs.set_current_label(name)
self.session_data.append(dsitem.object_id)
_logger.debug('after import py: %r' % self.session_data)
chooser.destroy()
def _create_bundle_cb(self, button):
from shutil import rmtree
from tempfile import mkdtemp
# Get the name of this pippy program.
title = self._pippy_instance.metadata['title'].replace('.py', '')
title = title.replace('-', '')
if title == 'Pippy Activity':
alert = Alert()
alert.props.title = _('Save as Activity Error')
alert.props.msg = _('Please give your activity a meaningful name '
'before attempting to save it as an activity.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
alert_icon = Alert()
ok_icon = Icon(icon_name='dialog-ok')
alert_icon.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert_icon.props.title = _('Activity icon')
alert_icon.props.msg = _('Please select an activity icon.')
self._stop_button_cb(None) # try stopping old code first.
self._reset_vte()
self._outbox.show_all()
self._vte.feed(_("Creating activity bundle...").encode())
self._vte.feed(b'\r\n')
TMPDIR = 'instance'
app_temp = mkdtemp('.activity', 'Pippy',
os.path.join(self.get_activity_root(), TMPDIR))
sourcefile = os.path.join(app_temp, 'xyzzy.py')
# invoke ourself to build the activity bundle.
_logger.debug('writing out source file: %s' % sourcefile)
def internal_callback(window=None, event=None):
icon = '%s/activity/activity-default.svg' % (get_bundle_path())
if window:
icon = window.get_icon()
self._stop_button_cb(None) # Try stopping old code first.
self._reset_vte()
self._vte.feed(_('Creating activity bundle...').encode())
self._vte.feed(b'\r\n')
TMPDIR = 'instance'
app_temp = mkdtemp('.activity', 'Pippy',
os.path.join(self.get_activity_root(), TMPDIR))
sourcefile = os.path.join(app_temp, 'xyzzy.py')
# Invoke ourself to build the activity bundle.
_logger.debug('writing out source file: %s' % sourcefile)
# Write out application code
self._write_text_buffer(sourcefile)
try:
                # FIXME: vte invocation was raising errors.
                # Switched to subprocess
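                # Re-invoke this file as a script (main() below) to package the
                # exported source and icon into an .xo bundle.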
output = subprocess.check_output(
['/usr/bin/python3',
'%s/pippy_app.py' % get_bundle_path(),
'-p', '%s/library' % get_bundle_path(),
'-d', app_temp, title, sourcefile, icon])
self._vte.feed(output)
self._vte.feed(b'\r\n')
self._bundle_cb(title, app_temp)
except subprocess.CalledProcessError:
rmtree(app_temp, ignore_errors=True) # clean up!
self._vte.feed(_('Save as Activity Error').encode())
self._vte.feed(b'\r\n')
raise
def _alert_response(alert, response_id):
self.remove_alert(alert)
def _dialog():
dialog = IconDialog()
dialog.connect('destroy', internal_callback)
GObject.idle_add(_dialog)
alert_icon.connect('response', _alert_response)
self.add_alert(alert_icon)
def _write_text_buffer(self, filename):
text_buffer = self._source_tabs.get_text_buffer()
start, end = text_buffer.get_bounds()
text = text_buffer.get_text(start, end, True)
with open(filename, 'w') as f:
# Write utf-8 coding prefix if there's not one already
if re.match(r'coding[:=]\s*([-\w.]+)',
'\n'.join(text.splitlines()[:2])) is None:
f.write(PYTHON_PREFIX)
for line in text:
f.write(line)
def _export_distutils_cb(self, button):
app_temp = os.path.join(self.get_activity_root(), 'instance')
data = self._source_tabs.get_all_data()
for filename, content in zip(data[0], data[1]):
fileobj = open(os.path.join(app_temp, filename), 'w')
fileobj.write(content)
fileobj.close()
filenames = ','.join([("'" + name[:-3] + "'") for name in data[0]])
title = self._pippy_instance.metadata['title']
        if title == _('Pippy Activity'):
alert = Alert()
alert.props.title = _('Save as distutils package error')
alert.props.msg = _('Please give your activity a meaningful '
'name before attempting to save it '
                                'as a distutils package.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
found = next((
name for name in data[0]
if name != self._source_tabs.purify_name(name)),
None)
if found is not None:
example = self._source_tabs.purify_name(found)
alert = Alert()
alert.props.title = _('Save as distutils package error')
alert.props.msg = _('Please give your source files a proper '
'name, for example "%s", before attempting to '
                                'save it as a distutils package.') % example
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
setup_script = DISTUTILS_SETUP_SCRIPT.format(modulename=title,
filenames=filenames)
setupfile = open(os.path.join(app_temp, 'setup.py'), 'w')
setupfile.write(setup_script)
setupfile.close()
os.chdir(app_temp)
subprocess.check_output(
['/usr/bin/python3', os.path.join(app_temp, 'setup.py'), 'sdist',
'-v'])
# Hand off to journal
os.chmod(app_temp, 0o777)
jobject = datastore.create()
metadata = {
'title': '%s distutils bundle' % title,
'title_set_by_user': '1',
'mime_type': 'application/x-gzip',
}
for k, v in list(metadata.items()):
# The dict.update method is missing =(
jobject.metadata[k] = v
tarname = 'dist/{modulename}-1.0.tar.gz'.format(modulename=title)
jobject.file_path = os.path.join(app_temp, tarname)
datastore.write(jobject)
def _export_example_cb(self, button):
# Get the name of this pippy program.
title = self._pippy_instance.metadata['title']
if title == _('Pippy Activity'):
alert = Alert()
alert.props.title = _('Save as Example Error')
alert.props.msg = \
_('Please give your activity a meaningful '
'name before attempting to save it as an example.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
self._stop_button_cb(None) # Try stopping old code first.
self._reset_vte()
self._vte.feed(_('Creating example...').encode())
self._vte.feed(b'\r\n')
local_data = os.path.join(os.environ['SUGAR_ACTIVITY_ROOT'], 'data')
local_file = os.path.join(local_data, title)
if os.path.exists(local_file):
alert = ConfirmationAlert()
alert.props.title = _('Save as Example Warning')
alert.props.msg = _('This example already exists. '
'Do you want to overwrite it?')
alert.connect('response', self._confirmation_alert_cb, local_file)
self.add_alert(alert)
else:
self.write_file(local_file)
self._reset_vte()
self._vte.feed(_('Saved as example.').encode())
self._vte.feed(b'\r\n')
self._add_to_example_list(local_file)
def _child_exited_cb(self, *args):
'''Called whenever a child exits. If there's a handler, run it.'''
h, self._child_exited_handler = self._child_exited_handler, None
if h is not None:
h()
def _bundle_cb(self, title, app_temp):
'''Called when we're done building a bundle for a source file.'''
from sugar3 import profile
from shutil import rmtree
try:
# Find the .xo file: were we successful?
bundle_file = [f for f in os.listdir(app_temp)
if f.endswith('.xo')]
if len(bundle_file) != 1:
_logger.debug("Couldn't find bundle: %s" %
str(bundle_file))
self._vte.feed(b'\r\n')
self._vte.feed(_('Error saving activity to journal.').encode())
self._vte.feed(b'\r\n')
return # Something went wrong.
# Hand off to journal
os.chmod(app_temp, 0o755)
jobject = datastore.create()
metadata = {
'title': '%s Bundle' % title,
'title_set_by_user': '1',
'buddies': '',
'preview': '',
'icon-color': profile.get_color().to_string(),
'mime_type': 'application/vnd.olpc-sugar',
}
for k, v in list(metadata.items()):
# The dict.update method is missing =(
jobject.metadata[k] = v
jobject.file_path = os.path.join(app_temp, bundle_file[0])
datastore.write(jobject)
self._vte.feed(b'\r\n')
self._vte.feed(_('Activity saved to journal.').encode())
self._vte.feed(b'\r\n')
self.journal_show_object(jobject.object_id)
jobject.destroy()
finally:
rmtree(app_temp, ignore_errors=True) # clean up!
def _dismiss_alert_cb(self, alert, response_id):
self.remove_alert(alert)
def _confirmation_alert_cb(self, alert, response_id, local_file):
# Callback for conf alert
self.remove_alert(alert)
if response_id is Gtk.ResponseType.OK:
self.write_file(local_file)
self._reset_vte()
self._vte.feed(_('Saved as example.').encode())
self._vte.feed(b'\r\n')
else:
self._reset_vte()
def _add_to_example_list(self, local_file):
entry = {'name': _(os.path.basename(local_file)),
'path': local_file}
_iter = self.model.insert_before(self.example_iter, None)
self.model.set_value(_iter, 0, entry)
self.model.set_value(_iter, 1, entry['name'])
def is_example(self, path):
if path is None:
return False
for name in self.paths:
if path.startswith(name[1]):
return True
return False
def _get_pippy_object_id(self):
''' We need the object_id of this pippy instance to save in the .py
file metadata'''
if self._pippy_instance == self:
return _find_object_id(self.metadata['activity_id'],
mimetype='application/json')
else:
return self._pippy_instance.get_object_id()
def write_file(self, file_path):
pippy_id = self._get_pippy_object_id()
data = self._source_tabs.get_all_data()
zipped_data = list(zip(*data))
session_list = []
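        # Each session_list entry is [tab name, reference], where the reference
        # is a datastore object id or the path of a bundled example.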
app_temp = os.path.join(self.get_activity_root(), 'instance')
tmpfile = os.path.join(app_temp, 'pippy-tempfile-storing.py')
if not self.session_data:
self.session_data.append(None)
for zipdata, content in zip(zipped_data, self.session_data):
_logger.debug('Session data %r', content)
name, python_code, path, modified, editor_id = zipdata
if content is not None and content == self._py_object_id:
_logger.debug('saving to self')
self.metadata['title'] = name
self.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
self.metadata['pippy_instance'] = pippy_id
__file = open(file_path, 'w')
__file.write(python_code)
__file.close()
session_list.append([name, content])
elif content is not None and content[0] != '/':
_logger.debug('Saving an existing dsobject')
dsobject = datastore.get(content)
dsobject.metadata['title'] = name
dsobject.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
dsobject.metadata['pippy_instance'] = pippy_id
__file = open(tmpfile, 'w')
__file.write(python_code)
__file.close()
dsobject.set_file_path(tmpfile)
datastore.write(dsobject)
session_list.append([name, dsobject.object_id])
elif modified:
_logger.debug('Creating new dsobj for modified code')
if len(python_code) > 0:
dsobject = datastore.create()
dsobject.metadata['title'] = name
dsobject.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
dsobject.metadata['pippy_instance'] = pippy_id
__file = open(tmpfile, 'w')
__file.write(python_code)
__file.close()
dsobject.set_file_path(tmpfile)
datastore.write(dsobject)
session_list.append([name, dsobject.object_id])
# If there are multiple Nones, we need to find
# the correct one.
if content is None and \
self.session_data.count(None) > 1:
i = zipped_data.index(zipdata)
else:
i = self.session_data.index(content)
self.session_data[i] = dsobject.object_id
elif content is not None or path is not None:
_logger.debug('Saving reference to sample file')
if path is None: # Should not happen, but just in case...
_logger.error('path is None.')
session_list.append([name, content])
else:
session_list.append([name, path])
else: # Should not happen, but just in case...
_logger.debug('Nothing to save in tab? %s %s %s %s' %
(str(name), str(python_code), str(path),
str(content)))
self._pippy_instance.metadata['mime_type'] = 'application/json'
pippy_data = json.dumps(session_list)
# Override file path if we created a new Pippy instance
if self._py_file_loaded_from_journal:
file_path = os.path.join(app_temp, 'pippy-temp-instance-data')
_file = open(file_path, 'w')
_file.write(pippy_data)
_file.close()
if self._py_file_loaded_from_journal:
_logger.debug('setting pippy instance file_path to %s' %
file_path)
self._pippy_instance.set_file_path(file_path)
datastore.write(self._pippy_instance)
self._store_config()
def read_file(self, file_path):
# Either we are opening Python code or a list of objects
# stored (json-encoded) in a Pippy instance, or a shared
# session.
# Remove initial new/blank thing
self.session_data = []
self._loaded_session = []
try:
self._source_tabs.remove_page(0)
tab_object.pop(0)
self._source_tabs.last_tab = 0
except IndexError:
pass
if self.metadata['mime_type'] == 'text/x-python':
_logger.debug('Loading Python code')
# Opening some Python code directly
try:
text = open(file_path).read()
except:
alert = NotifyAlert(10)
alert.props.title = _('Error')
alert.props.msg = _('Error reading data.')
def _remove_alert(alert, response_id):
self.remove_alert(alert)
alert.connect("response", _remove_alert)
self.add_alert(alert)
return
self._py_file_loaded_from_journal = True
# Discard the '#!/usr/bin/python3' and 'coding: utf-8' lines,
# if present
python_code = re.sub(r'^' + re.escape(PYTHON_PREFIX), '', text)
name = self.metadata['title']
self._loaded_session.append([name, python_code, None])
# Since we loaded Python code, we need to create (or
# restore) a Pippy instance
if 'pippy_instance' in self.metadata:
_logger.debug('found a pippy instance: %s' %
self.metadata['pippy_instance'])
try:
self._pippy_instance = datastore.get(
self.metadata['pippy_instance'])
except:
                    _logger.debug('Cannot find old Pippy instance: %s' %
                                  self.metadata['pippy_instance'])
self._pippy_instance = None
if self._pippy_instance in [self, None]:
self._pippy_instance = datastore.create()
self._pippy_instance.metadata['title'] = self.metadata['title']
self._pippy_instance.metadata['mime_type'] = 'application/json'
self._pippy_instance.metadata['activity'] = 'org.laptop.Pippy'
datastore.write(self._pippy_instance)
self.metadata['pippy_instance'] = \
self._pippy_instance.get_object_id()
_logger.debug('get_object_id %s' %
self.metadata['pippy_instance'])
# We need the Pippy file path so we can read the session data
file_path = self._pippy_instance.get_file_path()
# Finally, add this Python object to the session data
self._py_object_id = _find_object_id(self.metadata['activity_id'])
self.session_data.append(self._py_object_id)
_logger.debug('session_data: %s' % self.session_data)
if self.metadata['mime_type'] == 'application/json' or \
self._pippy_instance != self:
# Reading file list from Pippy instance
_logger.debug('Loading Pippy instance')
if len(file_path) == 0:
return
data = json.loads(open(file_path).read())
for name, content in data:
# content is either a datastore id or the path to some
# sample code
if content is not None and content[0] == '/': # a path
try:
python_code = open(content).read()
except:
_logger.error('Could not open %s; skipping' % content)
path = content
elif content != self._py_object_id:
try:
dsobject = datastore.get(content)
if 'mime_type' not in dsobject.metadata:
_logger.error(
'Warning: %s missing mime_type' % content)
elif dsobject.metadata['mime_type'] != 'text/x-python':
_logger.error(
'Warning: %s has unexpected mime_type %s' %
(content, dsobject.metadata['mime_type']))
except:
# Could be that the item has subsequently been
# deleted from the datastore, so we skip it.
_logger.error('Could not open %s; skipping' % content)
continue
try:
python_code = open(dsobject.get_file_path()).read()
except:
# Malformed bundle?
_logger.error('Could not open %s; skipping' %
dsobject.get_file_path())
continue
path = None
# Queue up the creation of the tabs...
# And add this content to the session data
if content not in self.session_data:
self.session_data.append(content)
self._loaded_session.append([name, python_code, path])
# Create tabs from the datastore, else add a blank tab
if self._loaded_session:
for name, content, path in self._loaded_session:
self._source_tabs.add_tab(name, content, path)
else:
self._source_tabs.add_tab()
# TEMPLATES AND INLINE FILES
ACTIVITY_INFO_TEMPLATE = '''
[Activity]
name = %(title)s
bundle_id = %(bundle_id)s
exec = sugar-activity3 %(class)s
icon = activity-icon
activity_version = %(version)d
mime_types = %(mime_types)s
show_launcher = yes
%(extra_info)s
'''
PIPPY_ICON = """<?xml version="1.0" ?><!DOCTYPE svg PUBLIC '-//W3C//DTD SVG
1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd' [
<!ENTITY stroke_color "#010101">
<!ENTITY fill_color "#FFFFFF">
]>
<svg enable-background="new 0 0 55 55" height="55px" version="1.1"
viewBox="0 0 55 55" width="55px" x="0px" xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" y="0px"><g display="block"
id="activity-pippy">
<path d="M28.497,48.507
c5.988,0,14.88-2.838,14.88-11.185
c0-9.285-7.743-10.143-10.954-11.083
c-3.549-0.799-5.913-1.914-6.055-3.455
c-0.243-2.642,1.158-3.671,3.946-3.671
c0,0,6.632,3.664,12.266,0.74
c1.588-0.823,4.432-4.668,4.432-7.32
c0-2.653-9.181-5.719-11.967-5.719
c-2.788,0-5.159,3.847-5.159,3.847
c-5.574,0-11.149,5.306-11.149,10.612
c0,5.305,5.333,9.455,11.707,10.612
c2.963,0.469,5.441,2.22,4.878,5.438
c-0.457,2.613-2.995,5.306-8.361,5.306
c-4.252,0-13.3-0.219-14.745-4.079
c-0.929-2.486,0.168-5.205,1.562-5.205l-0.027-0.16
c-1.42-0.158-5.548,0.16-5.548,5.465
C8.202,45.452,17.347,48.507,28.497,48.507z"
fill="&fill_color;" stroke="&stroke_color;"
stroke-linecap="round" stroke-linejoin="round" stroke-width="3.5"/>
<path d="M42.579,19.854c-2.623-0.287-6.611-2-7.467-5.022" fill="none"
stroke="&stroke_color;" stroke-linecap="round" stroke-width="3"/>
<circle cx="35.805" cy="10.96" fill="&stroke_color;" r="1.676"/>
</g></svg><!-- " -->
"""
# ACTIVITY META-INFORMATION
# this is used by Pippy to generate a bundle for itself.
def pippy_activity_version():
'''Returns the version number of the generated activity bundle.'''
return 39
def pippy_activity_extra_files():
'''Returns a map of 'extra' files which should be included in the
generated activity bundle.'''
# Cheat here and generate the map from the fs contents.
extra = {}
bp = get_bundle_path()
for d in ['po', 'data', 'post']: # everybody gets library
for root, dirs, files in os.walk(os.path.join(bp, d)):
for name in files:
fn = os.path.join(root, name).replace(bp + '/', '')
extra[fn] = open(os.path.join(root, name), 'r').read()
return extra
def pippy_activity_news():
'''Return the NEWS file for this activity.'''
# Cheat again.
return open(os.path.join(get_bundle_path(), 'NEWS')).read()
def pippy_activity_icon():
'''Return an SVG document specifying the icon for this activity.'''
return PIPPY_ICON
def pippy_activity_class():
'''Return the class which should be started to run this activity.'''
return 'pippy_app.PippyActivity'
def pippy_activity_bundle_id():
'''Return the bundle_id for the generated activity.'''
return 'org.laptop.Pippy'
def pippy_activity_mime_types():
'''Return the mime types handled by the generated activity, as a list.'''
return ['text/x-python']
def pippy_activity_extra_info():
return '''
license = GPLv2+
update_url = http://activities.sugarlabs.org '''
# ACTIVITY BUNDLER
def main():
'''Create a bundle from a pippy-style source file'''
from optparse import OptionParser
from pyclbr import readmodule_ex
from tempfile import mkdtemp
from shutil import copytree, copy2, rmtree
from sugar3.activity import bundlebuilder
parser = OptionParser(usage='%prog [options] [title] [sourcefile] [icon]')
parser.add_option('-d', '--dir', dest='dir', default='.', metavar='DIR',
help='Put generated bundle in the specified directory.')
parser.add_option('-p', '--pythonpath', dest='path', action='append',
default=[], metavar='DIR',
help='Append directory to python search path.')
(options, args) = parser.parse_args()
if len(args) < 3:
parser.error('The title, sourcefile and icon arguments are required.')
title = args[0]
sourcefile = args[1]
icon_path = args[2]
pytitle = re.sub(r'[^A-Za-z0-9_]', '', title)
if re.match(r'[0-9]', pytitle) is not None:
pytitle = '_' + pytitle # first character cannot be numeric
# First take a gander at the source file and see if it's got extra info
# for us.
sourcedir, basename = os.path.split(sourcefile)
if not sourcedir:
sourcedir = '.'
module, ext = os.path.splitext(basename)
f = open(icon_path, 'r')
icon = f.read()
f.close()
# Things we look for:
bundle_info = {
'version': 1,
'extra_files': {},
'news': 'No news.',
'icon': icon,
'class': 'activity.VteActivity',
'bundle_id': ('org.sugarlabs.pippy.%s%d' %
(generate_unique_id(),
int(round(uniform(1000, 9999), 0)))),
'mime_types': '',
'extra_info': '',
}
# Are any of these things in the module?
try_import = False
info = readmodule_ex(module, [sourcedir] + options.path)
for func in list(bundle_info.keys()):
p_a_func = 'pippy_activity_%s' % func
if p_a_func in info:
try_import = True
if try_import:
# Yes, let's try to execute them to get better info about our bundle
oldpath = list(sys.path)
sys.path[0:0] = [sourcedir] + options.path
modobj = __import__(module)
for func in list(bundle_info.keys()):
p_a_func = 'pippy_activity_%s' % func
if p_a_func in modobj.__dict__:
bundle_info[func] = modobj.__dict__[p_a_func]()
sys.path = oldpath
# Okay! We've done the hard part. Now let's build a bundle.
# Create a new temp dir in which to create the bundle.
app_temp = mkdtemp('.activity', 'Pippy') # Hope TMPDIR is set correctly!
bundle = get_bundle_path()
try:
copytree('%s/library' % bundle, '%s/library' % app_temp)
copy2('%s/activity.py' % bundle, '%s/activity.py' % app_temp)
# create activity.info file.
bundle_info['title'] = title
bundle_info['pytitle'] = pytitle
# put 'extra' files in place.
extra_files = {
'activity/activity.info': ACTIVITY_INFO_TEMPLATE % bundle_info,
'activity/activity-icon.svg': bundle_info['icon'],
'NEWS': bundle_info['news'],
}
extra_files.update(bundle_info['extra_files'])
for path, contents in list(extra_files.items()):
# safety first!
assert '..' not in path
dirname, filename = os.path.split(path)
dirname = os.path.join(app_temp, dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(os.path.join(dirname, filename), 'w') as f:
f.write(contents)
# Put script into $app_temp/pippy_app.py
copy2(sourcefile, '%s/pippy_app.py' % app_temp)
# Invoke bundle builder
olddir = os.getcwd()
oldargv = sys.argv
os.chdir(app_temp)
sys.argv = ['setup.py', 'dist_xo']
print('\r\nStarting bundlebuilder\r\n')
bundlebuilder.start()
sys.argv = oldargv
os.chdir(olddir)
# Move to destination directory.
src = '%s/dist/%s-%d.xo' % (app_temp, pytitle, bundle_info['version'])
dst = '%s/%s-%d.xo' % (options.dir, pytitle, bundle_info['version'])
if not os.path.exists(src):
print('Cannot find %s\r\n' % (src))
else:
copy2(src, dst)
finally:
rmtree(app_temp, ignore_errors=True)
print('Finally\r\n')
if __name__ == '__main__':
from gettext import gettext as _
if False: # Change this to True to test within Pippy
sys.argv = sys.argv + ['-d', '/tmp', 'Pippy',
'/home/olpc/pippy_app.py']
print(_('Working...'))
sys.stdout.flush()
main()
print(_('done!'))
sys.exit(0)
| gpl-3.0 | -1,851,500,253,610,730,000 | 37.860784 | 79 | 0.564677 | false |
infrascloudy/flask-base | app/models/user.py | 1 | 6385 | from flask import current_app
from flask_login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login_manager
class Permission:
GENERAL = 0x01
ADMINISTER = 0xff
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.GENERAL, 'main', True),
'Administrator': (
Permission.ADMINISTER,
'admin',
False # grants all permissions
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(64), index=True)
last_name = db.Column(db.String(64), index=True)
email = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
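        # True only when every bit of the requested permission mask is set.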
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.can(Permission.ADMINISTER)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
        Generate a password reset token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
db.session.commit()
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
@staticmethod
def generate_fake(count=100, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice
from faker import Faker
fake = Faker()
roles = Role.query.all()
seed()
for i in range(count):
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| mit | 1,365,562,869,490,029,800 | 31.576531 | 76 | 0.592482 | false |
ME-ICA/me-ica | meica.libs/mdp/graph/graph.py | 1 | 13012 | # inspired by some code by Nathan Denny (1999)
# see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html
try:
# use reduce against BDFL's will even on python > 2.6
from functools import reduce
except ImportError:
pass
class GraphException(Exception):
"""Base class for exception in the graph package."""
pass
class GraphTopologicalException(GraphException):
"""Exception thrown during a topological sort if the graph is cyclical."""
pass
def is_sequence(x):
return isinstance(x, (list, tuple))
def recursive_map(func, seq):
"""Apply a function recursively on a sequence and all subsequences."""
def _func(x):
if is_sequence(x):
return recursive_map(func, x)
else:
return func(x)
return map(_func, seq)
def recursive_reduce(func, seq, *argv):
"""Apply reduce(func, seq) recursively to a sequence and all its
subsequences."""
def _func(x, y):
if is_sequence(y):
return func(x, recursive_reduce(func, y))
else:
return func(x, y)
return reduce(_func, seq, *argv)
class GraphNode(object):
"""Represent a graph node and all information attached to it."""
def __init__(self, data=None):
self.data = data
# edges in
self.ein = []
# edges out
self.eout = []
def add_edge_in(self, edge):
self.ein.append(edge)
def add_edge_out(self, edge):
self.eout.append(edge)
def remove_edge_in(self, edge):
self.ein.remove(edge)
def remove_edge_out(self, edge):
self.eout.remove(edge)
def get_edges_in(self, from_ = None):
"""Return a copy of the list of the entering edges. If from_
        is specified, return only the edges coming from that node."""
inedges = self.ein[:]
if from_:
inedges = [edge for edge in inedges if edge.head == from_]
return inedges
def get_edges_out(self, to_ = None):
"""Return a copy of the list of the outgoing edges. If to_
        is specified, return only the edges going to that node."""
outedges = self.eout[:]
if to_:
outedges = [edge for edge in outedges if edge.tail == to_]
return outedges
def get_edges(self, neighbor = None):
"""Return a copy of all edges. If neighbor is specified, return
only the edges connected to that node."""
return ( self.get_edges_in(from_=neighbor) +
self.get_edges_out(to_=neighbor) )
def in_degree(self):
"""Return the number of entering edges."""
return len(self.ein)
def out_degree(self):
"""Return the number of outgoing edges."""
return len(self.eout)
def degree(self):
"""Return the number of edges."""
return self.in_degree()+self.out_degree()
def in_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_head(), self.ein)
def out_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_tail(), self.eout)
def neighbors(self):
return self.in_neighbors() + self.out_neighbors()
class GraphEdge(object):
"""Represent a graph edge and all information attached to it."""
def __init__(self, head, tail, data=None):
# head node
self.head = head
# neighbors out
self.tail = tail
# arbitrary data slot
self.data = data
def get_ends(self):
"""Return the tuple (head_id, tail_id)."""
return (self.head, self.tail)
def get_tail(self):
return self.tail
def get_head(self):
return self.head
class Graph(object):
"""Represent a directed graph."""
def __init__(self):
# list of nodes
self.nodes = []
# list of edges
self.edges = []
# node functions
def add_node(self, data=None):
node = GraphNode(data=data)
self.nodes.append(node)
return node
def remove_node(self, node):
# the node is not in this graph
if node not in self.nodes:
errstr = 'This node is not part of the graph (%s)' % node
raise GraphException(errstr)
# remove all edges containing this node
for edge in node.get_edges():
self.remove_edge(edge)
# remove the node
self.nodes.remove(node)
# edge functions
def add_edge(self, head, tail, data=None):
"""Add an edge going from head to tail.
head : head node
tail : tail node
"""
# create edge
edge = GraphEdge(head, tail, data=data)
# add edge to head and tail node
head.add_edge_out(edge)
tail.add_edge_in(edge)
# add to the edges dictionary
self.edges.append(edge)
return edge
def remove_edge(self, edge):
head, tail = edge.get_ends()
# remove from head
head.remove_edge_out(edge)
# remove from tail
tail.remove_edge_in(edge)
# remove the edge
self.edges.remove(edge)
### populate functions
def add_nodes(self, data):
"""Add many nodes at once.
data -- number of nodes to add or sequence of data values, one for
each new node"""
if not is_sequence(data):
data = [None]*data
return map(self.add_node, data)
def add_tree(self, tree):
"""Add a tree to the graph.
        The tree is specified with a nested list of tuples, in a LISP-like
notation. The values specified in the list become the values of
the single nodes.
Return an equivalent nested list with the nodes instead of the values.
Example:
>>> a=b=c=d=e=None
>>> g.add_tree( (a, b, (c, d ,e)) )
corresponds to this tree structure, with all node values set to None:
                    a
                   / \
                  b   c
                     / \
                    d   e
"""
def _add_edge(root, son):
self.add_edge(root, son)
return root
nodes = recursive_map(self.add_node, tree)
recursive_reduce(_add_edge, nodes)
return nodes
def add_full_connectivity(self, from_nodes, to_nodes):
"""Add full connectivity from a group of nodes to another one.
Return a list of lists of edges, one for each node in 'from_nodes'.
Example: create a two-layer graph with full connectivity.
>>> g = Graph()
>>> layer1 = g.add_nodes(10)
>>> layer2 = g.add_nodes(5)
>>> g.add_full_connectivity(layer1, layer2)
"""
edges = []
for from_ in from_nodes:
edges.append(map(lambda x: self.add_edge(from_, x), to_nodes))
return edges
###### graph algorithms
def topological_sort(self):
"""Perform a topological sort of the nodes. If the graph has a cycle,
throw a GraphTopologicalException with the list of successfully
ordered nodes."""
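        # Kahn's algorithm: repeatedly remove nodes that have no incoming edges.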
# topologically sorted list of the nodes (result)
topological_list = []
# queue (fifo list) of the nodes with in_degree 0
topological_queue = []
# {node: in_degree} for the remaining nodes (those with in_degree>0)
remaining_indegree = {}
# init queues and lists
for node in self.nodes:
indegree = node.in_degree()
if indegree == 0:
topological_queue.append(node)
else:
remaining_indegree[node] = indegree
# remove nodes with in_degree 0 and decrease the in_degree of their sons
while len(topological_queue):
# remove the first node with degree 0
node = topological_queue.pop(0)
topological_list.append(node)
# decrease the in_degree of the sons
for son in node.out_neighbors():
remaining_indegree[son] -= 1
if remaining_indegree[son] == 0:
topological_queue.append(son)
# if not all nodes were covered, the graph must have a cycle
        # raise a GraphTopologicalException
if len(topological_list)!=len(self.nodes):
raise GraphTopologicalException(topological_list)
return topological_list
### Depth-First sort
def _dfs(self, neighbors_fct, root, visit_fct=None):
# core depth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal dfs, reverse dfs, or
# dfs on the equivalent undirected graph, respectively
# result list containing the nodes in Depth-First order
dfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# stack (lifo) list
dfs_stack = []
dfs_stack.append(root)
while len(dfs_stack):
# consider the next node on the stack
node = dfs_stack.pop()
dfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the stack (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
dfs_stack.append(son)
return dfs_list
def dfs(self, root, visit_fct=None):
"""Return a list of nodes in some Depth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
The returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root.
"""
neighbors_fct = lambda node: node.out_neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_dfs(self, root, visit_fct=None):
"""Perform Depth First sort.
This function is identical to dfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
### Connected components
def connected_components(self):
"""Return a list of lists containing the nodes of all connected
components of the graph."""
visited = {}
def visit_fct(node, visited=visited):
visited[node] = None
components = []
nodes = self.nodes
for node in nodes:
if node in visited:
continue
components.append(self.undirected_dfs(node, visit_fct))
return components
def is_weakly_connected(self):
"""Return True if the graph is weakly connected."""
return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes)
### Breadth-First Sort
# BFS and DFS could be generalized to one function. I leave them
# distinct for clarity.
def _bfs(self, neighbors_fct, root, visit_fct=None):
# core breadth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal bfs, reverse bfs, or
# bfs on the equivalent undirected graph, respectively
# result list containing the nodes in Breadth-First order
bfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# queue (fifo) list
bfs_queue = []
bfs_queue.append(root)
while len(bfs_queue):
# consider the next node in the queue
node = bfs_queue.pop(0)
bfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the queue (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
bfs_queue.append(son)
return bfs_list
def bfs(self, root, visit_fct=None):
"""Return a list of nodes in some Breadth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
Note the returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root."""
neighbors_fct = lambda node: node.out_neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_bfs(self, root, visit_fct=None):
"""Perform Breadth First sort.
This function is identical to bfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
| lgpl-2.1 | -8,937,105,326,309,814,000 | 31.448878 | 80 | 0.587919 | false |
caffeinehit/yell | yell/backends/celery.py | 1 | 2316 | from __future__ import absolute_import
from celery.task import Task
from yell import Notification, notify, registry
class CeleryNotificationTask(Task):
""" Dispatch and run the notification. """
def run(self, name=None, backend=None, *args, **kwargs):
"""
The Celery task.
Delivers the notification via all backends returned by :param:`backend`.
"""
assert name is not None, "No 'name' specified to notify"
assert backend is not None, "No 'backend' specified to notify with"
backends = backend().get_backends(*args, **kwargs)
notify(name, backends=backends, *args, **kwargs)
class CeleryNotification(Notification):
"""
Delivers notifications through Celery.
:example:
::
from yell import notify, Notification
class EmailNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Deliver email
class DBNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Save to database
class AsyncNotification(CeleryNotification):
name = 'async'
notify('async', backends = [AsyncNotification],
text = "This notification is routed through Celery before being sent and saved")
    In the above example, calling :attr:`yell.notify` will invoke ``EmailNotification`` and
    ``DBNotification`` once the task has been delivered through Celery.
"""
name = None
"""
The name of this notification. Override in subclasses.
"""
def get_backends(self, *args, **kwargs):
"""
Return all backends the task should use to deliver notifications.
By default all backends with the same :attr:`name` except for subclasses
        of :class:`CeleryNotification` will be used.
"""
return filter(lambda cls: not issubclass(cls, self.__class__), registry.notifications[self.name])
def notify(self, *args, **kwargs):
"""
Dispatches the notification to Celery
"""
return CeleryNotificationTask.delay(name=self.name, backend=self.__class__, *args, **kwargs)
| mit | -3,298,192,501,619,146,000 | 32.565217 | 105 | 0.603195 | false |
CiNC0/Cartier | cartier-python-resign-linux/tests/test_versioning.py | 1 | 1194 | #!/usr/bin/env python
import os.path
import importlib
import unittest
tests_dir = os.path.abspath(os.path.dirname(__file__))
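# Derive the importable package name from the repository directory name
# (dashes become underscores).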
package_name = tests_dir.split(os.path.sep)[-2].replace('-', '_')
package = importlib.import_module(package_name)
class VersioningTestCase(unittest.TestCase):
def assert_proper_attribute(self, attribute):
try:
assert getattr(package, attribute), (
"{} improperly set".format(attribute))
except AttributeError:
assert False, "missing {}".format(attribute)
def test_version_attribute(self):
self.assert_proper_attribute("__version__")
# test major, minor, and patch are numbers
version_split = package.__version__.split(".")[:3]
assert version_split, "__version__ is not set"
for n in version_split:
try:
int(n)
except ValueError:
assert False, "'{}' is not an integer".format(n)
def test_commit_attribute(self):
self.assert_proper_attribute("__commit__")
def test_build_attribute(self):
self.assert_proper_attribute("__build__")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,473,486,557,290,887,000 | 28.85 | 65 | 0.60804 | false |
rgerkin/pyNeuroML | pyneuroml/tune/NeuroMLSimulation.py | 1 | 5357 | '''
A class for running a single instance of a NeuroML model by generating a
LEMS file and using pyNeuroML to run in a chosen simulator
'''
import sys
import time
from pyneuroml import pynml
from pyneuroml.lems import generate_lems_file_for_neuroml
try:
import pyelectro # Not used here, just for checking installation
except:
print('>> Note: pyelectro from https://github.com/pgleeson/pyelectro is required!')
exit()
try:
import neurotune # Not used here, just for checking installation
except:
print('>> Note: neurotune from https://github.com/pgleeson/neurotune is required!')
exit()
class NeuroMLSimulation(object):
def __init__(self,
reference,
neuroml_file,
target,
sim_time=1000,
dt=0.05,
simulator='jNeuroML',
generate_dir = './',
cleanup = True,
nml_doc = None):
self.sim_time = sim_time
self.dt = dt
self.simulator = simulator
self.generate_dir = generate_dir if generate_dir.endswith('/') else generate_dir+'/'
self.reference = reference
self.target = target
self.neuroml_file = neuroml_file
self.nml_doc = nml_doc
self.cleanup = cleanup
self.already_run = False
def show(self):
"""
        Plot the result of the simulation once it has been run.
"""
from matplotlib import pyplot as plt
if self.already_run:
for ref in self.volts.keys():
plt.plot(self.t, self.volts[ref], label=ref)
plt.title("Simulation voltage vs time")
plt.legend()
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
pynml.print_comment("First you have to 'go()' the simulation.", True)
plt.show()
def go(self):
lems_file_name = 'LEMS_%s.xml'%(self.reference)
generate_lems_file_for_neuroml(self.reference,
self.neuroml_file,
self.target,
self.sim_time,
self.dt,
lems_file_name = lems_file_name,
target_dir = self.generate_dir,
nml_doc = self.nml_doc)
pynml.print_comment_v("Running a simulation of %s ms with timestep %s ms: %s"%(self.sim_time, self.dt, lems_file_name))
self.already_run = True
start = time.time()
if self.simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
elif self.simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
else:
pynml.print_comment_v('Unsupported simulator: %s'%self.simulator)
exit()
secs = time.time()-start
pynml.print_comment_v("Ran simulation in %s in %f seconds (%f mins)\n\n"%(self.simulator, secs, secs/60.0))
self.t = [t*1000 for t in results['t']]
self.volts = {}
for key in results.keys():
if key != 't':
self.volts[key] = [v*1000 for v in results[key]]
if __name__ == '__main__':
sim_time = 700
dt = 0.05
if len(sys.argv) == 2 and sys.argv[1] == '-net':
sim = NeuroMLSimulation('TestNet',
'../../examples/test_data/simplenet.nml',
'simplenet',
sim_time,
dt,
'jNeuroML',
'temp/')
sim.go()
sim.show()
else:
sim = NeuroMLSimulation('TestHH',
'../../examples/test_data/HHCellNetwork.net.nml',
'HHCellNetwork',
sim_time,
dt,
'jNeuroML',
'temp')
sim.go()
sim.show()
| lgpl-3.0 | -7,017,770,894,526,758,000 | 31.271084 | 127 | 0.417958 | false |
Azure/azure-sdk-for-python | sdk/powerbiembedded/azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/operation_py3.py | 1 | 1163 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""Operation.
:param name: The name of the operation being performed on this particular
object. This name should match the action name that appears in RBAC / the
event service.
:type name: str
:param display:
:type display: ~azure.mgmt.powerbiembedded.models.Display
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'Display'},
}
def __init__(self, *, name: str=None, display=None, **kwargs) -> None:
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
| mit | 5,197,764,973,507,058,000 | 33.205882 | 78 | 0.574377 | false |
Anderson0026/mapproxy | mapproxy/script/conf/app.py | 1 | 6606 | # -:- encoding: utf-8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import sys
import os
import optparse
import logging
import textwrap
import datetime
import xml.etree.ElementTree
import yaml
from contextlib import contextmanager
from cStringIO import StringIO
from .sources import sources
from .layers import layers
from .caches import caches
from .seeds import seeds
from .utils import update_config, MapProxyYAMLDumper, download_capabilities
from mapproxy.config.loader import load_configuration
from mapproxy.util.ext.wmsparse import parse_capabilities
def setup_logging(level=logging.INFO):
mapproxy_log = logging.getLogger('mapproxy')
mapproxy_log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
mapproxy_log.addHandler(ch)
def write_header(f, capabilities):
print >>f, '# MapProxy configuration automatically generated from:'
print >>f, '# %s' % capabilities
print >>f, '#'
print >>f, '# NOTE: The generated configuration can be highly inefficient,'
print >>f, '# especially when multiple layers and caches are requested at once.'
print >>f, '# Make sure you understand the generated configuration!'
print >>f, '#'
print >>f, '# Created on %s with:' % datetime.datetime.now()
print >>f, ' \\\n'.join(textwrap.wrap(' '.join(sys.argv), initial_indent='# ', subsequent_indent='# '))
print >>f, ''
@contextmanager
def file_or_stdout(name):
if name == '-':
yield sys.stdout
else:
with open(name, 'wb') as f:
yield f
def config_command(args):
parser = optparse.OptionParser("usage: %prog autoconfig [options]")
parser.add_option('--capabilities',
help="URL or filename of WMS 1.1.1/1.3.0 capabilities document")
parser.add_option('--output', help="filename for created MapProxy config [default: -]", default="-")
parser.add_option('--output-seed', help="filename for created seeding config")
parser.add_option('--base', help='base config to include in created MapProxy config')
parser.add_option('--overwrite',
help='YAML file with overwrites for the created MapProxy config')
parser.add_option('--overwrite-seed',
help='YAML file with overwrites for the created seeding config')
parser.add_option('--force', default=False, action='store_true',
help="overwrite existing files")
options, args = parser.parse_args(args)
if not options.capabilities:
parser.print_help()
print >>sys.stderr, "\nERROR: --capabilities required"
return 2
if not options.output and not options.output_seed:
parser.print_help()
print >>sys.stderr, "\nERROR: --output and/or --output-seed required"
return 2
if not options.force:
if options.output and options.output != '-' and os.path.exists(options.output):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output
return 2
if options.output_seed and options.output_seed != '-' and os.path.exists(options.output_seed):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output_seed
return 2
log = logging.getLogger('mapproxy_conf_cmd')
log.addHandler(logging.StreamHandler())
setup_logging(logging.WARNING)
srs_grids = {}
if options.base:
base = load_configuration(options.base)
for name, grid_conf in base.grids.iteritems():
if name.startswith('GLOBAL_'):
continue
srs_grids[grid_conf.tile_grid().srs.srs_code] = name
cap_doc = options.capabilities
if cap_doc.startswith(('http://', 'https://')):
cap_doc = download_capabilities(options.capabilities).read()
else:
cap_doc = open(cap_doc, 'rb').read()
try:
cap = parse_capabilities(StringIO(cap_doc))
except (xml.etree.ElementTree.ParseError, ValueError), ex:
print >>sys.stderr, ex
print >>sys.stderr, cap_doc[:1000] + ('...' if len(cap_doc) > 1000 else '')
return 3
overwrite = None
if options.overwrite:
with open(options.overwrite, 'rb') as f:
overwrite = yaml.load(f)
overwrite_seed = None
if options.overwrite_seed:
with open(options.overwrite_seed, 'rb') as f:
overwrite_seed = yaml.load(f)
conf = {}
if options.base:
conf['base'] = os.path.abspath(options.base)
conf['services'] = {'wms': {'md': {'title': cap.metadata()['title']}}}
if overwrite:
conf['services'] = update_config(conf['services'], overwrite.pop('service', {}))
conf['sources'] = sources(cap)
if overwrite:
conf['sources'] = update_config(conf['sources'], overwrite.pop('sources', {}))
conf['caches'] = caches(cap, conf['sources'], srs_grids=srs_grids)
if overwrite:
conf['caches'] = update_config(conf['caches'], overwrite.pop('caches', {}))
conf['layers'] = layers(cap, conf['caches'])
if overwrite:
conf['layers'] = update_config(conf['layers'], overwrite.pop('layers', {}))
if overwrite:
conf = update_config(conf, overwrite)
seed_conf = {}
seed_conf['seeds'], seed_conf['cleanups'] = seeds(cap, conf['caches'])
if overwrite_seed:
seed_conf = update_config(seed_conf, overwrite_seed)
if options.output:
with file_or_stdout(options.output) as f:
write_header(f, options.capabilities)
yaml.dump(conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
if options.output_seed:
with file_or_stdout(options.output_seed) as f:
write_header(f, options.capabilities)
yaml.dump(seed_conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
return 0 | apache-2.0 | -2,084,402,879,449,501,700 | 34.713514 | 110 | 0.65607 | false |
stefan-jonasson/home-assistant | homeassistant/components/telegram_bot/__init__.py | 2 | 26538 | """
Component to send and receive Telegram messages.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/telegram_bot/
"""
import asyncio
import io
from functools import partial
import logging
import os
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA, ATTR_MESSAGE, ATTR_TITLE)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
ATTR_COMMAND, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_API_KEY,
CONF_PLATFORM, CONF_TIMEOUT, HTTP_DIGEST_AUTHENTICATION)
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import TemplateError
from homeassistant.setup import async_prepare_setup_platform
REQUIREMENTS = ['python-telegram-bot==8.1.1']
_LOGGER = logging.getLogger(__name__)
ATTR_ARGS = 'args'
ATTR_AUTHENTICATION = 'authentication'
ATTR_CALLBACK_QUERY = 'callback_query'
ATTR_CALLBACK_QUERY_ID = 'callback_query_id'
ATTR_CAPTION = 'caption'
ATTR_CHAT_ID = 'chat_id'
ATTR_CHAT_INSTANCE = 'chat_instance'
ATTR_DISABLE_NOTIF = 'disable_notification'
ATTR_DISABLE_WEB_PREV = 'disable_web_page_preview'
ATTR_EDITED_MSG = 'edited_message'
ATTR_FILE = 'file'
ATTR_FROM_FIRST = 'from_first'
ATTR_FROM_LAST = 'from_last'
ATTR_KEYBOARD = 'keyboard'
ATTR_KEYBOARD_INLINE = 'inline_keyboard'
ATTR_MESSAGEID = 'message_id'
ATTR_MSG = 'message'
ATTR_MSGID = 'id'
ATTR_PARSER = 'parse_mode'
ATTR_PASSWORD = 'password'
ATTR_REPLY_TO_MSGID = 'reply_to_message_id'
ATTR_REPLYMARKUP = 'reply_markup'
ATTR_SHOW_ALERT = 'show_alert'
ATTR_TARGET = 'target'
ATTR_TEXT = 'text'
ATTR_URL = 'url'
ATTR_USER_ID = 'user_id'
ATTR_USERNAME = 'username'
CONF_ALLOWED_CHAT_IDS = 'allowed_chat_ids'
CONF_PROXY_URL = 'proxy_url'
CONF_PROXY_PARAMS = 'proxy_params'
DOMAIN = 'telegram_bot'
SERVICE_SEND_MESSAGE = 'send_message'
SERVICE_SEND_PHOTO = 'send_photo'
SERVICE_SEND_DOCUMENT = 'send_document'
SERVICE_SEND_LOCATION = 'send_location'
SERVICE_EDIT_MESSAGE = 'edit_message'
SERVICE_EDIT_CAPTION = 'edit_caption'
SERVICE_EDIT_REPLYMARKUP = 'edit_replymarkup'
SERVICE_ANSWER_CALLBACK_QUERY = 'answer_callback_query'
SERVICE_DELETE_MESSAGE = 'delete_message'
EVENT_TELEGRAM_CALLBACK = 'telegram_callback'
EVENT_TELEGRAM_COMMAND = 'telegram_command'
EVENT_TELEGRAM_TEXT = 'telegram_text'
PARSER_HTML = 'html'
PARSER_MD = 'markdown'
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ALLOWED_CHAT_IDS):
vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER, default=PARSER_MD): cv.string,
vol.Optional(CONF_PROXY_URL): cv.string,
vol.Optional(CONF_PROXY_PARAMS): dict,
})
BASE_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER): cv.string,
vol.Optional(ATTR_DISABLE_NOTIF): cv.boolean,
vol.Optional(ATTR_DISABLE_WEB_PREV): cv.boolean,
vol.Optional(ATTR_KEYBOARD): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_SEND_MESSAGE = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
})
SERVICE_SCHEMA_SEND_FILE = BASE_SERVICE_SCHEMA.extend({
vol.Optional(ATTR_URL): cv.template,
vol.Optional(ATTR_FILE): cv.template,
vol.Optional(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_PASSWORD): cv.string,
vol.Optional(ATTR_AUTHENTICATION): cv.string,
})
SERVICE_SCHEMA_SEND_LOCATION = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_LONGITUDE): cv.template,
vol.Required(ATTR_LATITUDE): cv.template,
})
SERVICE_SCHEMA_EDIT_MESSAGE = SERVICE_SCHEMA_SEND_MESSAGE.extend({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
})
SERVICE_SCHEMA_EDIT_CAPTION = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_EDIT_REPLYMARKUP = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Required(ATTR_CALLBACK_QUERY_ID): vol.Coerce(int),
vol.Optional(ATTR_SHOW_ALERT): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_DELETE_MESSAGE = vol.Schema({
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
}, extra=vol.ALLOW_EXTRA)
SERVICE_MAP = {
SERVICE_SEND_MESSAGE: SERVICE_SCHEMA_SEND_MESSAGE,
SERVICE_SEND_PHOTO: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_DOCUMENT: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_LOCATION: SERVICE_SCHEMA_SEND_LOCATION,
SERVICE_EDIT_MESSAGE: SERVICE_SCHEMA_EDIT_MESSAGE,
SERVICE_EDIT_CAPTION: SERVICE_SCHEMA_EDIT_CAPTION,
SERVICE_EDIT_REPLYMARKUP: SERVICE_SCHEMA_EDIT_REPLYMARKUP,
SERVICE_ANSWER_CALLBACK_QUERY: SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY,
SERVICE_DELETE_MESSAGE: SERVICE_SCHEMA_DELETE_MESSAGE,
}
def load_data(hass, url=None, filepath=None, username=None, password=None,
authentication=None, num_retries=5):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# Load photo from URL
params = {"timeout": 15}
if username is not None and password is not None:
if authentication == HTTP_DIGEST_AUTHENTICATION:
params["auth"] = HTTPDigestAuth(username, password)
else:
params["auth"] = HTTPBasicAuth(username, password)
retry_num = 0
while retry_num < num_retries:
req = requests.get(url, **params)
if not req.ok:
_LOGGER.warning("Status code %s (retry #%s) loading %s.",
req.status_code, retry_num + 1, url)
else:
data = io.BytesIO(req.content)
if data.read():
data.seek(0)
data.name = url
return data
_LOGGER.warning("Empty data (retry #%s) in %s).",
retry_num + 1, url)
retry_num += 1
_LOGGER.warning("Can't load photo in %s after %s retries.",
url, retry_num)
elif filepath is not None:
if hass.config.is_allowed_path(filepath):
return open(filepath, "rb")
_LOGGER.warning("'%s' are not secure to load data from!", filepath)
else:
_LOGGER.warning("Can't load photo. No photo found in params!")
except (OSError, TypeError) as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
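# Hedged usage sketch (not part of the original component); the URL and file
# path below are placeholder values, and `hass` must be a configured Home
# Assistant instance for the filepath branch to pass its allowed-path check.
def _example_load_data(hass):  # pragma: no cover - illustration only
    from_url = load_data(hass, url='https://example.org/image.png')
    from_disk = load_data(hass, filepath='/config/www/report.pdf')
    # each call returns a file-like object ready to pass to sendPhoto /
    # sendDocument, or None if loading failed
    return from_url, from_disk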
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the Telegram bot component."""
if not config[DOMAIN]:
return False
p_config = config[DOMAIN][0]
descriptions = yield from hass.async_add_job(
load_yaml_config_file,
os.path.join(os.path.dirname(__file__), 'services.yaml'))
p_type = p_config.get(CONF_PLATFORM)
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
try:
receiver_service = yield from \
platform.async_setup_platform(hass, p_config)
if receiver_service is False:
_LOGGER.error(
"Failed to initialize Telegram bot %s", p_type)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return False
notify_service = TelegramNotificationService(
hass,
p_config.get(CONF_API_KEY),
p_config.get(CONF_ALLOWED_CHAT_IDS),
p_config.get(ATTR_PARSER),
p_config.get(CONF_PROXY_URL),
p_config.get(CONF_PROXY_PARAMS)
)
@asyncio.coroutine
def async_send_telegram_message(service):
"""Handle sending Telegram Bot message service calls."""
def _render_template_attr(data, attribute):
attribute_templ = data.get(attribute)
if attribute_templ:
if any([isinstance(attribute_templ, vtype)
for vtype in [float, int, str]]):
data[attribute] = attribute_templ
else:
attribute_templ.hass = hass
try:
data[attribute] = attribute_templ.async_render()
except TemplateError as exc:
_LOGGER.error(
"TemplateError in %s: %s -> %s",
attribute, attribute_templ.template, exc)
data[attribute] = attribute_templ.template
msgtype = service.service
kwargs = dict(service.data)
for attribute in [ATTR_MESSAGE, ATTR_TITLE, ATTR_URL, ATTR_FILE,
ATTR_CAPTION, ATTR_LONGITUDE, ATTR_LATITUDE]:
_render_template_attr(kwargs, attribute)
_LOGGER.debug("New telegram message %s: %s", msgtype, kwargs)
if msgtype == SERVICE_SEND_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.send_message, **kwargs))
elif msgtype == SERVICE_SEND_PHOTO:
yield from hass.async_add_job(
partial(notify_service.send_file, True, **kwargs))
elif msgtype == SERVICE_SEND_DOCUMENT:
yield from hass.async_add_job(
partial(notify_service.send_file, False, **kwargs))
elif msgtype == SERVICE_SEND_LOCATION:
yield from hass.async_add_job(
partial(notify_service.send_location, **kwargs))
elif msgtype == SERVICE_ANSWER_CALLBACK_QUERY:
yield from hass.async_add_job(
partial(notify_service.answer_callback_query, **kwargs))
elif msgtype == SERVICE_DELETE_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.delete_message, **kwargs))
else:
yield from hass.async_add_job(
partial(notify_service.edit_message, msgtype, **kwargs))
# Register notification services
for service_notif, schema in SERVICE_MAP.items():
hass.services.async_register(
DOMAIN, service_notif, async_send_telegram_message,
descriptions.get(service_notif), schema=schema)
return True
class TelegramNotificationService:
"""Implement the notification services for the Telegram Bot domain."""
def __init__(self, hass, api_key, allowed_chat_ids, parser,
proxy_url=None, proxy_params=None):
"""Initialize the service."""
from telegram import Bot
from telegram.parsemode import ParseMode
from telegram.utils.request import Request
self.allowed_chat_ids = allowed_chat_ids
self._default_user = self.allowed_chat_ids[0]
self._last_message_id = {user: None for user in self.allowed_chat_ids}
self._parsers = {PARSER_HTML: ParseMode.HTML,
PARSER_MD: ParseMode.MARKDOWN}
self._parse_mode = self._parsers.get(parser)
request = None
if proxy_url is not None:
request = Request(proxy_url=proxy_url,
urllib3_proxy_kwargs=proxy_params)
self.bot = Bot(token=api_key, request=request)
self.hass = hass
def _get_msg_ids(self, msg_data, chat_id):
"""Get the message id to edit.
This can be one of (message_id, inline_message_id) from a msg dict,
returning a tuple.
**You can use 'last' as message_id** to edit
        the last message sent in the chat_id.
"""
message_id = inline_message_id = None
if ATTR_MESSAGEID in msg_data:
message_id = msg_data[ATTR_MESSAGEID]
if (isinstance(message_id, str) and (message_id == 'last') and
(self._last_message_id[chat_id] is not None)):
message_id = self._last_message_id[chat_id]
else:
inline_message_id = msg_data['inline_message_id']
return message_id, inline_message_id
def _get_target_chat_ids(self, target):
"""Validate chat_id targets or return default target (first).
:param target: optional list of integers ([12234, -12345])
:return list of chat_id targets (integers)
"""
if target is not None:
if isinstance(target, int):
target = [target]
chat_ids = [t for t in target if t in self.allowed_chat_ids]
if chat_ids:
return chat_ids
_LOGGER.warning("Unallowed targets: %s, using default: %s",
target, self._default_user)
return [self._default_user]
def _get_msg_kwargs(self, data):
"""Get parameters in message data kwargs."""
def _make_row_inline_keyboard(row_keyboard):
"""Make a list of InlineKeyboardButtons.
It can accept:
- a list of tuples like:
`[(text_b1, data_callback_b1),
(text_b2, data_callback_b2), ...]
- a string like: `/cmd1, /cmd2, /cmd3`
- or a string like: `text_b1:/cmd1, text_b2:/cmd2`
"""
from telegram import InlineKeyboardButton
buttons = []
if isinstance(row_keyboard, str):
for key in row_keyboard.split(","):
if ':/' in key:
# commands like: 'Label:/cmd' become ('Label', '/cmd')
label = key.split(':/')[0]
command = key[len(label) + 1:]
buttons.append(
InlineKeyboardButton(label, callback_data=command))
else:
# commands like: '/cmd' become ('CMD', '/cmd')
label = key.strip()[1:].upper()
buttons.append(
InlineKeyboardButton(label, callback_data=key))
elif isinstance(row_keyboard, list):
for entry in row_keyboard:
text_btn, data_btn = entry
buttons.append(
InlineKeyboardButton(text_btn, callback_data=data_btn))
else:
raise ValueError(str(row_keyboard))
return buttons
# Defaults
params = {
ATTR_PARSER: self._parse_mode,
ATTR_DISABLE_NOTIF: False,
ATTR_DISABLE_WEB_PREV: None,
ATTR_REPLY_TO_MSGID: None,
ATTR_REPLYMARKUP: None,
CONF_TIMEOUT: None
}
if data is not None:
if ATTR_PARSER in data:
params[ATTR_PARSER] = self._parsers.get(
data[ATTR_PARSER], self._parse_mode)
if CONF_TIMEOUT in data:
params[CONF_TIMEOUT] = data[CONF_TIMEOUT]
if ATTR_DISABLE_NOTIF in data:
params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF]
if ATTR_DISABLE_WEB_PREV in data:
params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV]
if ATTR_REPLY_TO_MSGID in data:
params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID]
# Keyboards:
if ATTR_KEYBOARD in data:
from telegram import ReplyKeyboardMarkup
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup(
[[key.strip() for key in row.split(",")] for row in keys])
elif ATTR_KEYBOARD_INLINE in data:
from telegram import InlineKeyboardMarkup
keys = data.get(ATTR_KEYBOARD_INLINE)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup(
[_make_row_inline_keyboard(row) for row in keys])
return params
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg):
"""Send one message."""
from telegram.error import TelegramError
try:
out = func_send(*args_msg, **kwargs_msg)
if not isinstance(out, bool) and hasattr(out, ATTR_MESSAGEID):
chat_id = out.chat_id
self._last_message_id[chat_id] = out[ATTR_MESSAGEID]
_LOGGER.debug("Last message ID: %s (from chat_id %s)",
self._last_message_id, chat_id)
elif not isinstance(out, bool):
_LOGGER.warning("Update last message: out_type:%s, out=%s",
type(out), out)
return out
except TelegramError as exc:
_LOGGER.error("%s: %s. Args: %s, kwargs: %s",
msg_error, exc, args_msg, kwargs_msg)
def send_message(self, message="", target=None, **kwargs):
"""Send a message to one or multiple pre-allowed chat IDs."""
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send message in chat ID %s with params: %s",
chat_id, params)
self._send_msg(self.bot.sendMessage,
"Error sending message",
chat_id, text, **params)
def delete_message(self, chat_id=None, **kwargs):
"""Delete a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, _ = self._get_msg_ids(kwargs, chat_id)
_LOGGER.debug("Delete message %s in chat ID %s", message_id, chat_id)
deleted = self._send_msg(self.bot.deleteMessage,
"Error deleting message",
chat_id, message_id)
# reduce message_id anyway:
if self._last_message_id[chat_id] is not None:
# change last msg_id for deque(n_msgs)?
self._last_message_id[chat_id] -= 1
return deleted
def edit_message(self, type_edit, chat_id=None, **kwargs):
"""Edit a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, inline_message_id = self._get_msg_ids(kwargs, chat_id)
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Edit message %s in chat ID %s with params: %s",
message_id or inline_message_id, chat_id, params)
if type_edit == SERVICE_EDIT_MESSAGE:
message = kwargs.get(ATTR_MESSAGE)
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
_LOGGER.debug("Editing message with ID %s.",
message_id or inline_message_id)
return self._send_msg(self.bot.editMessageText,
"Error editing text message",
text, chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
elif type_edit == SERVICE_EDIT_CAPTION:
func_send = self.bot.editMessageCaption
params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION)
else:
func_send = self.bot.editMessageReplyMarkup
return self._send_msg(func_send,
"Error editing message attributes",
chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
def answer_callback_query(self, message, callback_query_id,
show_alert=False, **kwargs):
"""Answer a callback originated with a press in an inline keyboard."""
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Answer callback query with callback ID %s: %s, "
"alert: %s.", callback_query_id, message, show_alert)
self._send_msg(self.bot.answerCallbackQuery,
"Error sending answer callback query",
callback_query_id,
text=message, show_alert=show_alert, **params)
def send_file(self, is_photo=True, target=None, **kwargs):
"""Send a photo or a document."""
params = self._get_msg_kwargs(kwargs)
caption = kwargs.get(ATTR_CAPTION)
func_send = self.bot.sendPhoto if is_photo else self.bot.sendDocument
file_content = load_data(
self.hass,
url=kwargs.get(ATTR_URL),
filepath=kwargs.get(ATTR_FILE),
username=kwargs.get(ATTR_USERNAME),
password=kwargs.get(ATTR_PASSWORD),
authentication=kwargs.get(ATTR_AUTHENTICATION),
)
if file_content:
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send file to chat ID %s. Caption: %s.",
chat_id, caption)
self._send_msg(func_send, "Error sending file",
chat_id, file_content,
caption=caption, **params)
file_content.seek(0)
else:
_LOGGER.error("Can't send file with kwargs: %s", kwargs)
def send_location(self, latitude, longitude, target=None, **kwargs):
"""Send a location."""
latitude = float(latitude)
longitude = float(longitude)
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send location %s/%s to chat ID %s.",
latitude, longitude, chat_id)
self._send_msg(self.bot.sendLocation,
"Error sending location",
chat_id=chat_id,
latitude=latitude, longitude=longitude, **params)
class BaseTelegramBotEntity:
"""The base class for the telegram bot."""
def __init__(self, hass, allowed_chat_ids):
"""Initialize the bot base class."""
self.allowed_chat_ids = allowed_chat_ids
self.hass = hass
def _get_message_data(self, msg_data):
"""Return boolean msg_data_is_ok and dict msg_data."""
if not msg_data:
return False, None
bad_fields = ('text' not in msg_data and
'data' not in msg_data and
'chat' not in msg_data)
if bad_fields or 'from' not in msg_data:
# Message is not correct.
_LOGGER.error("Incoming message does not have required data (%s)",
msg_data)
return False, None
if (msg_data['from'].get('id') not in self.allowed_chat_ids or
('chat' in msg_data and
msg_data['chat'].get('id') not in self.allowed_chat_ids)):
# Origin is not allowed.
_LOGGER.error("Incoming message is not allowed (%s)", msg_data)
return True, None
data = {
ATTR_USER_ID: msg_data['from']['id'],
ATTR_FROM_FIRST: msg_data['from']['first_name']
}
if 'last_name' in msg_data['from']:
data[ATTR_FROM_LAST] = msg_data['from']['last_name']
if 'chat' in msg_data:
data[ATTR_CHAT_ID] = msg_data['chat']['id']
elif ATTR_MESSAGE in msg_data and 'chat' in msg_data[ATTR_MESSAGE]:
data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id']
return True, data
def process_message(self, data):
"""Check for basic message rules and fire an event if message is ok."""
if ATTR_MSG in data or ATTR_EDITED_MSG in data:
event = EVENT_TELEGRAM_COMMAND
if ATTR_MSG in data:
data = data.get(ATTR_MSG)
else:
data = data.get(ATTR_EDITED_MSG)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
if 'text' in data:
if data['text'][0] == '/':
pieces = data['text'].split(' ')
event_data[ATTR_COMMAND] = pieces[0]
event_data[ATTR_ARGS] = pieces[1:]
else:
event_data[ATTR_TEXT] = data['text']
event = EVENT_TELEGRAM_TEXT
else:
_LOGGER.warning("Message without text data received: %s", data)
event_data[ATTR_TEXT] = str(data)
event = EVENT_TELEGRAM_TEXT
self.hass.bus.async_fire(event, event_data)
return True
elif ATTR_CALLBACK_QUERY in data:
event = EVENT_TELEGRAM_CALLBACK
data = data.get(ATTR_CALLBACK_QUERY)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
event_data[ATTR_DATA] = data[ATTR_DATA]
event_data[ATTR_MSG] = data[ATTR_MSG]
event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE]
event_data[ATTR_MSGID] = data[ATTR_MSGID]
self.hass.bus.async_fire(event, event_data)
return True
else:
_LOGGER.warning("Message with unknown data received: %s", data)
return True
| mit | -1,572,939,203,941,558,300 | 40.272162 | 79 | 0.579094 | false |
SonarOpenCommunity/sonar-cxx | cxx-sensors/src/tools/clangsa_createrules.py | 1 | 6838 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SonarQube C++ Community Plugin (cxx plugin)
# Copyright (C) 2010-2021 SonarOpenCommunity
# http://github.com/SonarOpenCommunity/sonar-cxx
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
#
"""
Simple script to generate the rules xml file for SonarQube cxx plugin
from the Clang Static Analyzer checkers.
The clang compiler should be available in the PATH,
or the output of 'clang -cc1 -analyzer-checker-help'
can be provided as an input file.
"""
from xml.dom import minidom
import argparse
import re
import subprocess
import sys
import xml.etree.ElementTree as ET
def CDATA(text=None):
element = ET.Element('![CDATA[')
element.text = text
return element
ET._original_serialize_xml = ET._serialize_xml
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
if elem.tag == '![CDATA[':
write("<%s%s]]>" % (elem.tag, elem.text))
return
return ET._original_serialize_xml(
write, elem, qnames, namespaces, short_empty_elements, **kwargs)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml
def collect_checkers(clangsa_output):
"""
Parse clang static analyzer output.
Return the list of checkers and the description.
"""
checkers_data = {}
# Checker name and description in one line.
pattern = re.compile(r'^\s\s(?P<checker_name>\S*)\s*(?P<description>.*)')
checker_name = None
for line in clangsa_output.splitlines():
line = line.decode(encoding='UTF-8')
if re.match(r'^CHECKERS:', line) or line == '':
continue
elif checker_name and not re.match(r'^\s\s\S', line):
# Collect description for the checker name.
checkers_data[checker_name] = line.strip()
checker_name = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
# Only checker name is in the line.
checker_name = line.strip()
else:
# Checker name and description is in one line.
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
checkers_data[current['checker_name']] = current['description']
# Filter out debug checkers.
non_debug = {k: v for k, v in checkers_data.items() if 'debug' not in k}
return non_debug
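# Hedged illustration (not part of the original script): a tiny sample of the
# 'clang -cc1 -analyzer-checker-help' output format that collect_checkers()
# expects; the checker names below are examples only.
def _example_collect_checkers():  # pragma: no cover - illustration only
    sample = (b"CHECKERS:\n"
              b"  core.DivideZero          Check for division by zero\n"
              b"  debug.DumpCFG            Display Control-Flow Graphs\n")
    # returns {'core.DivideZero': 'Check for division by zero'} with the
    # debug.* entry filtered out
    return collect_checkers(sample)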
def main():
parser = argparse.ArgumentParser(
description="""Generate the rules xml file for cxx plugin
plugin from the Clang Static Analyzer checkers.
https://clang-analyzer.llvm.org/""",
usage='%(prog)s -o clangsa.xml')
parser.add_argument('-i', '--input', dest='input_file', action='store',
required=False,
help="""Input file to read rules.
If parameter does not exist
it tries to call clang.""")
parser.add_argument('-o', '--output', dest='output_file', action='store',
required=True,
help="""Output file to write the xml rules.
If the file already exists
it will be overwritten.""")
args = parser.parse_args()
clang_version = "clang version ???".encode('utf-8')
if args.input_file:
with open(args.input_file, 'r') as input:
checker_data = collect_checkers(input.read().encode('utf-8'))
else:
try:
clang_version = ['clang', '--version']
version_info = subprocess.run(clang_version,
stdout=subprocess.PIPE,
check=True).stdout
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
# Only the first line is interesting.
clang_version = version_info.splitlines()[0]
try:
clang_checkers = ['clang', '-cc1', '-analyzer-checker-help']
checkers_output = subprocess.run(clang_checkers,
stdout=subprocess.PIPE,
check=True).stdout
print("Collecting clang checkers ...", end='')
checker_data = collect_checkers(checkers_output)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
if not checker_data:
print("No checkers could be processed.")
sys.exit(1)
print(" done.")
print("Generating rules xml ...", end='')
# build a tree structure
rules = ET.Element("rules")
comment = " C and C++ rules for Clang Static Analyzer. " \
"https://clang-analyzer.llvm.org/\n" + \
"Rules list was generated based on " + \
clang_version.decode("utf-8") + " "
rules.append(ET.Comment(comment))
for checker_name, description in checker_data.items():
rule = ET.SubElement(rules, "rule")
key = ET.SubElement(rule, "key")
name = ET.SubElement(rule, "name")
desc = ET.SubElement(rule, "description")
sev = ET.SubElement(rule, "severity")
c_type = ET.SubElement(rule, "type")
key.text = checker_name
name.text = checker_name
sev.text = "MAJOR"
c_type.text = "BUG"
if sev.text != 'INFO':
ET.SubElement(rule, 'remediationFunction').text = 'LINEAR'
ET.SubElement(rule, 'remediationFunctionGapMultiplier').text = '5min'
auto_tag = checker_name.split('.')[0]
tag = ET.SubElement(rule, "tag")
tag.text = auto_tag.lower()
cdata = CDATA('\n<p>' + description.strip() +
'\n</p>\n <h2>References</h2>'
' <p><a href="https://clang-analyzer.llvm.org/"'
' target="_blank">clang-analyzer.llvm.org</a></p> \n')
desc.append(cdata)
xmlstr = minidom.parseString(
ET.tostring(rules, method='xml')).toprettyxml(indent=" ")
print(" done.")
with open(args.output_file, 'w') as out:
out.write(xmlstr)
if __name__ == '__main__':
main()
| lgpl-3.0 | 6,750,779,244,084,423,000 | 33.0199 | 81 | 0.580579 | false |
pyfidelity/rest-seed | backend/backrest/tests/test_change_password.py | 1 | 1714 | from pytest import fixture, mark
from transaction import commit
@fixture(scope='module')
def url(testing):
return testing.route_url('password-change')
@mark.user('alice')
def test_change_password(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
@mark.user('alice')
def test_change_password_twice(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
commit()
data = dict(password='alice', current='foo!')
browser.put_json(url, data)
alice = alice.query.one() # refetch alice after `commit`
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_with_wrong_current_password(browser, url, alice):
data = dict(password='foo!', current='hurz?')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'Password does not match')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_without_current_password(browser, url, alice):
data = dict(password='foo!')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'current is missing')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_set_password_without_existing_password(browser, url, alice):
alice.password = None
data = dict(password='foo!', current=None)
browser.put_json(url, data)
assert alice.validate_password('foo!')
| bsd-2-clause | -1,270,315,287,765,068,800 | 31.961538 | 74 | 0.673279 | false |
mikedh/trimesh | trimesh/proximity.py | 1 | 19400 | """
proximity.py
---------------
Query mesh- point proximity.
"""
import numpy as np
from . import util
from .grouping import group_min
from .constants import tol, log_time
from .triangles import closest_point as closest_point_corresponding
from .triangles import points_to_barycentric
try:
from scipy.spatial import cKDTree
except BaseException as E:
from .exceptions import closure
cKDTree = closure(E)
def nearby_faces(mesh, points):
"""
For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
-----------
candidates : (points,) int
Sequence of indexes for mesh.faces
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# an r-tree containing the axis aligned bounding box for every triangle
rtree = mesh.triangles_tree
# a kd-tree containing every vertex of the mesh
kdtree = cKDTree(mesh.vertices[mesh.referenced_vertices])
# query the distance to the nearest vertex to get AABB of a sphere
distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
distance_vertex += tol.merge
# axis aligned bounds
bounds = np.column_stack((points - distance_vertex,
points + distance_vertex))
# faces that intersect axis aligned bounding box
candidates = [list(rtree.intersection(b)) for b in bounds]
return candidates
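# Hedged usage sketch (not part of the original module); the box mesh and
# query points are example values only.
def _example_nearby_faces():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    points = [[0.0, 0.0, 0.6], [0.4, 0.4, 0.0]]
    # one list of candidate face indexes per query point
    return nearby_faces(mesh, points)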
def closest_point_naive(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes mesh to have same interfaces as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point
"""
# get triangles from mesh
triangles = mesh.triangles.view(np.ndarray)
# establish that input points are sane
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# create a giant tiled array of each point tiled len(triangles) times
points_tiled = np.tile(points, (1, len(triangles)))
on_triangle = np.array([closest_point_corresponding(
triangles, i.reshape((-1, 3))) for i in points_tiled])
# distance squared
distance_2 = [((i - q)**2).sum(axis=1)
for i, q in zip(on_triangle, points)]
triangle_id = np.array([i.argmin() for i in distance_2])
# closest cartesian point
closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)])
distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5
return closest, distance, triangle_id
def closest_point(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to mesh.
triangle_id : (m,) int
Index of triangle containing closest point
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# do a tree- based query for faces near each point
candidates = nearby_faces(mesh, points)
# view triangles as an ndarray so we don't have to recompute
# the MD5 during all of the subsequent advanced indexing
triangles = mesh.triangles.view(np.ndarray)
# create the corresponding list of triangles
# and query points to send to the closest_point function
all_candidates = np.concatenate(candidates)
num_candidates = list(map(len, candidates))
tile_idxs = np.repeat(np.arange(len(points)), num_candidates)
query_point = points[tile_idxs, :]
query_tri = triangles[all_candidates]
# do the computation for closest point
query_close = closest_point_corresponding(query_tri, query_point)
query_group = np.cumsum(num_candidates)[:-1]
# vectors and distances for
# closest point to query point
query_vector = query_point - query_close
query_distance = util.diagonal_dot(query_vector, query_vector)
# get best two candidate indices by arg-sorting the per-query_distances
qds = np.array_split(query_distance, query_group)
idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] for qd in qds])
idxs[1:] += query_group.reshape(-1, 1)
# points, distances and triangle ids for best two candidates
two_points = query_close[idxs]
two_dists = query_distance[idxs]
two_candidates = all_candidates[idxs]
# the first candidate is the best result for unambiguous cases
result_close = query_close[idxs[:, 0]]
result_tid = two_candidates[:, 0]
result_distance = two_dists[:, 0]
# however: same closest point on two different faces
# find the best one and correct triangle ids if necessary
check_distance = two_dists.ptp(axis=1) < tol.merge
check_magnitude = np.all(np.abs(two_dists) > tol.merge, axis=1)
    # mask results where corrections may be applied
c_mask = np.bitwise_and(check_distance, check_magnitude)
# get two face normals for the candidate points
normals = mesh.face_normals[two_candidates[c_mask]]
# compute normalized surface-point to query-point vectors
vectors = (query_vector[idxs[c_mask]] /
two_dists[c_mask].reshape(-1, 2, 1) ** 0.5)
# compare enclosed angle for both face normals
dots = (normals * vectors).sum(axis=2)
# take the idx with the most positive angle
# allows for selecting the correct candidate triangle id
c_idxs = dots.argmax(axis=1)
# correct triangle ids where necessary
# closest point and distance remain valid
result_tid[c_mask] = two_candidates[c_mask, c_idxs]
result_distance[c_mask] = two_dists[c_mask, c_idxs]
result_close[c_mask] = two_points[c_mask, c_idxs]
# we were comparing the distance squared so
# now take the square root in one vectorized operation
result_distance **= .5
return result_close, result_distance, result_tid
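# Hedged usage sketch (not part of the original module); the sphere mesh and
# query point are example values only.
def _example_closest_point():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.icosphere(radius=1.0)
    closest, distance, triangle_id = closest_point(mesh, [[2.0, 0.0, 0.0]])
    # closest lies on the discretized unit sphere (roughly [1, 0, 0]),
    # distance is roughly 1.0, triangle_id indexes mesh.faces
    return closest, distance, triangle_id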
def signed_distance(mesh, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh
"""
# make sure we have a numpy array
points = np.asanyarray(points, dtype=np.float64)
# find the closest point on the mesh to the queried points
closest, distance, triangle_id = closest_point(mesh, points)
# we only care about nonzero distances
nonzero = distance > tol.merge
if not nonzero.any():
return distance
    # For closest points that project directly in to the triangle, compute sign from
    # triangle normal. Project each point in to the closest triangle plane
nonzero = np.where(nonzero)[0]
normals = mesh.face_normals[triangle_id]
projection = (points[nonzero] -
(normals[nonzero].T * np.einsum(
"ij,ij->i",
points[nonzero] - closest[nonzero],
normals[nonzero])).T)
# Determine if the projection lies within the closest triangle
barycentric = points_to_barycentric(
mesh.triangles[triangle_id[nonzero]],
projection)
ontriangle = ~((
(barycentric < -tol.merge) | (barycentric > 1 + tol.merge)
).any(axis=1))
# Where projection does lie in the triangle, compare vector to projection to the
# triangle normal to compute sign
sign = np.sign(np.einsum(
"ij,ij->i",
normals[nonzero[ontriangle]],
points[nonzero[ontriangle]] - projection[ontriangle]))
distance[nonzero[ontriangle]] *= -1.0 * sign
# For all other triangles, resort to raycasting against the entire mesh
inside = mesh.ray.contains_points(points[nonzero[~ontriangle]])
sign = (inside.astype(int) * 2) - 1.0
# apply sign to previously computed distance
distance[nonzero[~ontriangle]] *= sign
return distance
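# Hedged sketch of the sign convention documented above (not part of the
# original module); the box mesh and query points are example values only.
def _example_signed_distance():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.box(extents=[2.0, 2.0, 2.0])
    points = [[0.0, 0.0, 0.0],   # inside  -> positive distance
              [5.0, 0.0, 0.0]]   # outside -> negative distance
    return signed_distance(mesh, points)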
class ProximityQuery(object):
"""
Proximity queries for the current mesh.
"""
def __init__(self, mesh):
self._mesh = mesh
@log_time
def on_surface(self, points):
"""
Given list of points, for each point find the closest point
on any triangle of the mesh.
Parameters
----------
points : (m,3) float, points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to surface
triangle_id : (m,) int
Index of closest triangle for each point.
"""
return closest_point(mesh=self._mesh,
points=points)
def vertex(self, points):
"""
Given a set of points, return the closest vertex index to each point
Parameters
----------
points : (n, 3) float
Points in space
Returns
----------
distance : (n,) float
Distance from source point to vertex.
vertex_id : (n,) int
Index of mesh.vertices for closest vertex.
"""
tree = self._mesh.kdtree
return tree.query(points)
def signed_distance(self, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh.
"""
return signed_distance(self._mesh, points)
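# Hedged usage sketch (not part of the original module): a ProximityQuery is
# normally reached through the mesh.nearest attribute rather than constructed
# directly; the sphere mesh and query point are example values only.
def _example_proximity_query():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.icosphere()
    closest, distance, triangle_id = mesh.nearest.on_surface([[3.0, 0.0, 0.0]])
    distance_to_vertex, vertex_id = mesh.nearest.vertex([[3.0, 0.0, 0.0]])
    return closest, distance, triangle_id, distance_to_vertex, vertex_id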
def longest_ray(mesh, points, directions):
"""
Find the lengths of the longest rays which do not intersect the mesh
cast from a list of points in the provided directions.
Parameters
-----------
points : (n, 3) float
Points in space.
directions : (n, 3) float
Directions of rays.
Returns
----------
signed_distance : (n,) float
Length of rays.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
directions = np.asanyarray(directions, dtype=np.float64)
if not util.is_shape(directions, (-1, 3)):
raise ValueError('directions must be (n,3)!')
if len(points) != len(directions):
raise ValueError('number of points must equal number of directions!')
faces, rays, locations = mesh.ray.intersects_id(points, directions,
return_locations=True,
multiple_hits=True)
if len(rays) > 0:
distances = np.linalg.norm(locations - points[rays],
axis=1)
else:
distances = np.array([])
# Reject intersections at distance less than tol.planar
rays = rays[distances > tol.planar]
distances = distances[distances > tol.planar]
# Add infinite length for those with no valid intersection
no_intersections = np.setdiff1d(np.arange(len(points)), rays)
rays = np.concatenate((rays, no_intersections))
distances = np.concatenate((distances,
np.repeat(np.inf,
len(no_intersections))))
return group_min(rays, distances)
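# Hedged usage sketch (not part of the original module); the box mesh, ray
# origin and direction are example values only.  Rays that never hit the mesh
# report an infinite length.
def _example_longest_ray():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    points = [[0.0, 0.0, -0.5]]        # a point on the bottom face
    directions = [[0.0, 0.0, 1.0]]     # cast upwards through the box
    # the ray exits through the top face, so the returned length is about 1.0
    return longest_ray(mesh, points, directions)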
def max_tangent_sphere(mesh,
points,
inwards=True,
normals=None,
threshold=1e-6,
max_iter=100):
"""
Find the center and radius of the sphere which is tangent to
the mesh at the given point and at least one more point with no
non-tangential intersections with the mesh.
Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016)
Shrinking sphere:
A parallel algorithm for computing the thickness of 3D objects,
Computer-Aided Design and Applications, 13:2, 199-207,
DOI: 10.1080/16864360.2015.1084186
Parameters
----------
points : (n, 3) float
Points in space.
inwards : bool
Whether to have the sphere inside or outside the mesh.
normals : (n, 3) float or None
Normals of the mesh at the given points
if is None computed automatically.
Returns
----------
centers : (n,3) float
Centers of spheres
radii : (n,) float
Radii of spheres
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if inwards:
normals = -normals
# Find initial tangent spheres
distances = longest_ray(mesh, points, normals)
radii = distances * 0.5
not_converged = np.ones(len(points), dtype=bool) # boolean mask
# If ray is infinite, find the vertex which is furthest from our point
# when projected onto the ray. I.e. find v which maximises
# (v-p).n = v.n - p.n.
    # We use a loop rather than a vectorised approach to reduce memory cost;
    # it also seems to run faster.
for i in np.where(np.isinf(distances))[0]:
projections = np.dot(mesh.vertices - points[i], normals[i])
# If no points lie outside the tangent plane, then the radius is infinite
# otherwise we have a point outside the tangent plane, take the one with maximal
# projection
if projections.max() < tol.planar:
radii[i] = np.inf
not_converged[i] = False
else:
vertex = mesh.vertices[projections.argmax()]
radii[i] = (np.dot(vertex - points[i], vertex - points[i]) /
(2 * np.dot(vertex - points[i], normals[i])))
# Compute centers
centers = points + normals * np.nan_to_num(radii.reshape(-1, 1))
centers[np.isinf(radii)] = [np.nan, np.nan, np.nan]
# Our iterative process terminates when the difference in sphere
# radius is less than threshold*D
D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0])
convergence_threshold = threshold * D
n_iter = 0
while not_converged.sum() > 0 and n_iter < max_iter:
n_iter += 1
n_points, n_dists, n_faces = mesh.nearest.on_surface(
centers[not_converged])
# If the distance to the nearest point is the same as the distance
# to the start point then we are done.
done = np.abs(
n_dists -
np.linalg.norm(
centers[not_converged] -
points[not_converged],
axis=1)) < tol.planar
not_converged[np.where(not_converged)[0][done]] = False
# Otherwise find the radius and center of the sphere tangent to the mesh
# at the point and the nearest point.
diff = n_points[~done] - points[not_converged]
old_radii = radii[not_converged].copy()
# np.einsum produces element wise dot product
radii[not_converged] = (np.einsum('ij, ij->i',
diff,
diff) /
(2 * np.einsum('ij, ij->i',
diff,
normals[not_converged])))
centers[not_converged] = points[not_converged] + \
normals[not_converged] * radii[not_converged].reshape(-1, 1)
# If change in radius is less than threshold we have converged
cvged = old_radii - radii[not_converged] < convergence_threshold
not_converged[np.where(not_converged)[0][cvged]] = False
return centers, radii
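# Hedged usage sketch of the shrinking-sphere query above (not part of the
# original module); the box mesh, surface point and normal are example
# values only.
def _example_max_tangent_sphere():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
    points = [[0.0, 0.0, 0.5]]       # centre of the top face
    normals = [[0.0, 0.0, 1.0]]      # outward normal at that point
    centers, radii = max_tangent_sphere(mesh, points, inwards=True,
                                        normals=normals)
    # for a unit cube the inscribed tangent sphere has radius about 0.5
    return centers, radii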
def thickness(mesh,
points,
exterior=False,
normals=None,
method='max_sphere'):
"""
Find the thickness of the mesh at the given points.
Parameters
----------
points : (n, 3) float
Points in space
exterior : bool
Whether to compute the exterior thickness
(a.k.a. reach)
normals : (n, 3) float
Normals of the mesh at the given points
If is None computed automatically.
method : string
One of 'max_sphere' or 'ray'
Returns
----------
thickness : (n,) float
Thickness at given points.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if method == 'max_sphere':
centers, radius = max_tangent_sphere(mesh=mesh,
points=points,
inwards=not exterior,
normals=normals)
thickness = radius * 2
return thickness
elif method == 'ray':
if exterior:
return longest_ray(mesh, points, normals)
else:
return longest_ray(mesh, points, -normals)
else:
raise ValueError('Invalid method, use "max_sphere" or "ray"')
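# Hedged usage sketch (not part of the original module); the box mesh and
# sample point are example values only.
def _example_thickness():  # pragma: no cover - illustration only
    import trimesh
    mesh = trimesh.creation.box(extents=[1.0, 2.0, 3.0])
    points = [[0.0, 0.0, 1.5]]       # a point on the +Z face
    # 'ray' measures straight through the wall along the inward normal
    # (about 3.0 here), while 'max_sphere' reports the diameter of the
    # largest inscribed tangent sphere (about 1.0 here)
    by_ray = thickness(mesh, points, method='ray')
    by_sphere = thickness(mesh, points, method='max_sphere')
    return by_ray, by_sphere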
| mit | 8,312,762,869,782,250,000 | 32.448276 | 88 | 0.609227 | false |
hcseob/py_spectre | py_spectre/psf.py | 1 | 50756 | # -*- coding: latin-1 -*-
"""
Copyright (c) 2008 Pycircuit Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Pycircuit nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import unittest
import struct, os, re
import operator
import numpy
# import psfasc
from copy import copy
from struct import unpack, pack
class PSFInvalid(Exception):
pass
def warning(str):
print "Warning: "+str
def indent(str, n=2):
return "\n".join([' '*n+s for s in str.split("\n")])
class PSFData(object):
@classmethod
def fromFile(cls, file):
obj = cls()
obj.deSerializeFile(file)
return obj
size=None
def __init__(self, value=None, extarg=None):
self.value = value
self.extarg = extarg
def setValue(self, value):
self.value = value
def __eq__(self, a):
return self.value == a
def __cmp__(self, a):
return cmp(self.value, a)
def __hash__(self):
return hash(self.value)
def deSerializeFile(self, file):
pass
def getSize(self):
        return self.size
def getValue(self):
return self.value
def __str__(self):
return str(self.value)
def toPSFasc(self, prec=None):
return str(self)
def __repr__(self):
return self.value.__repr__()
class PSFNumber(PSFData):
def __int__(self):
return self.value
def __add__(self, a):
return UInt32(self.value+int(a))
def __mul__(self, a):
return UInt32(self.value*int(a))
def __radd__(self, a):
return UInt32(self.value+int(a))
def __sub__(self, a):
return UInt32(self.value-int(a))
def __rsub__(self, a):
return UInt32(int(a)-self.value)
def __div__(self, a):
return UInt32(self.value/int(a))
def __rdiv__(self, a):
return UInt32(int(a)/self.value)
def __floordiv__(self, a):
return UInt32(self.value//int(a))
def __rfloordiv__(self, a):
return UInt32(int(a)//self.value)
def __mod__(self, a):
return UInt32(self.value%int(a))
class Int8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
self.value = unpack("b",data[3])[0]
class UInt8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
self.value = unpack("B",data[3])[0]
class Int32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">i",file.read(self.size))[0]
class UInt32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">I",file.read(self.size))[0]
class Int64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">q",file.read(self.size))[0]
class UInt64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">Q",file.read(self.size))[0]
class Float64(PSFNumber):
size=8
def __float__(self):
return float(self.value)
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return fmt%self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">d",file.read(self.size))[0]
class Float32(PSFNumber):
size=4
def __float__(self):
return float(self.value)
def deSerializeFile(self, file, size=None):
self.value = unpack(">f",file.read(self.size))[0]
class ComplexFloat64(PSFNumber):
size=16
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return "(" + fmt%self.value.real + " " + fmt%self.value.imag + ")"
def deSerializeFile(self, file, size=None):
re,im = unpack(">dd",file.read(self.size))
self.value = complex(re,im)
class String(PSFData):
def __str__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.len = unpack(">I",file.read(4))[0]
if self.len < 0x100:
self.value = file.read(self.len)
# Pad to 32-bit boundary
file.read((4-self.len)%4)
else:
raise Exception("String too long %d"%self.len)
def toPSFasc(self, prec=None):
return "\""+str(self.value)+"\""
class Struct(PSFData):
def __init__(self, structdef, value=None):
self.structdef = structdef
self.value = {}
if value:
self.setValue(value)
def __getitem__(self, key):
return self.value[key]
def getValue(self):
return dict([(k,v.getValue()) for k,v in self.value.items()])
def setValue(self, value):
assert(value != None and len(value) == len(self.structdef.children))
for element, val in zip(self.structdef.children, value):
valueobj = element.getDataObj()
valueobj.setValue(val)
self.value[element.name] = valueobj
def deSerializeFile(self, file):
for element in self.structdef.children:
value = element.getDataObj()
value.deSerializeFile(file)
self.value[element.name] = value
def toPSFasc(self, prec=None):
s="(\n"
for element in self.structdef.children:
s+=self.value[element.name].toPSFasc(prec)+"\n"
s+=")"
return s
def __repr__(self):
return "\n".join([indent(s) for s in map(repr,self.value.items())]) + "\n"
class Array(PSFData):
def setValue(self, value):
dataclass, length = self.extarg
if value != None:
self.children = [dataclass(value=val) for val in value]
else:
self.children = [dataclass(value=None) for val in range(length)]
def getValue(self):
return [v.getValue() for v in self.children]
def __iter__(self):
return self.children.__iter__()
def __tuple__(self):
return tuple(self.children)
def __repr__(self):
return "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class Chunk:
"""Base class for chunk"""
def __init__(self, psf=None, type=None):
self.psf = psf
self.fileoffset=None
if not hasattr(self.__class__, 'type'):
self.type = type
self.verbose = False
self.name = ""
def deSerializeFile(self, file):
self.fileoffset = file.tell()
type = UInt32.fromFile(file)
if (self.type != None) and self.type != type:
file.seek(-UInt32.size, 1)
raise IncorrectChunk(type, self.type)
def __repr__(self):
return self.__class__.__name__
class NextSectionType(Chunk):
type=1
class NextSectionSweep(Chunk):
type=2
class NextSectionTrace(Chunk):
type=3
class NextSectionValues(Chunk):
type=4
class EndOfStructDef(Chunk):
type=18
NextSectionClasses = [NextSectionType, NextSectionSweep, NextSectionTrace, NextSectionValues]
class Property(Chunk):
type=None
valueclass=None
def __init__(self, name=None, value=None):
Chunk.__init__(self)
self.name = String(name)
self.value = self.valueclass(value)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.name = String.fromFile(file)
self.value = self.valueclass.fromFile(file)
def toPSFasc(self, prec=9):
return self.name.toPSFasc() + " " + self.value.toPSFasc(prec=prec)
def __repr__(self):
return self.__class__.__name__+"("+str(self.name)+","+str(self.value)+")"
class PropertyString(Property):
type=33
valueclass=String
class PropertyUInt(Property):
type=34
valueclass=UInt32
class PropertyFloat64(Property):
type=35
valueclass=Float64
PropertyClasses = [PropertyString, PropertyUInt, PropertyFloat64]
TYPEFLOATDOUBLE = 11
TYPEINTBYTE = 1
TYPECOMPLEXDOUBLE = 12
TYPESTRUCT = 16
TYPESTRING = 2 ## Incorrect number
TYPEARRAY = 3 ## Incorrect number
TYPEINTLONG = 5
class DataTypeDef(Chunk):
"""Class representing data type of waveform data"""
type=16
ClassDict = {
TYPEFLOATDOUBLE: Float64,
TYPEINTBYTE: Int8,
TYPECOMPLEXDOUBLE: ComplexFloat64,
TYPESTRING: String,
TYPEARRAY: Array,
TYPEINTLONG: Int32
}
PSFASCDict = {
TYPEFLOATDOUBLE: "FLOAT DOUBLE",
TYPEINTBYTE: "INT BYTE",
TYPECOMPLEXDOUBLE: "COMPLEX DOUBLE",
TYPESTRING: "STRING *",
TYPEINTLONG: "INT LONG"
}
def __init__(self, psf, id=0, name=None, datatypeid=0, structdef=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.datatypeid = datatypeid
self.structdef = structdef
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataObj()
elif self.datatypeid == TYPEARRAY:
return Array(extarg=(self.ClassDict[self.structdef[0]], self.structdef[1]))
else:
return self.ClassDict[self.datatypeid](extarg=self.structdef)
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
if self.datatypeid == TYPESTRUCT:
r+=self.structdef.toPSFasc(prec)
elif self.datatypeid == TYPEARRAY:
r+="ARRAY ( %s ) "%str(self.structdef[1])+self.PSFASCDict[self.structdef[0]]
else:
r+= self.PSFASCDict[self.datatypeid]
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataSize()
else:
return self.ClassDict[self.datatypeid].size
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
arraytype = UInt32.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
if arraytype != 0:
self.datatypeid, self.structdef = TYPEARRAY, (UInt32.fromFile(file), self.datatypeid)
if self.datatypeid == 16:
self.structdef = StructDef.fromFile(file, self.psf)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class DataTypeRef(Chunk):
type=16
"""Class representing link to data type"""
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.name = None
self.datatypeid = 0
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
return self.psf.types.idMap[self.datatypeid].getDataObj()
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
r+=self.psf.types.idMap[self.datatypeid].name.toPSFasc()
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
return self.psf.types.idMap[self.datatypeid].getDataSize()
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
assert(self.datatypeid != 0)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class StructDef(PSFData):
"""Class representing struct definition"""
@classmethod
def fromFile(cls, file, psf):
obj = cls()
obj.deSerializeFile(file, psf)
return obj
def __init__(self):
self.children = []
def getDataObj(self):
return Struct(self)
def getDataSize(self):
return sum([child.getDataSize() for child in self.children])
def toPSFasc(self, prec=None):
s="STRUCT(\n"
for child in self.children:
s+=child.toPSFasc(prec)+"\n"
s+=")"
return s
def deSerializeFile(self, file, psf):
while True:
chunk = readChunk(psf, file, expectedclasses=[DataTypeDef, EndOfStructDef])
if isinstance(chunk, EndOfStructDef):
break
else:
self.children.append(chunk)
def __repr__(self):
return self.__class__.__name__ + "(\n"+\
"\n".join(map(str,self.children))+\
")\n"
class SimpleContainer(Chunk):
type = 21
def __init__(self, psf, type=None, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist+self.childrenclsignore)
if chunk.__class__ in self.childrenclslist:
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s+= "0x%x"%self.fileoffset+ ":"
s+= self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos and self.fileoffset:
s+= "size="+str(self.endpos-self.fileoffset)
s+= "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
return s
class Container22(Chunk):
type=22
def __init__(self, psf, type=None, n=None, childrenclslist=None):
Chunk.__init__(self, psf, 22)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value # Save end position of Container
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file,
expectedclasses=self.childrenclslist)
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__ +\
"(" + str(self.type) +")" + "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class ZeroPad(Chunk):
type = 20
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
size = UInt32.fromFile(file).value
self.endpos = file.tell() + size
file.seek(self.endpos)
class HashTable(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf, n=None):
Chunk.__init__(self, psf, type)
self.children = []
self.extra=[]
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
startpos = file.tell()
size = UInt32.fromFile(file)
for i in range(0, size/8):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
self.children.append((id, offset))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" 0x%x: 0x%x"%(k,v.value) for k,v in self.children])+")"
class HashTableTrace(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf):
Chunk.__init__(self, psf, type)
self.children = []
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.size = UInt32.fromFile(file)
for i in range(0, self.size.value/16):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
data1 = UInt32.fromFile(file).value
data2 = UInt32.fromFile(file).value
self.children.append((id,offset,data1,data2))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" %s: 0x%x 0x%x 0x%x"%(pack(">I",k.value),v.value,d1,d2) for k,v,d1,d2 in self.children])+")"
class HashContainer(Chunk):
type=21
hashclass = HashTable
def __init__(self, psf, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
self.hashtable = None
def __len__(self):
return len(self.children)
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
self.data = Container22(self.psf,
childrenclslist=self.childrenclslist)
self.data.deSerializeFile(file)
self.hashtable = self.hashclass(self.psf)
self.hashtable.deSerializeFile(file)
# Copy children reference from data
self.children = self.data.children
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s += "0x%x"%self.fileoffset +":"
s += self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos:
s+=" size="+str(self.endpos-self.fileoffset) + "\n"
s += "\n".join([indent(s) for s in map(str,(self.children, self.hashtable))]) + "\n"
return s
class HeaderSection(SimpleContainer):
type=21
def __init__(self, psf, n=None):
SimpleContainer.__init__(self,psf, childrenclslist=PropertyClasses,
childrenclsignore=NextSectionClasses)
self.properties = {}
def addProperty(self, prop):
"""Add property to header"""
self.children.append(prop)
self.properties[prop.name] = prop.value
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.properties = {}
for prop in self.children:
self.properties[prop.name] = prop.value
def toPSFasc(self, prec=None):
r="HEADER\n"
r+='"PSFversion" "1.00"\n'
r+="\n".join([child.toPSFasc(prec) for child in self.children \
if not child.name.value[0:3].upper() == 'PSF'])
return r
class SweepSection(SimpleContainer):
type=21
def __init__(self, psf):
SimpleContainer.__init__(self, psf, childrenclslist=[DataTypeRef],
childrenclsignore=NextSectionClasses)
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
def getSweep(self, id):
return self.idMap[id]
def getNames(self):
return tuple([str(child.name) for child in self.children])
def toPSFasc(self, prec=None):
r="SWEEP\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TypeSection(HashContainer):
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[DataTypeDef],
childrenclsignore=NextSectionClasses)
self.idMap = {}
self.nameMap = {}
def addType(self, type):
type.id = self.psf.allocId()
self.children.append(type)
self.idMap[type.id] = type
self.nameMap[type.name] = type
def getType(self, id):
return self.idMap[id]
def getTypeByName(self, name):
return self.nameMap[name]
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
            self.nameMap[chunk.name] = chunk
def toPSFasc(self, prec=None):
r="TYPE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TraceSection(HashContainer):
hashclass = HashTableTrace
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[GroupDef, DataTypeRef])
self.idMap = {}
self.nameIndex = {}
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
self.idMap = {}
for index, chunk in enumerate(self.children):
self.idMap[chunk.id] = chunk
if isinstance(chunk, GroupDef):
self.nameIndex.update(dict([(par, (index,)+value) for par,value in chunk.getNameIndex().items()]))
else:
self.nameIndex[chunk.name] = (index,)
def getNameIndex(self):
return self.nameIndex
def toPSFasc(self, prec=None):
r="TRACE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
def getTraceNames(self):
result = []
for trace in self.children:
if isinstance(trace,GroupDef):
result += trace.getNames()
else:
result.append(trace.name)
return tuple(map(str, result))
def getTraceIndexByName(self, name):
"""Returns an index to the given trace name
        The index is hierarchical, so if the traces are divided into 2 groups the index (0,1) means
child 1 of group 0
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("VIN")
(0, 1)
>>> psf=PSFReader('./test/resultdirs/parsweep2/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("net3")
(0,)
"""
return self.nameIndex[name]
class ValuesSectionNonSweep(HashContainer):
type=21
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[NonSweepValue])
self.idMap={}
self.nameMap={}
def addValue(self, value):
value.id = self.psf.allocId()
if not isinstance(value, NonSweepValue):
raise ValueError("Value should be a NonSweepValue")
self.idMap[value.id] = value
self.nameMap[value.name] = value
self.children.append(value)
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
for child in self.children:
self.nameMap[child.name] = child
def getValuePropertiesByName(self, name):
return dict([(prop.name, prop.value) for prop in self.nameMap[name].properties])
def getValueByName(self, name):
return self.nameMap[name].getValue()
def getValueNames(self):
return tuple([child.name for child in self.children])
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class ValuesSectionSweep(SimpleContainer):
type=21
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
windowedsweep = self.psf.header.properties.has_key('PSF window size')
if windowedsweep:
el = ZeroPad(self.psf)
el.deSerializeFile(file)
isweep=0
while isweep < self.psf.header.properties['PSF sweep points']:
if windowedsweep:
value = SweepValueWindowed(self.psf)
else:
value = SweepValueSimple(self.psf)
isweep += value.deSerializeFile(file, n=self.psf.header.properties['PSF sweep points']-isweep)
self.children.append(value)
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def getSweepParamValues(self):
return reduce(operator.__add__, [child.getSweepParamValues() for child in self.children])
def getValueNames(self):
return self.psf.traces.getTraceNames()
def __len__(self):
return len(self.psf.traces)
def getValueByName(self, name):
windowedsweep = self.psf.header.properties.has_key('PSF window size')
index = self.psf.traces.getTraceIndexByName(name)
result = []
for child in self.children:
obj=child
for i in index:
obj = obj.children[i]
# If windowed sweep, each child will be a list of values in the window
if windowedsweep:
result += [v.getValue() for v in obj]
else:
result.append(obj.getValue())
return numpy.array(result)
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class NonSweepValue(Chunk):
type=16
def __init__(self, psf, id=None, typeid=None, name=None, value=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.typeid = typeid
if typeid:
self.valuetype = self.psf.types.idMap[self.typeid]
else:
self.valuetype = None
if value:
self.value = value
elif self.valuetype:
self.value = self.valuetype.getDataObj()
else:
self.value = None
self.properties = []
def getValue(self):
return self.value.getValue()
def setValue(self, value):
self.value.setValue(value)
def deSerializeFile(self, file):
startpos = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.typeid = UInt32.fromFile(file)
assert(self.typeid != 0)
self.valuetype = self.psf.types.idMap[self.typeid]
self.value = self.valuetype.getDataObj()
self.value.deSerializeFile(file)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def toPSFasc(self, prec=None):
r = self.name.toPSFasc(prec) + " " + self.valuetype.name.toPSFasc(prec) + " " + self.value.toPSFasc(prec)
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name, "id":"0x%x"%self.id, "typeid":"0x%x"%self.typeid,
"properties":self.properties,"value":self.value})+")"
class SweepValue(Chunk):
"""Class representing waveform data"""
type = 16
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.linktypeid = UInt32()
self.datatypeid = UInt32()
self.paramtype = None
self.paramvalue = None
self.children = []
self.properties = []
def deSerializeFile(self, file, n=None):
pass
def getSweepParamValues(self):
pass
def __len__(self):
return len(self.children)
def __repr__(self):
return self.__class__.__name__ + "(" + str(self.paramtype.name) + "=" + str(self.paramvalue) +","+ \
"children="+str(self.children) +")\n"
class SweepValueSimple(SweepValue):
def deSerializeFile(self, file, n=None):
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
self.paramtype = self.psf.sweeps.getSweep(self.paramtypeid)
self.paramvalue = self.paramtype.getDataObj()
self.paramvalue.deSerializeFile(file)
for datatype in self.psf.traces.children:
datatypeid = UInt32.fromFile(file)
if datatypeid in (17,16):
valuetypeid = UInt32.fromFile(file)
if valuetypeid != datatype.id:
## Unexpected value type id found
## This is probably because of missing trace values
                        ## Undo the read of datatypeid and valuetypeid, and break out of the loop
file.seek(-2*UInt32.size, 1)
break
value = datatype.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
elif datatypeid == 15:
## End of section
file.seek(-UInt32.size, 1)
break
else:
raise Exception("Datatypeid unknown 0x%x" % datatypeid)
return 1
def getSweepParamValues(self):
return [self.paramvalue.getValue()]
def toPSFasc(self, prec=None):
r=self.paramtype.name.toPSFasc(prec) + " " +self.paramvalue.toPSFasc(prec)+"\n"
r+="\n".join([valuetype.name.toPSFasc(prec) + " " + value.toPSFasc(prec) \
for valuetype, value in zip(self.psf.traces.children, self.children)])
return r
class SweepValueWindowed(SweepValue):
def deSerializeFile(self, file, n=None):
bufferstart = file.tell()
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
assert(len(self.psf.sweeps.children) == 1)
self.paramtype=self.psf.sweeps.children[0]
self.paramvalue = []
# Get sweep parameter values
paramvaluesize = self.paramtype.getDataSize()
windowsize = self.psf.header.properties['PSF window size'].value
leftinwindow = (file.tell()//windowsize + 1)*windowsize - file.tell()
        windowlen = leftinwindow//paramvaluesize
if n > windowlen:
n = windowlen
for j in xrange(n):
paramvalue = self.paramtype.getDataObj()
paramvalue.deSerializeFile(file)
if j < n:
self.paramvalue.append(paramvalue)
# Get trace values
for trace in self.psf.traces.children:
value = trace.getDataObj()
value.deSerializeFile(file, count=n,
windowsize=self.psf.header.properties['PSF window size'].value)
self.children.append(value)
# Skip trailing padding bytes
padsize = int((self.psf.header.properties['PSF buffer size'] - (file.tell()-bufferstart))% \
self.psf.header.properties['PSF buffer size'])
file.seek(padsize, 1)
return n
def getSweepParamValues(self):
return [v.getValue() for v in self.paramvalue]
def toPSFasc(self, prec=None):
r=''
for i, paramvalue in enumerate(self.paramvalue):
r+=self.paramtype.name.toPSFasc(prec) + " " + paramvalue.toPSFasc(prec) + "\n"
r+="\n".join([trace.name.toPSFasc(prec) + " " + value.toPSFasc(prec=prec, index=i) \
for trace,value in zip(self.psf.traces.children, self.children)])
if i < len(self.paramvalue)-1:
r+="\n"
return r
class GroupData(PSFData):
def __init__(self, groupdef):
PSFData.__init__(self)
self.groupdef = groupdef
self.children = []
def deSerializeFile(self, file, count=None, windowsize=None):
for element in self.groupdef.children:
if count==None:
value = element.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
else:
valuearray=[]
# If a window is used in the PSF file, the entire window is stored
# and the data is aligned to the end of the window. So we need
# to skip window size - data size
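                # Worked example (editor's note, illustrative numbers): with
                # windowsize=4096 and count=100 Float64 values (8 bytes each),
                # the values occupy the last 800 bytes of the window, so
                # 4096 - 100*8 = 3296 padding bytes are skipped here.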
file.seek(int(windowsize - count*element.getDataSize()), 1)
for i in xrange(0,count):
value = element.getDataObj()
value.deSerializeFile(file)
valuearray.append(value)
self.children.append(valuearray)
def toPSFasc(self, prec=None, index=None):
if index != None:
return "\n".join([v[index].toPSFasc(prec) for v in self.children])
else:
return "\n".join([v.toPSFasc(prec) for v in self.children])
def getSize(self):
return self.groupdef.getDataSize()
def __repr__(self):
return "GroupData" + "\n" + "\n".join([indent(s) for s in map(repr,self.children)]) + "\n"
class GroupDef(Chunk):
type=17
"""Class representing group of traces"""
def __init__(self, psf):
Chunk.__init__(self, psf)
self.children=[]
self.datasize=None
def getDataObj(self):
return GroupData(self)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.nchildren = UInt32.fromFile(file)
# Read children
self.children = []
self.datasize = 0
for i in range(0, self.nchildren):
child = DataTypeRef(self.psf)
child.deSerializeFile(file)
self.children.append(child)
self.datasize += child.getDataSize()
def getNameIndex(self):
return dict([(v.name, (i,)) for i,v in enumerate(self.children)])
def toPSFasc(self, prec=None):
s=self.name.toPSFasc(prec) + " GROUP %d\n"%len(self.children)
s+="\n".join([child.toPSFasc(prec) for child in self.children])
return s
def getDataSize(self):
return self.datasize
def getNames(self):
return [str(child.name) for child in self.children]
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__+ "(id=0x%x"%self.id+", nchildren=%d"%self.nchildren+")\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class UnknownChunk(Exception):
def __init__(self, chunktype):
self.type = chunktype
def __str__(self):
return "Unknown chunk of type: %d"%self.type
class InvalidChunk(Exception):
def __init__(self, chunk):
self.chunk = chunk
def __str__(self):
return "Invalid %s"%(self.chunk.__class__.__name__)
class IncorrectChunk(Exception):
def __init__(self, type, expectedtype):
self.type = type
self.expectedtype = expectedtype
def __str__(self):
return "Incorrect chunk type %d (should be %d)"%(self.type, self.expectedtype)
class LastValue(Exception):
pass
def readChunk(psf, file, expectedclasses=None):
type = UInt32.fromFile(file)
file.seek(-4, 1) # Rewind one word since the type will be read again by the deSerializeFile function
if expectedclasses:
if not type in [cls.type for cls in expectedclasses]:
raise ValueError("Unexpected type %d, not in "%type + str([cls.type for cls in expectedclasses]))
for cls in expectedclasses:
if type == cls.type:
chunk = cls(psf)
else:
raise Exception("Use expectedclasses!")
if type == 21:
chunk = Section(psf)
elif type == 20:
chunk = ZeroPad(psf)
elif type == 22:
chunk = Container22(psf, type, n=n)
elif type == 33:
chunk = PropertyString(psf)
elif type == 34:
chunk = PropertyUInt(psf)
elif type == 35:
chunk = PropertyFloat64(psf)
elif type == 16:
chunk = DataTypeDef(psf,type)
elif type == 17:
chunk = GroupDef(psf)
elif type == 19:
chunk = HashTable(psf, n=n)
elif type in (1,2,3,4):
file.seek(4,1)
return None
else:
warning("Unknown chunk %d"%type)
raise UnknownChunk(type)
chunk.deSerializeFile(file)
return chunk
class PSFReader(object):
def __init__(self, filename=None, asc=None):
self.header = None
self.types = TypeSection(self)
self.sweeps = None
self.traces = None
self.lastid = 0x1000
self.verbose = False
self.filename = filename
self.file = None
self.values = None
self.asc = asc
def open(self):
"""Open a PSF file and read its headers.
Example:
Trying to open a valid psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
"""
if self.asc == None:
self.asc = False
if not self.asc:
self.file = open(self.filename, "rb")
if self.validate():
self.deSerializeFile(self.file)
else:
raise PSFInvalid("Invalid PSF file")
else:
newpsfobj = psfasc.parse("psfasc", open(self.filename).read())
self.header = newpsfobj.header
self.types = newpsfobj.types
self.sweeps = newpsfobj.sweeps
self.traces = newpsfobj.traces
self.values = newpsfobj.values
self.lastid = newpsfobj.lastid
self.verbose = newpsfobj.verbose
def validate(self):
"""Check if the PSF file is valid.
Returns True if valid, False otherwise
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.validate()
True
>>> psf=PSFReader('./test/psfasc/srcSweep.asc')
>>> psf.validate()
False
"""
if self.file == None:
file = open(self.filename, "rb")
else:
file = self.file
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
return clarissa == "Clarissa"
def getNSweepPoints(self):
"""Returns number of sweeps. 0 if not swept.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweepPoints()
4
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF sweep points']
def getNSweeps(self):
"""Returns the number of nested sweeps
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweeps()
1
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF sweeps']
def __len__(self):
return len(self.values)
def getValueNames(self):
"""Returns a tuple of the names of the traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.getValueNames()
>>> psf.open()
>>> psf.getValueNames()
('VOUT', 'VIN', 'R0')
>>> psf=PSFReader('./test/resultdirs/simple/opBegin')
>>> psf.open()
>>> psf.getValueNames()
('R0', 'V1', 'V0', 'E0', 'VIN', 'NET9', 'VOUT')
"""
if self.values:
return self.values.getValueNames()
def getSweepParamNames(self):
return self.sweeps.getNames()
def getSweepParamValues(self, dim=0):
"""Returns a numpy.array of sweep parameter values for sweep dimension dim.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)
array([ 1., 2., 3., 4.])
windowed result
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)[:3]
array([ 0.00000000e+00, 2.00000000e-11, 5.33333333e-11])
"""
return numpy.array(self.values.getSweepParamValues())
def getValuePropertiesByName(self, name):
"""Returns the properties associated with value
>>> psf=PSFReader('./test/psf/opBegin')
>>> psf.open()
>>> psf.getValuePropertiesByName("XIRXRFMIXTRIM0.XM1PDAC1.XMN.MAIN")["Region"]
'subthreshold'
"""
return self.values.getValuePropertiesByName(name)
def getValuesByName(self, name):
"""Returns a numpy.array of trace values for swept results and a scalar for non swept.
Example:
swept psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getValuesByName("VOUT")
array([-6., -4., -2., 0.])
>>> psf.getValuesByName("VIN")
array([ 1., 2., 3., 4.])
swept psf with complex numbers
>>> psf=PSFReader('./test/psf/frequencySweep')
>>> psf.open()
>>> res = psf.getValuesByName("ANT_CM")
>>> len(res)
123
>>> res[:3]
array([ 0.6+0.j, 0. +0.j, 0. +0.j])
swept windowed psf file
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getValuesByName("INP")[0:3]
array([ 0.6 , 0.62486899, 0.66211478])
non-swept psf file
>>> psf=PSFReader('./test/psf/dcOpInfo.info')
>>> psf.open()
>>> psf.getValuesByName("IREG21U_0.MP5.b1")['betadc']
4.7957014499434756
        swept psf file without groups
>>> psf=PSFReader('./test/resultdirs/parsweep/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.getValuesByName("net3")
array([ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j])
"""
return self.values.getValueByName(name)
def nTraces(self):
"""Returns number of traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.nTraces()
3
"""
if self.file == None:
ValueError("Please open the PSF file first")
return self.header.properties['PSF traces']
def allocId(self):
self.lastid+=1
return self.lastid-1
def info(self):
s="Number of sweeps: %d\n"%self.getNSweeps()
if self.getNSweeps() > 0:
s+="Number of sweep points: %d\n"%self.getNSweepPoints()
s+="Number of traces: %d"%self.nTraces()
return s
def updateHeader(self):
if self.sweeps:
sweeps = len(self.sweeps.children)
else:
sweeps=0
self.header.addProperty(PropertyUInt("PSF sweeps", sweeps))
def deSerializeFile(self, file):
# Find filesize
file.seek(0,2)
filesize = file.tell()
# Last word contains the size of the data
file.seek(-4,2)
datasize = UInt32.fromFile(file).value
if self.verbose:
print "Total data size: ",datasize
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
if not clarissa == "Clarissa":
raise ValueError("Clarissa signature not found")
# Read section index table
sectionoffsets = {}
file.seek(-4-8-8,2)
pos = file.tell()
sectionnums = []
while file.tell() >= datasize:
sectionnum = UInt32.fromFile(file)
sectionnums.insert(0,sectionnum.value)
offset = UInt32.fromFile(file)
sectionoffsets[sectionnum] = offset
pos -= 8
file.seek(pos)
offsets = [sectionoffsets[secnum] for secnum in sectionnums]
sizes = map(operator.sub, offsets[1:]+[datasize], offsets)
sectionsizes = dict(zip(sectionnums, sizes))
if self.verbose:
print sectionoffsets, sectionsizes
file.seek(0)
self.unk1 = UInt32.fromFile(file)
if self.verbose:
print "First word: 0x%x"%self.unk1
# Load headers
file.seek(int(sectionoffsets[0]))
self.header = HeaderSection(self)
self.header.deSerializeFile(file)
if self.verbose:
print "HEADER"
print self.header
if sectionoffsets.has_key(1):
file.seek(int(sectionoffsets[1]))
self.types.deSerializeFile(file)
if self.verbose:
print "TYPE"
print self.types
if sectionoffsets.has_key(2):
file.seek(int(sectionoffsets[2]))
self.sweeps = SweepSection(self)
self.sweeps.deSerializeFile(file)
if self.verbose:
print "SWEEPS"
print self.sweeps
if sectionoffsets.has_key(3):
file.seek(int(sectionoffsets[3]))
self.traces = TraceSection(self)
self.traces.deSerializeFile(file)
if sectionoffsets.has_key(4):
file.seek(int(sectionoffsets[4]))
# Load data
if self.sweeps:
self.values = ValuesSectionSweep(self)
else:
self.values = ValuesSectionNonSweep(self)
self.values.deSerializeFile(file)
def printme(self):
print "HEADER"
print self.header
print "TYPES"
print self.types
if self.sweeps:
print "SWEEP"
print self.sweeps
if self.traces:
print "TRACE"
print self.traces
print "VALUES"
print self.values
def toPSFasc(self, prec=None):
"""Export to PSF ascii"""
sections = [self.header.toPSFasc(prec), self.types.toPSFasc(prec)]
if self.sweeps:
sections.append(self.sweeps.toPSFasc(prec))
if self.traces:
sections.append(self.traces.toPSFasc(prec))
if self.values:
sections.append(self.values.toPSFasc(prec))
r="\n".join(sections) + "\n"
r+="END\n"
return r
def __repr__(self):
return "\n".join(map(str, (self.header, self.types, self.sweeps, self.traces, self.values)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | 7,594,775,208,493,014,000 | 31.022713 | 190 | 0.569805 | false |
TresysTechnology/setools | tests/nodeconquery.py | 1 | 10617 | # Copyright 2014, Tresys Technology, LLC
# Copyright 2017, Chris PeBenito <[email protected]>
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SETools. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import unittest
from socket import AF_INET6
from ipaddress import IPv4Network, IPv6Network
from setools import SELinuxPolicy, NodeconQuery
class NodeconQueryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/nodeconquery.conf")
def test_000_unset(self):
"""Nodecon query with no criteria"""
# query with no parameters gets all nodecons.
nodecons = sorted(self.p.nodecons())
q = NodeconQuery(self.p)
q_nodecons = sorted(q.results())
self.assertListEqual(nodecons, q_nodecons)
def test_001_ip_version(self):
"""Nodecon query with IP version match."""
q = NodeconQuery(self.p, ip_version=AF_INET6)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16"), IPv6Network("1110::/16")], nodecons)
def test_020_user_exact(self):
"""Nodecon query with context user exact match"""
q = NodeconQuery(self.p, user="user20", user_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.20.1/32")], nodecons)
def test_021_user_regex(self):
"""Nodecon query with context user regex match"""
q = NodeconQuery(self.p, user="user21(a|b)", user_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.21.1/32"), IPv4Network("10.1.21.2/32")], nodecons)
def test_030_role_exact(self):
"""Nodecon query with context role exact match"""
q = NodeconQuery(self.p, role="role30_r", role_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.30.1/32")], nodecons)
def test_031_role_regex(self):
"""Nodecon query with context role regex match"""
q = NodeconQuery(self.p, role="role31(a|c)_r", role_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.31.1/32"), IPv4Network("10.1.31.3/32")], nodecons)
def test_040_type_exact(self):
"""Nodecon query with context type exact match"""
q = NodeconQuery(self.p, type_="type40", type_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.40.1/32")], nodecons)
def test_041_type_regex(self):
"""Nodecon query with context type regex match"""
q = NodeconQuery(self.p, type_="type41(b|c)", type_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.41.2/32"), IPv4Network("10.1.41.3/32")], nodecons)
def test_050_range_exact(self):
"""Nodecon query with context range exact match"""
q = NodeconQuery(self.p, range_="s0:c1 - s0:c0.c4")
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.50.1/32")], nodecons)
def test_051_range_overlap1(self):
"""Nodecon query with context range overlap match (equal)"""
q = NodeconQuery(self.p, range_="s1:c1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap2(self):
"""Nodecon query with context range overlap match (subset)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap3(self):
"""Nodecon query with context range overlap match (superset)"""
q = NodeconQuery(self.p, range_="s1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap4(self):
"""Nodecon query with context range overlap match (overlap low level)"""
q = NodeconQuery(self.p, range_="s1 - s1:c1,c2", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap5(self):
"""Nodecon query with context range overlap match (overlap high level)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_052_range_subset1(self):
"""Nodecon query with context range subset match"""
q = NodeconQuery(self.p, range_="s2:c1,c2 - s2:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_052_range_subset2(self):
"""Nodecon query with context range subset match (equal)"""
q = NodeconQuery(self.p, range_="s2:c1 - s2:c1.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_053_range_superset1(self):
"""Nodecon query with context range superset match"""
q = NodeconQuery(self.p, range_="s3 - s3:c0.c4", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_053_range_superset2(self):
"""Nodecon query with context range superset match (equal)"""
q = NodeconQuery(self.p, range_="s3:c1 - s3:c1.c3", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_054_range_proper_subset1(self):
"""Nodecon query with context range proper subset match"""
q = NodeconQuery(self.p, range_="s4:c1,c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset2(self):
"""Nodecon query with context range proper subset match (equal)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_054_range_proper_subset3(self):
"""Nodecon query with context range proper subset match (equal low only)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset4(self):
"""Nodecon query with context range proper subset match (equal high only)"""
q = NodeconQuery(self.p, range_="s4:c1,c2 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_055_range_proper_superset1(self):
"""Nodecon query with context range proper superset match"""
q = NodeconQuery(self.p, range_="s5 - s5:c0.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset2(self):
"""Nodecon query with context range proper superset match (equal)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_055_range_proper_superset3(self):
"""Nodecon query with context range proper superset match (equal low)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset4(self):
"""Nodecon query with context range proper superset match (equal high)"""
q = NodeconQuery(self.p, range_="s5 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_100_v4network_equal(self):
"""Nodecon query with IPv4 equal network"""
q = NodeconQuery(self.p, network="192.168.1.0/24", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.1.0/24")], nodecons)
def test_101_v4network_overlap(self):
"""Nodecon query with IPv4 network overlap"""
q = NodeconQuery(self.p, network="192.168.201.0/24", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.200.0/22")], nodecons)
def test_110_v6network_equal(self):
"""Nodecon query with IPv6 equal network"""
q = NodeconQuery(self.p, network="1100::/16", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16")], nodecons)
def test_111_v6network_overlap(self):
"""Nodecon query with IPv6 network overlap"""
q = NodeconQuery(self.p, network="1110:8000::/17", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1110::/16")], nodecons)
| lgpl-2.1 | -6,016,288,394,831,459,000 | 42.512295 | 100 | 0.655081 | false |
unix-beard/matasano | set1/detect_single_character_xor/detect_single_character_xor.py | 1 | 1514 | #!/usr/bin/env python3
################################################################################
# The matasano crypto challenges
# http://cryptopals.com/sets/1/challenges/4/
# Set 1 Challenge 4
# Detect single-character XOR
################################################################################
# One of the 60-character strings in the input file has been encrypted
# by single-character XOR. Find it.
# Key: int=53, char='5'
# Message: Now that the party is jumping
#
# NOTE: This implementation is strictly sequential
################################################################################
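# Illustration (editor's note): single-character XOR maps each plaintext byte P
# to C = P ^ K. With the key K = 53 found above, the first plaintext byte 'N'
# (0x4e) encodes to 0x4e ^ 0x35 = 0x7b, i.e. the hex pair "7b" in the input line.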
import sys
import string
def find_key(key, tuple_):
return chr(int(tuple_[0] + tuple_[1], base=16) ^ key)
def decode_with_key(key, s):
decoded_msg = ''
for t in zip(s[0::2], s[1::2]):
decoded_msg += find_key(key, t)
if len([c for c in decoded_msg if c in string.ascii_letters + ' \n']) == len(decoded_msg):
print('[*] Trying the key: int: {0}, char: {1}'.format(key, chr(key)))
print('Decoded message: {0}'.format(decoded_msg))
def decode(s):
print('Decoding [{0}]'.format(s))
for key in range(0, 256):
decode_with_key(key, s)
def remove_eol(s):
"""Removes trailing '\n' if there is one"""
return s[0:len(s) - 1] if s[len(s) - 1] == '\n' else s
def main():
with open(sys.argv[1], 'r') as f:
for encoded_str in f:
decode(remove_eol(encoded_str))
if __name__ == '__main__':
main()
| mit | 4,528,978,805,032,337,400 | 30.541667 | 94 | 0.509247 | false |
RaphaelKimmig/django_helpful | django_helpful/__init__.py | 1 | 1416 | # Copyright (c) 2013, Raphael Kimmig
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .utils import *
try:
from .test_runners import *
except ImportError:
pass
| bsd-2-clause | -2,272,355,786,306,520,300 | 49.571429 | 79 | 0.786723 | false |
looker/sdk-examples | python/soft_delete_dashboard.py | 1 | 1367 | import sys
from typing import Sequence
import exceptions
from looker_sdk import client, error, models
sdk = client.setup("../looker.ini")
def main():
"""Given a dashboard title, get the ids of all dashboards with matching titles
and move them to trash.
$ python soft_delete_dashboard.py "An Unused Dashboard"
"""
dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""
if not dashboard_title:
raise exceptions.ArgumentError("Please provide: <dashboardTitle>")
dashboards = get_dashboards(dashboard_title)
delete_dashboards(dashboards)
def get_dashboards(title: str) -> Sequence[models.Dashboard]:
"""Get dashboards with matching title"""
lc_title = title.lower()
results = sdk.search_dashboards(title=lc_title)
if not results:
raise exceptions.NotFoundError(f'dashboard "{title}" not found')
assert isinstance(results, Sequence)
return results
def delete_dashboards(dashboards: Sequence[models.Dashboard]):
"""Soft delete dashboards"""
for dashboard in dashboards:
try:
assert dashboard.id
sdk.delete_dashboard(dashboard.id)
except error.SDKError:
print(f"Failed to delete dashboard with id {dashboard.id}.")
else:
print(f'"{dashboard.title}" (id {dashboard.id}) has been moved to trash.')
main()
| mit | -5,936,345,032,288,775,000 | 26.897959 | 86 | 0.675933 | false |
robmcmullen/peppy | peppy/major_modes/fortran_95.py | 1 | 1742 | # peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Fortran 95 programming language editing support.
Major mode for editing Fortran 95 files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class Fortran95Mode(FundamentalMode):
"""Stub major mode for editing Fortran 95 files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'Fortran 95'
editra_synonym = 'Fortran 95'
stc_lexer_id = wx.stc.STC_LEX_FORTRAN
start_line_comment = '!'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'f2k f90 f95 fpp', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[38], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[39], hidden=False, fullwidth=True),
StrParam('keyword_set_2', unique_keywords[40], hidden=False, fullwidth=True),
)
class Fortran95ModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for Fortran 95
"""
def getMajorModes(self):
yield Fortran95Mode
| gpl-2.0 | -6,352,703,335,974,964,000 | 32.5 | 85 | 0.723307 | false |
awacha/cct | cct/qtgui/devices/motor/movemotor/movemotor.py | 1 | 4527 | import logging
from PyQt5 import QtWidgets, QtGui
from .movemotor_ui import Ui_Form
from ....core.mixins import ToolWindow
from .....core.devices import Motor
from .....core.instrument.privileges import PRIV_MOVEMOTORS
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MoveMotor(QtWidgets.QWidget, Ui_Form, ToolWindow):
required_privilege = PRIV_MOVEMOTORS
def __init__(self, *args, **kwargs):
credo = kwargs.pop('credo')
self.motorname = kwargs.pop('motorname')
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.setupToolWindow(credo, required_devices=['Motor_' + self.motorname])
self._start_requested = False
self.setupUi(self)
def setupUi(self, Form):
Ui_Form.setupUi(self, Form)
self.motorComboBox.addItems(sorted(self.credo.motors.keys()))
self.motorComboBox.currentTextChanged.connect(self.onMotorSelected)
self.movePushButton.clicked.connect(self.onMove)
self.motorComboBox.setCurrentIndex(self.motorComboBox.findText(self.motorname))
self.relativeCheckBox.toggled.connect(self.onRelativeChanged)
self.targetDoubleSpinBox.editingFinished.connect(self.onEditingFinished)
self.onMotorSelected()
self.adjustSize()
def onEditingFinished(self):
if self.targetDoubleSpinBox.hasFocus():
self.onMove()
def onRelativeChanged(self):
self.onMotorPositionChange(self.motor(), self.motor().where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0)
else:
self.targetDoubleSpinBox.setValue(self.motor().where())
self.adjustSize()
def setIdle(self):
super().setIdle()
self.movePushButton.setText('Move')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/motor.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.movePushButton.setIcon(icon)
self.targetDoubleSpinBox.setEnabled(True)
self.motorComboBox.setEnabled(True)
self.relativeCheckBox.setEnabled(True)
self.movePushButton.setEnabled(True)
self._start_requested = False
def setBusy(self):
self.movePushButton.setText('Stop')
self.movePushButton.setIcon(QtGui.QIcon.fromTheme('process-stop'))
self.targetDoubleSpinBox.setEnabled(False)
self.motorComboBox.setEnabled(False)
self.relativeCheckBox.setEnabled(False)
self.movePushButton.setEnabled(True)
super().setBusy()
def motor(self) -> Motor:
return self.credo.motors[self.motorComboBox.currentText()]
def onMove(self):
if self.movePushButton.text() == 'Move':
self.movePushButton.setEnabled(False)
self._start_requested = True
if self.relativeCheckBox.isChecked():
self.motor().moverel(self.targetDoubleSpinBox.value())
else:
self.motor().moveto(self.targetDoubleSpinBox.value())
else:
self.movePushButton.setEnabled(False)
self.motor().stop()
def onMotorStart(self, motor: Motor):
if self._start_requested:
self.setBusy()
def onMotorSelected(self):
self.setWindowTitle('Move motor {}'.format(self.motorComboBox.currentText()))
for d in self.required_devices:
self.unrequireDevice(d)
self.required_devices = ['Motor_' + self.motorComboBox.currentText()]
self.requireDevice(self.required_devices[0])
motor = self.credo.motors[self.motorComboBox.currentText()]
self.onMotorPositionChange(motor, motor.where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0.0)
else:
self.targetDoubleSpinBox.setValue(motor.where())
def onMotorPositionChange(self, motor: Motor, newposition: float):
self.positionLabel.setText('<b>{:.4f}</b>'.format(newposition))
left = motor.get_variable('softleft')
right = motor.get_variable('softright')
if self.relativeCheckBox.isChecked():
left -= newposition
right -= newposition
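            # Editor's note, illustrative numbers: at position 10.0 with soft
            # limits [0.0, 20.0], the relative target range becomes [-10.0, +10.0].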
self.targetDoubleSpinBox.setMinimum(left)
self.targetDoubleSpinBox.setMaximum(right)
self.leftLimitLabel.setText('{:.4f}'.format(left))
self.rightLimitLabel.setText('{:.4f}'.format(right))
self.adjustSize()
def onMotorStop(self, motor: Motor, targetpositionreached: bool):
self.setIdle()
| bsd-3-clause | 2,612,978,641,138,935,300 | 38.365217 | 95 | 0.664237 | false |
venkatant/msproject | flow_statistics.py | 1 | 7329 | __author__ = 'venkat'
from header import *
from json_http_handler import *
class FlowWindow:
bottom_frame = 0
bottom_row = 0
class FlowTable:
def __init__(self):
self.dest_ip = None
self.dest_mask = None
self.dest_mac = None
self.dest_port = None
self.dest_node = None
return
def updateflowtable(self, destIp, destMask, destMac, destPort, destNode):
self.dest_ip = destIp
self.dest_mask = destMask
self.dest_mac = destMac
self.dest_port = destPort
self.dest_node = destNode
return
def displayflowtable(self):
print(self.dest_ip,
self.dest_mask,
self.dest_mac,
self.dest_port,
self.dest_node)
return
class FlowStatistics:
def __init__(self):
self.listbox = None
self.toplevel = None
self.no_of_flows = 0
def CurSelet(self):
print("Hello")
        switch = str((self.listbox.get(self.listbox.curselection())))
print(switch)
def fillListWithNodesInfo(self):
'''
Create an object of Http JSON Handler Class to receive
resp from respective Rest URL's
'''
http_obj = HttpJsonHandler()
json_nodes = http_obj.getnodeinfo()
for node in json_nodes['nodeProperties']:
self.listbox.insert(END, node['node']['id'])
def displayFlowTableTitle(self, bottom_frame, bottom_row):
for column in range(5):
if column == 0:
label = Label(bottom_frame, text="Destination IP", borderwidth=0, width=15, fg="red")
elif column == 1:
label = Label(bottom_frame, text="Destination Mask", borderwidth=0, width=15, fg="red")
elif column == 2:
label = Label(bottom_frame, text="Output Mac", borderwidth=0, width=15, fg="red")
elif column == 3:
label = Label(bottom_frame, text="Output Port", borderwidth=0, width=15, fg="red")
elif column == 4:
label = Label(bottom_frame, text="Output Node", borderwidth=0, width=25, fg="red")
label.configure(bg="white")
label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1)
return
    def displayFlowTableContent(self, flow_list, flow_window_obj):
        bottom_frame = flow_window_obj.bottom_frame
        bottom_row = flow_window_obj.bottom_row
        # One grid row per flow entry, one column per FlowTable field.
        for row in flow_list:
            values = [(row.dest_ip, 15),
                      (row.dest_mask, 15),
                      (row.dest_mac, 15),
                      (row.dest_port, 15),
                      (row.dest_node, 25)]
            for column, (value, width) in enumerate(values):
                label = Label(bottom_frame, text="%s" % value, borderwidth=0, width=width)
                label.configure(bg="white")
                label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1)
            bottom_row += 1
        for column in range(5):
            bottom_frame.grid_columnconfigure(column, weight=1)
        return
    def CurListSelet(self, evt, flow_window_obj):
        # Fetch the flow statistics of the selected switch from the
        # controller's REST API and render them in the flow table.
        switch = str(self.listbox.get(self.listbox.curselection()))
        print(switch)
        http_obj = HttpJsonHandler()
        json_flows = http_obj.getflowinfo(switch)
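        # The flow-statistics response is assumed (from the fields accessed
        # below) to look roughly like:
        #   {"flowStatistic": [{"flow": {"match": {"matchField": [{"value": ..., "mask": ...}]},
        #                                "actions": [{"address": ...},
        #                                            {"port": {"id": ..., "node": {"id": ...}}}]}},
        #                      ...]}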
        flow_list = []
        for flow_stat in json_flows['flowStatistic']:
            match_field = flow_stat['flow']['match']['matchField'][0]
            destIp = match_field['value']
            destMask = match_field['mask']
            destPort = 0
            destnode = '00:00:00:00:00:00:00:00'
            actions = flow_stat['flow']['actions']
            try:
                # First action rewrites the destination MAC; the second one,
                # if present, names the output port and node.
                destMac = actions[0]['address']
                try:
                    destPort = actions[1]['port']['id']
                    destnode = actions[1]['port']['node']['id']
                except (KeyError, IndexError):
                    pass
            except KeyError:
                # No MAC rewrite: the first action already names the output
                # port and node.
                destPort = actions[0]['port']['id']
                destnode = actions[0]['port']['node']['id']
                destMac = '000000000000'
            flow_table_entry = FlowTable()
            flow_table_entry.updateflowtable(destIp, destMask, destMac, destPort, destnode)
            flow_list.append(flow_table_entry)
            flow_table_entry.displayflowtable()
        # Sort the flow entries by destination IP before displaying them.
        flow_list.sort(key=lambda host: host.dest_ip)
        self.displayFlowTableContent(flow_list, flow_window_obj)
def flowstatistics():
    # Build the "Flow Monitoring" window: a listbox of switches on top and,
    # below it, a table that is filled with flow entries once a switch is
    # selected.
    obj = FlowStatistics()
toplevel = Toplevel()
toplevel.title("Flow Monitoring")
toplevel.geometry("750x250")
top_row = 0
bottom_row = 0
top_frame = Frame(toplevel)
top_frame.pack(side=TOP)
top_label = Label(top_frame, text=" SELECT SWITCH TO GET FLOW ENTRIES", fg="red", borderwidth=0, width=40)
top_label.grid(row=top_row, rowspan=1)
top_row += 1
bottom_frame = Frame(toplevel)
bottom_frame.pack(side=TOP)
bottom_label = Label(bottom_frame, fg="green")
bottom_label.grid(row=bottom_row)
bottom_row += 1
scrollbar = Scrollbar(top_frame)
obj.listbox = Listbox(top_frame, yscrollcommand=scrollbar.set)
obj.listbox.config(height=4)
# Fills the list of nodes in the List Box
obj.fillListWithNodesInfo()
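    # Two-way link between listbox and scrollbar: the listbox updates the
    # scrollbar position through yscrollcommand, and the scrollbar scrolls
    # the listbox through its yview command.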
obj.listbox.grid(row=top_row, column=0, sticky="nsew", padx=1, pady=1)
scrollbar.grid(row=top_row, column=1, sticky="nsew", padx=1, pady=1)
scrollbar.config(command=obj.listbox.yview)
obj.displayFlowTableTitle(bottom_frame, bottom_row)
bottom_row += 1
flow_window_obj = FlowWindow()
flow_window_obj.bottom_row = bottom_row
flow_window_obj.bottom_frame = bottom_frame
    # Refresh the flow table whenever a switch is selected in the listbox.
    obj.listbox.bind('<<ListboxSelect>>', lambda event: obj.CurListSelet(event, flow_window_obj))
return | gpl-2.0 | -5,882,701,297,109,460,000 | 32.167421 | 118 | 0.583163 | false |