repo_name | path | copies | size | content | license
---|---|---|---|---|---
popazerty/test-1 | RecordTimer.py | 1 | 39985 |
import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event and gives out a (begin, end, name, description, eit) tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
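# A minimal usage sketch (assuming 'ev' is an EPG event object as returned by
# eEPGCache lookups):
#   begin, end, name, description, eit = parseEvent(ev)
# 'begin' is moved earlier and 'end' later by the configured recording margins.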
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
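# A timer starting within the next 6 minutes (360 s) counts as an active recording,
# so the box is not shut down right before a recording is due.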
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
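# The bindAction calls below use priority -maxint - 1 so keypress() runs ahead of
# other handlers: the first key press after standby clears the wasInStandby /
# wasInDeepStandby flags and unbinds itself.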
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send a fake event to check whether other recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, isAutoTimer = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.isAutoTimer = isAutoTimer
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s, isAutoTimer=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay, self.isAutoTimer)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
name = self.name
description = self.description
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
if self.rename_repeat:
event_description = evt.getShortDescription()
if not event_description:
event_description = evt.getExtendedDescription()
if event_description and event_description != description:
description = event_description
event_name = evt.getEventName()
if event_name and event_name != name:
name = event_name
if not self.calculateFilename(event_name):
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
# we must calc the new start time before the stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in the evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
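# Exponential backoff for retrying a failed prepare/start: first 5 seconds,
# then doubling on each failure, capped at 100 seconds.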
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == 1:
if self.always_zap:
if Screens.Standby.inStandby:
self.log(5, "wakeup and zap to recording service")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is not a live service
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create a file to "reserve" the filename,
# because another recording at the same time on another service can try to record the same event,
# e.g. cable / sat; the second recording then needs its own extension. When we create the file
# here, calculateFilename is happy
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle(self.Filename)
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare:
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
self.log(11, "wakeup and zap")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.keypress()
if Screens.Standby.inStandby: #In case some plugin already put the receiver in standby
config.misc.standbyCounter.value = 0
else:
Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
# Tell the trashcan we started recording. The trashcan gets events,
# but cannot tell what the associated path is.
Trashcan.instance.markDirty(self.Filename)
return True
elif next_state == self.StateEnded:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if not checkForRecordings():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
if not Screens.Standby.inTryQuitMainloop:
if Screens.Standby.inStandby:
RecordTimerEntry.TryQuitMainloop()
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
else:
RecordTimerEntry.keypress()
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin - 30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
new_end -= 30 # leave 30 seconds of prepare time
if new_end <= time():
return False
self.end = new_end
return True
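# Switch the frontend (tuner) priority to the configured recording preference while
# a recording is active, and restore the normal priority when called with setdefault=True.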
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = config.usage.recording_frontend_priority.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority.value
self.change_frontend = False
if elem is not None:
setPreferredTuner(int(elem))
def sendStandbyNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
notify = config.usage.show_message_when_recording_starts.value and \
not Screens.Standby.inStandby and \
Screens.InfoBar.InfoBar.instance and \
Screens.InfoBar.InfoBar.instance.execing
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
notify = True
if notify:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
elif event == iRecordableService.evRecordAborted:
NavigationInstance.instance.RecordTimer.removeEntry(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
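# Rebuild a RecordTimerEntry from a <timer> XML element as written by RecordTimer.saveTimer;
# missing optional attributes fall back to their defaults.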
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
rename_repeat = long(xml.get("rename_repeat") or "1")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
always_zap = long(xml.get("always_zap") or "0")
zap_wakeup = str(xml.get("zap_wakeup") or "always")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
descramble = int(xml.get("descramble") or "1")
record_ecm = int(xml.get("record_ecm") or "0")
isAutoTimer = int(xml.get("isAutoTimer") or "0")
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, isAutoTimer = isAutoTimer, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
# did this timer reach the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = True
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
checkit = False # for now it is enough to display the message once
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append(' isAutoTimer="' + str(int(timer.isAutoTimer)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now or isWakeup and timer.zap_wakeup in ("from_standby", "never"):
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def getNextTimerTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if next_act < now or isWakeup and timer.justplay and timer.zap_wakeup in ("from_standby", "never"):
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
def record(self, entry, ignoreTSC=False, dosave=True): # called from loadTimer with dosave=False
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if ignoreTSC != True:
print "timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInRepeatTimer(self, timer, event):
time_match = 0
is_editable = False
begin = event.getBeginTime()
duration = event.getDuration()
end = begin + duration
timer_end = timer.end
if timer.disabled and timer.isRunning():
if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
return True
else:
return False
if timer.justplay and (timer_end - timer.begin) <= 1:
timer_end += 60
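# Convert event and timer times to "minutes of day" biased by 1440 (one day),
# so that repeat timers crossing midnight can be shifted back a day for comparison.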
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(timer.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = timer.begin < begin or begin <= timer.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = timer.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - timer.begin) / 60)
if xend < xbegin:
xend += 1440
if timer.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
return time_match and is_editable
def isInTimer(self, eventid, begin, duration, service):
returnValue = None
type = 0
time_match = 0
isAutoTimer = False
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
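# Only the first 11 ':'-separated fields of a service reference identify the service;
# the matching below also resolves subservices via their parent SID/TSID.
# The 'type' computed later encodes the overlap (1=last part, 2=whole event,
# 3=within event, 4=first part) plus an offset for zap/always_zap/repeat timers.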
for x in self.timer_list:
if x.isAutoTimer == 1:
isAutoTimer = True
else:
isAutoTimer = False
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid:
# check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
timer_end = x.end
timer_begin = x.begin
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
timer_repeat = x.repeated
# if set 'don't stop current event but disable coming events' for repeat timer
running_only_curevent = x.disabled and x.isRunning() and timer_repeat
if running_only_curevent:
timer_repeat = 0
type_offset += 15
if timer_repeat != 0:
type_offset += 15
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
else:
# recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if type in (2,7,12,17,22,27):
# When full recording do not look further
returnValue = (time_match, [type])
break
elif returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
return returnValue
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
| gpl-2.0 |
ru-faraon/MITMf | core/packetfilter.py | 4 | 1221 |
import logging # explicit import; the original relied on Scapy's star import to provide 'logging'
import threading
from core.utils import set_ip_forwarding, iptables
from core.logger import logger
from scapy.all import *
from traceback import print_exc
from netfilterqueue import NetfilterQueue
formatter = logging.Formatter("%(asctime)s [PacketFilter] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("PacketFilter", formatter)
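# PacketFilter pulls packets from NFQUEUE (bound to queue 1 by the iptables rule),
# parses each payload with Scapy, executes the user-supplied filter script with the
# parsed 'packet' in scope, and reinjects the possibly modified packet.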
class PacketFilter:
def __init__(self, filter):
self.filter = filter
def start(self):
set_ip_forwarding(1)
iptables().NFQUEUE()
self.nfqueue = NetfilterQueue()
self.nfqueue.bind(1, self.modify)
t = threading.Thread(name='packetparser', target=self.nfqueue.run)
t.setDaemon(True)
t.start()
def modify(self, pkt):
#log.debug("Got packet")
data = pkt.get_payload()
packet = IP(data)
try:
execfile(self.filter)
except Exception:
log.debug("Error occurred in filter")
print_exc()
pkt.set_payload(str(packet)) #set the packet content to our modified version
pkt.accept() #accept the packet
def stop(self):
self.nfqueue.unbind()
set_ip_forwarding(0)
iptables().flush() | gpl-3.0 |
djmaze/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/xcode.py | 137 | 50429 |
#!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
]
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
import sys
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings. This is intended to be
# used very sparingly. Really, almost everything should go into
# target-specific build settings sections. The project-wide settings are
# only intended to be used in cases where Xcode attempts to resolve
# variable references in a project context as opposed to a target context,
# such as when resolving sourceTree references while building up the tree
# view for UI display.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
# does the it define it's own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, or is a test, add its
# target to the targets, and (if it's a test) add it the to the
# test targets.
is_test = int(target.get('test', 0))
if target.get('run_as') or is_test:
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
# The test runner target has a build phase that executes the
# test, if this has the 'test' attribute. If the 'run_as' tag
# doesn't exist (meaning that this must be a test), then we
# define a default test command line.
command = target.get('run_as', {
'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']
})
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some tests end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if is_test and serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
if is_test:
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
target_dict = xcode_target_to_target_dict[dependency_xct]
if target_dict and int(target_dict.get('test', 0)):
assert dependency_xct.test_runner
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
if not cached_xcode_version is None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
def AddSourceToTarget(source, pbxp, xct):
# TODO(mark): Perhaps this can be made a little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext != '':
ext = ext[1:].lower()
if ext in source_extensions:
xct.SourcesPhase().AddFile(source)
else:
# Files that aren't added to a sources build phase can still go into
# the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  # re.findall returns a (possibly empty) list, never None.
  if not matches:
    return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
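# Illustrative expansion (values made up): known variables are substituted,
# unknown ones are left in place for Xcode or make to resolve later.
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc $(UNKNOWN)',
#                        {'INPUT_FILE_BASE': 'two'})
#   # -> 'two.cc $(UNKNOWN)'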
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. We use the type
# with "+bundle" appended if the target has "mac_bundle" set.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
target_product_name = spec.get('product_name', None)
if target_product_name:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_extension=spec.get('product_extension', None))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
    # Xcode does not have a distinct type for loadable_modules that are pure
    # BSD targets (i.e. unbundled). It uses the same setup as a shared_library
    # but the mach-o type is explicitly set in the settings. So before we do
# anything else, for this one case, we stuff in that one setting. This
# would allow the other data in the spec to change it if need be.
if type == 'loadable_module' and not is_bundle:
xccl.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
    # type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
        prebuild_index += 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    # variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_basename = posixpath.basename(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = '@echo note: ' + ExpandXcodeVariables(message,
rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s_%s.make' % (target_name, rule['rule_name'])
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
          # If not for that, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
# TODO(mark): quote the list of concrete_output_dirs.
if len(concrete_output_dirs) > 0:
makefile.write('\tmkdir -p %s\n' % ' '.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
makefile.write('\t%s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
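        # For orientation, a generated rule makefile has roughly this shape
        # (file and command names are hypothetical; recipe lines are
        # tab-indented in the real file):
        #
        #   all: \
        #       one.cc \
        #       two.cc
        #
        #   one.cc \
        #       : \
        #       one.ext \
        #       extra_input.py
        #       mkdir -p gen
        #       @echo note: processing one.ext
        #       ./do_rule one.ext one.cc
        #
        # Each rule source gets its own target; "all" lists only the first
        # concrete output per source, as explained above.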
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
          prebuild_index += 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
for group in ['inputs', 'inputs_excluded']:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" if it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
for key in ['sources', 'mac_bundle_resources']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
for action in spec.get('actions', []):
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
# TODO(mark): this logic isn't right. There are certain directories
# that are always searched, we should check to see if the library is
# in one of those directories, and if not, we should do the
# AppendBuildSetting thing.
if not posixpath.isabs(library) and not library.startswith('$'):
# TODO(mark): Need to check to see if library_dir is already in
# LIBRARY_SEARCH_PATHS.
library_dir = posixpath.dirname(library)
xct.AppendBuildSetting('LIBRARY_SEARCH_PATHS', library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
# If the define is of the form A="B", escape the quotes
# yielding A=\"\\\"B\\\"\". The extra set of quotes tell
# Xcode NOT to split on spaces, and still define a string
# literal (with quotes).
set_define = re.sub(r'^([^=]*=)"([^"]*)"$',
r'\1"\"\2\""', define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause |
lzambella/Qyoutube-dl | youtube_dl/extractor/swrmediathek.py | 165 | 3660 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class SWRMediathekIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
'md5': '8c5f6f0172753368547ca8413a7768ac',
'info_dict': {
'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
'ext': 'mp4',
'title': 'SWR odysso',
'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
'thumbnail': 're:^http:.*\.jpg$',
'duration': 2602,
'upload_date': '20140515',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
'info_dict': {
'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'ext': 'mp4',
'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
'thumbnail': 're:http://.*\.jpg',
'duration': 5305,
'upload_date': '20140516',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
'info_dict': {
'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'ext': 'mp3',
'title': 'Saša Stanišic: Vor dem Fest',
'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
'thumbnail': 're:http://.*\.jpg',
'duration': 3366,
'upload_date': '20140520',
'uploader': 'SWR 2',
'uploader_id': '284670',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 'Downloading video JSON')
attr = video['attr']
media_type = attr['entry_etype']
formats = []
for entry in video['sub']:
if entry['name'] != 'entry_media':
continue
entry_attr = entry['attr']
codec = entry_attr['val0']
quality = int(entry_attr['val1'])
fmt = {
'url': entry_attr['val2'],
'quality': quality,
}
if media_type == 'Video':
fmt.update({
'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
'vcodec': codec,
})
elif media_type == 'Audio':
fmt.update({
'acodec': codec,
})
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'title': attr['entry_title'],
'description': attr['entry_descl'],
'thumbnail': attr['entry_image_16_9'],
'duration': parse_duration(attr['entry_durat']),
'upload_date': attr['entry_pdatet'][:-4],
'uploader': attr['channel_title'],
'uploader_id': attr['channel_idkey'],
'formats': formats,
}
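        # A quick illustrative check of _VALID_URL (the show id comes from the
        # first test case above):
        #   re.match(SWRMediathekIE._VALID_URL,
        #            'http://swrmediathek.de/player.htm'
        #            '?show=849790d0-dab8-11e3-a953-0026b975f2e6'
        #            ).group('id') == '849790d0-dab8-11e3-a953-0026b975f2e6'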
| gpl-3.0 |
AbsoluteMSTR/pies | pies/overrides.py | 2 | 7952 | """pies/overrides.py.
Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import abc
import functools
import sys
from numbers import Integral
from ._utils import unmodified_isinstance, with_metaclass
from .version_info import PY2, PY3, VERSION
native_dict = dict
native_round = round
native_filter = filter
native_map = map
native_zip = zip
native_range = range
native_str = str
native_chr = chr
native_input = input
native_next = next
native_object = object
common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr',
'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types',
'native_next', 'native_object', 'with_metaclass']
if PY3:
import urllib
import builtins
from urllib import parse
from collections import OrderedDict
integer_types = (int, )
def u(string):
return string
def itemsview(collection):
return collection.items()
def valuesview(collection):
return collection.values()
def keysview(collection):
return collection.keys()
urllib.quote = parse.quote
urllib.quote_plus = parse.quote_plus
urllib.unquote = parse.unquote
urllib.unquote_plus = parse.unquote_plus
urllib.urlencode = parse.urlencode
execute = getattr(builtins, 'exec')
if VERSION[1] < 2:
def callable(entity):
return hasattr(entity, '__call__')
common.append('callable')
__all__ = common + ['OrderedDict', 'urllib']
else:
from itertools import ifilter as filter
from itertools import imap as map
from itertools import izip as zip
from decimal import Decimal, ROUND_HALF_EVEN
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import codecs
str = unicode
chr = unichr
input = raw_input
range = xrange
integer_types = (int, long)
import sys
stdout = sys.stdout
stderr = sys.stderr
reload(sys)
sys.stdout = stdout
sys.stderr = stderr
sys.setdefaultencoding('utf-8')
def _create_not_allowed(name):
def _not_allow(*args, **kwargs):
raise NameError("name '{0}' is not defined".format(name))
_not_allow.__name__ = name
return _not_allow
for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'):
globals()[removed] = _create_not_allowed(removed)
def u(s):
if isinstance(s, unicode):
return s
else:
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
def execute(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
class _dict_view_base(object):
__slots__ = ('_dictionary', )
def __init__(self, dictionary):
self._dictionary = dictionary
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__())))
def __unicode__(self):
return str(self.__repr__())
def __str__(self):
return str(self.__unicode__())
class dict_keys(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iterkeys()
class dict_values(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.itervalues()
class dict_items(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iteritems()
def itemsview(collection):
return dict_items(collection)
def valuesview(collection):
return dict_values(collection)
def keysview(collection):
return dict_keys(collection)
class dict(unmodified_isinstance(native_dict)):
def has_key(self, *args, **kwargs):
return AttributeError("'dict' object has no attribute 'has_key'")
def items(self):
return dict_items(self)
def keys(self):
return dict_keys(self)
def values(self):
return dict_values(self)
def round(number, ndigits=None):
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
exponent = Decimal('10') ** (-ndigits)
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d)
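    # Illustrative behaviour of the backported round above (banker's
    # rounding, matching Python 3):
    #   round(2.5)       # -> 2
    #   round(3.5)       # -> 4
    #   round(2.675, 2)  # -> 2.67 (Decimal sees the exact binary value)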
    def next(iterator):
        try:
            return iterator.__next__()
        except AttributeError:
            return native_next(iterator)
class FixStr(type):
def __new__(cls, name, bases, dct):
if '__str__' in dct:
dct['__unicode__'] = dct['__str__']
dct['__str__'] = lambda self: self.__unicode__().encode('utf-8')
return type.__new__(cls, name, bases, dct)
if sys.version_info[1] <= 6:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
return type.__instancecheck__(cls, instance)
class object(with_metaclass(FixStr, object)):
pass
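    # Illustrative use of the FixStr machinery above (the example class is
    # hypothetical): defining a Python 3 style __str__ on a pies `object`
    # subclass yields __unicode__ plus an encoding __str__ automatically.
    #   class Greeting(object):
    #       def __str__(self):
    #           return u'h\xe9llo'
    #   unicode(Greeting())  # -> u'h\xe9llo'
    #   str(Greeting())      # -> the UTF-8 encoded bytes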
__all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr',
'input', 'range', 'filter', 'map', 'zip', 'object']
| mit |
FloatingGhost/MISP | app/files/scripts/stix2/misp2stix2_mapping.py | 2 | 22427 | def attribute_data_observable(data):
return {'type': 'artifact', 'payload_bin': data}
def attribute_data_pattern(data):
return "artifact:payload_bin = '{}'".format(data)
def define_address_type(address):
if ':' in address:
return 'ipv6-addr'
return 'ipv4-addr'
def observable_as(_, attribute_value):
return {'0': {'type': 'autonomous-system', 'number': attribute_value}}
def pattern_as(_, attribute_value):
return "[autonomous-system:number = '{}']".format(attribute_value)
def observable_attachment(*args):
observable = observable_file(args[0], args[1])
if len(args) == 3:
observable['0']['content_ref'] = '0'
return {'0': attribute_data_observable(args[2]), '1': observable['0']}
return observable
def pattern_attachment(*args):
pattern = pattern_file(args[0], args[1])[1:-1]
if len(args) == 3:
pattern += " AND {}".format(attribute_data_pattern(args[2]))
return "[{}]".format(pattern)
def observable_domain(_, attribute_value):
return {'0': {'type': 'domain-name', 'value': attribute_value}}
def pattern_domain(_, attribute_value):
return "[domain-name:value = '{}']".format(attribute_value)
def observable_domain_ip(_, attribute_value):
address_type = define_address_type(attribute_value)
domain_value, ip_value = attribute_value.split('|')
domain = observable_domain(_, domain_value)
domain['0']['resolves_to_refs'] = '1'
domain['1'] = {'type': address_type, 'value': ip_value}
return domain
def pattern_domain_ip(_, attribute_value):
domain_value, ip_value = attribute_value.split('|')
domain = pattern_domain(_, domain_value)[1:-1]
domain += " AND domain-name:resolves_to_refs[*].value = '{}'".format(ip_value)
return "[{}]".format(domain)
def observable_email_address(attribute_type, attribute_value):
email_type = "from_ref" if 'src' in attribute_type else "to_refs"
return {'0': {'type': 'email-addr', 'value': attribute_value},
'1': {'type': 'email-message', email_type: '0', 'is_multipart': 'false'}}
def pattern_email_address(attribute_type, attribute_value):
email_type = "from_ref" if 'src' in attribute_type else "to_refs"
return "[email-message:{} = '{}']".format(email_type, attribute_value)
def observable_email_attachment(_, attribute_value):
observable = observable_file(_, attribute_value)
    observable['1'] = {"type": "email-message", 'is_multipart': 'true',
                       "body_multipart": [{"content_disposition": "attachment; filename='{}'".format(attribute_value), "body_raw_ref": "0"}]}
return observable
def pattern_email_attachment(_, attribute_value):
return "[email-message:body_multipart[*].body_raw_ref.name = '{}']".format(attribute_value)
def observable_email_message(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
return {'0': {'type': 'email-message', email_type: attribute_value, 'is_multipart': 'false'}}
def pattern_email_message(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
return "[email-message:{} = '{}']".format(email_type, attribute_value)
def observable_file(_, attribute_value):
return {'0': {'type': 'file', 'name': attribute_value}}
def pattern_file(_, attribute_value):
return "[file:name = '{}']".format(attribute_value)
def observable_file_hash(attribute_type, attribute_value):
_, hash_type = attribute_type.split('|')
value1, value2 = attribute_value.split('|')
return {'0': {'type': 'file', 'name': value1, 'hashes': {hash_type: value2}}}
def pattern_file_hash(attribute_type, attribute_value):
_, hash_type = attribute_type.split('|')
value1, value2 = attribute_value.split('|')
return "[file:name = '{0}' AND file:hashes.'{1}' = '{2}']".format(value1, hash_type, value2)
def observable_hash(attribute_type, attribute_value):
return {'0': {'type': 'file', 'hashes': {attribute_type: attribute_value}}}
def pattern_hash(attribute_type, attribute_value):
return "[file:hashes.'{}' = '{}']".format(attribute_type, attribute_value)
def observable_hostname_port(_, attribute_value):
hostname, port = attribute_value.split('|')
hostname_port = observable_domain(_, hostname)
    hostname_port['1'] = observable_port(_, port)['0']
return hostname_port
def pattern_hostname_port(_, attribute_value):
hostname, port = attribute_value.split('|')
return "[{} AND {}]".format(pattern_domain(_, hostname)[1:-1], pattern_port(_, port)[1:-1])
def observable_ip(attribute_type, attribute_value):
ip_type = attribute_type.split('-')[1]
address_type = define_address_type(attribute_value)
return {'0': {'type': address_type, 'value': attribute_value},
'1': {'type': 'network-traffic', '{}_ref'.format(ip_type): '0',
'protocols': [address_type.split('-')[0]]}}
def pattern_ip(attribute_type, attribute_value):
ip_type = attribute_type.split('-')[1]
address_type = define_address_type(attribute_value)
return "[network-traffic:{0}_ref.type = '{1}' AND network-traffic:{0}_ref.value = '{2}']".format(ip_type, address_type, attribute_value)
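# Illustrative output of the pattern helper above (the address is made up):
#   pattern_ip('ip-dst', '198.51.100.1')
#   # -> "[network-traffic:dst_ref.type = 'ipv4-addr' AND network-traffic:dst_ref.value = '198.51.100.1']"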
def observable_ip_port(attribute_type, attribute_value):
ip_type, _ = attribute_type.split('|')
ip, port = attribute_value.split('|')
ip_port = observable_ip(ip_type, ip)
port_type = "{}_port".format(ip_type.split('-')[1])
ip_port['1'][port_type] = port
return ip_port
def pattern_ip_port(attribute_type, attribute_value):
ip_type, _ = attribute_type.split('|')
ip, port = attribute_value.split('|')
port_type = "{}_port".format(ip_type.split('-')[1])
return "[network-traffic:{} = '{}' AND {}]".format(port_type, port, pattern_ip(ip_type, ip)[1:-1])
def observable_mac_address(_, attribute_value):
return {'0': {'type': 'mac-addr', 'value': attribute_value}}
def pattern_mac_address(_, attribute_value):
return "[mac-addr:value = '{}']".format(attribute_value)
def observable_malware_sample(*args):
observable = observable_file_hash("filename|md5", args[1])
if len(args) == 3:
observable['0']['content_ref'] = '0'
return {'0': attribute_data_observable(args[2]), '1': observable['0']}
return observable
def pattern_malware_sample(*args):
pattern = pattern_file_hash("filename|md5", args[1])[1:-1]
if len(args) == 3:
pattern += " AND {}".format(attribute_data_pattern(args[2]))
return "[{}]".format(pattern)
def observable_mutex(_, attribute_value):
return {'0': {'type': 'mutex', 'name': attribute_value}}
def pattern_mutex(_, attribute_value):
return "[mutex:name = '{}']".format(attribute_value)
def observable_port(_, attribute_value):
return {'0': {'type': 'network-traffic', 'dst_port': attribute_value, 'protocols': []}}
def pattern_port(_, attribute_value):
return "[network-traffic:dst_port = '{}']".format(attribute_value)
def observable_regkey(_, attribute_value):
return {'0': {'type': 'windows-registry-key', 'key': attribute_value.strip()}}
def pattern_regkey(_, attribute_value):
return "[windows-registry-key:key = '{}']".format(attribute_value.strip())
def observable_regkey_value(_, attribute_value):
from stix2 import WindowsRegistryValueType
key, value = attribute_value.split('|')
regkey = observable_regkey(_, key)
regkey['0']['values'] = WindowsRegistryValueType(**{'name': value.strip()})
return regkey
def pattern_regkey_value(_, attribute_value):
key, value = attribute_value.split('|')
regkey = pattern_regkey(_, key)[1:-1]
regkey += " AND windows-registry-key:values = '{}'".format(value.strip())
return "[{}]".format(regkey)
def observable_reply_to(_, attribute_value):
return {'0': {'type': 'email-addr', 'value': attribute_value},
'1': {'type': 'email-message', 'additional_header_fields': {'Reply-To': ['0']}, 'is_multipart': 'false'}}
def pattern_reply_to(_, attribute_value):
return "[email-message:additional_header_fields.reply_to = '{}']".format(attribute_value)
def observable_url(_, attribute_value):
return {'0': {'type': 'url', 'value': attribute_value}}
def pattern_url(_, attribute_value):
return "[url:value = '{}']".format(attribute_value)
def observable_x509(_, attribute_value):
return {'0': {'type': 'x509-certificate', 'hashes': {'sha1': attribute_value}}}
def pattern_x509(_, attribute_value):
return "[x509-certificate:hashes = '{}']".format(attribute_value)
def return_vulnerability(name):
return {'source_name': 'cve', 'external_id': name}
mispTypesMapping = {
'link': {'to_call': 'handle_link'},
'vulnerability': {'to_call': 'add_vulnerability', 'vulnerability_args': return_vulnerability},
'md5': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha1': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'filename': {'to_call': 'handle_usual_type', 'observable': observable_file, 'pattern': pattern_file},
'filename|md5': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha1': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'ip-src': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
'ip-dst': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
'hostname': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
'domain': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
'domain|ip': {'to_call': 'handle_usual_type', 'observable': observable_domain_ip, 'pattern': pattern_domain_ip},
'email-src': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
'email-dst': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
'email-subject': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
'email-body': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
'email-attachment': {'to_call': 'handle_usual_type', 'observable': observable_email_attachment, 'pattern': pattern_email_attachment},
'url': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
'regkey': {'to_call': 'handle_usual_type', 'observable': observable_regkey, 'pattern': pattern_regkey},
'regkey|value': {'to_call': 'handle_usual_type', 'observable': observable_regkey_value, 'pattern': pattern_regkey_value},
'malware-sample': {'to_call': 'handle_usual_type', 'observable': observable_malware_sample, 'pattern': pattern_malware_sample},
'mutex': {'to_call': 'handle_usual_type', 'observable': observable_mutex, 'pattern': pattern_mutex},
'uri': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
'authentihash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'imphash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'pehash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha384': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'tlsh': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'cdhash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'filename|authentihash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|imphash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|pehash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha384': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|tlsh': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'x509-fingerprint-sha1': {'to_call': 'handle_usual_type', 'observable': observable_x509, 'pattern': pattern_x509},
'port': {'to_call': 'handle_usual_type', 'observable': observable_port, 'pattern': pattern_port},
'ip-dst|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
'ip-src|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
'hostname|port': {'to_call': 'handle_usual_type', 'observable': observable_hostname_port, 'pattern': pattern_hostname_port},
'email-reply-to': {'to_call': 'handle_usual_type', 'observable': observable_reply_to, 'pattern': pattern_reply_to},
'attachment': {'to_call': 'handle_usual_type', 'observable': observable_attachment, 'pattern': pattern_attachment},
'mac-address': {'to_call': 'handle_usual_type', 'observable': observable_mac_address, 'pattern': pattern_mac_address},
'AS': {'to_call': 'handle_usual_type', 'observable': observable_as, 'pattern': pattern_as}
#'email-dst-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
# 'pattern': 'email-addr:display_name = \'{0}\''},
#'email-src-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
# 'pattern': 'email-addr:display_name = \'{0}\''}
}
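# A sketch of how mispTypesMapping is meant to be consumed (the dispatcher
# below is illustrative only; in MISP the 'to_call' value names a method on
# the converter object, which is not shown here):
def _convert_attribute_sketch(attribute_type, attribute_value, as_pattern=True):
    entry = mispTypesMapping[attribute_type]
    if entry['to_call'] == 'handle_usual_type':
        key = 'pattern' if as_pattern else 'observable'
        return entry[key](attribute_type, attribute_value)
    raise NotImplementedError(entry['to_call'])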
network_traffic_pattern = "network-traffic:{0} = '{1}' AND "
network_traffic_src_ref = "src_ref.type = '{0}' AND network-traffic:src_ref.value"
network_traffic_dst_ref = "dst_ref.type = '{0}' AND network-traffic:dst_ref.value"
objectsMapping = {'asn': {'to_call': 'handle_usual_object_name',
'observable': {'type': 'autonomous-system'},
'pattern': "autonomous-system:{0} = '{1}' AND "},
'course-of-action': {'to_call': 'add_course_of_action_from_object'},
'domain-ip': {'to_call': 'handle_usual_object_name',
'pattern': "domain-name:{0} = '{1}' AND "},
'email': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'email-message'}},
'pattern': "email-{0}:{1} = '{2}' AND "},
'file': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'file', 'hashes': {}}},
'pattern': "file:{0} = '{1}' AND "},
'ip-port': {'to_call': 'handle_usual_object_name',
'pattern': network_traffic_pattern},
'network-socket': {'to_call': 'handle_usual_object_name',
'pattern': network_traffic_pattern},
'pe': {'to_call': 'populate_objects_to_parse'},
'pe-section': {'to_call': 'populate_objects_to_parse'},
'process': {'to_call': 'handle_usual_object_name',
'pattern': "process:{0} = '{1}' AND "},
'registry-key': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'windows-registry-key'}},
'pattern': "windows-registry-key:{0} = '{1}' AND "},
'url': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'url'}},
'pattern': "url:{0} = '{1}' AND "},
'vulnerability': {'to_call': 'add_object_vulnerability'},
'x509': {'to_call': 'handle_usual_object_name',
'pattern': "x509-certificate:{0} = '{1}' AND "}
}
asnObjectMapping = {'asn': 'number', 'description': 'name', 'subnet-announced': 'value'}
domainIpObjectMapping = {'ip-dst': 'resolves_to_refs[*].value', 'domain': 'value'}
emailObjectMapping = {'email-body': {'email_type': 'message', 'stix_type': 'body'},
'subject': {'email_type': 'message', 'stix_type': 'subject'},
'to': {'email_type': 'message', 'stix_type': 'to_refs'}, 'cc': {'email_type': 'message', 'stix_type': 'cc_refs'},
'to-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
'from': {'email_type': 'message', 'stix_type': 'from_ref'},
'from-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
'reply-to': {'email_type': 'message', 'stix_type': 'additional_header_fields.reply_to'},
'attachment': {'email_type': 'message', 'stix_type': 'body_multipart[*].body_raw_ref.name'},
'send-date': {'email_type': 'message', 'stix_type': 'date'},
'x-mailer': {'email_type': 'message', 'stix_type': 'additional_header_fields.x_mailer'}}
fileMapping = {'hashes': "hashes.'{0}'", 'size-in-bytes': 'size', 'filename': 'name', 'mime-type': 'mime_type'}
ipPortObjectMapping = {'ip': network_traffic_dst_ref,
'src-port': 'src_port', 'dst-port': 'dst_port',
'first-seen': 'start', 'last-seen': 'end',
'domain': 'value'}
networkSocketMapping = {'address-family': 'address_family', 'domain-family': 'protocol_family',
'protocol': 'protocols', 'src-port': 'src_port', 'dst-port': 'dst_port',
'ip-src': network_traffic_src_ref, 'ip-dst': network_traffic_dst_ref,
'hostname-src': network_traffic_src_ref, 'hostname-dst': network_traffic_dst_ref}
peMapping = {'type': 'pe_type', 'number-sections': 'number_of_sections', 'imphash': 'imphash'}
peSectionMapping = {'name': 'name', 'size-in-bytes': 'size', 'entropy': 'entropy'}
processMapping = {'name': 'name', 'pid': 'pid', 'creation-time': 'created'}
regkeyMapping = {'data-type': 'data_type', 'data': 'data', 'name': 'name',
'last-modified': 'modified', 'key': 'key'}
urlMapping = {'url': 'value', 'domain': 'value', 'port': 'dst_port'}
x509mapping = {'pubkey-info-algorithm': 'subject_public_key_algorithm', 'subject': 'subject',
'pubkey-info-exponent': 'subject_public_key_exponent', 'issuer': 'issuer',
'pubkey-info-modulus': 'subject_public_key_modulus', 'serial-number': 'serial_number',
'validity-not-before': 'validity_not_before', 'validity-not-after': 'validity_not_after',
               'version': 'version'}
defineProtocols = {'80': 'http', '443': 'https'}
relationshipsSpecifications = {'attack-pattern': {'vulnerability': 'targets', 'identity': 'targets',
'malware': 'uses', 'tool': 'uses'},
'campaign': {'intrusion-set': 'attributed-to', 'threat-actor': 'attributed-to',
'identity': 'targets', 'vulnerability': 'targets',
'attack-pattern': 'uses', 'malware': 'uses',
'tool': 'uses'},
'course-of-action':{'attack-pattern': 'mitigates', 'malware': 'mitigates',
'tool': 'mitigates', 'vulnerability': 'mitigates'},
'indicator': {'attack-pattern': 'indicates', 'campaign': 'indicates',
'intrusion-set': 'indicates', 'malware': 'indicates',
'threat-actor': 'indicates', 'tool': 'indicates'},
'intrusion-set': {'threat-actor': 'attributed-to', 'identity': 'targets',
'vulnerability': 'targets', 'attack-pattern': 'uses',
'malware': 'uses', 'tool': 'uses'},
'malware': {'identity': 'targets', 'vulnerability': 'targets',
'tool': 'uses', 'malware': 'variant-of'},
'threat-actor': {'identity': 'attributed-to', 'vulnerability': 'targets',
'attack-pattern': 'uses', 'malware': 'uses',
'tool': 'uses'},
'tool': {'identity': 'targets', 'vulnerability': 'targets'}
}
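# Example lookup in the table above: the relationship type linking an
# indicator to a malware object is:
#   relationshipsSpecifications['indicator']['malware']  # -> 'indicates'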
| agpl-3.0 |
widdowquinn/pyani | docs/conf.py | 1 | 5946 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This is a configuration file with no functioning code
# pylint: skip-file
#
# pyani documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 11 13:27:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyani"
copyright = "2017-2019, Leighton Pritchard"
author = "Leighton Pritchard"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.2.9"
# The full version, including alpha/beta/rc tags.
release = "0.2.9"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
"donate.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pyanidoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pyani.tex", "pyani Documentation", "Leighton Pritchard", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyani", "pyani Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyani",
"pyani Documentation",
author,
"pyani",
"One line description of project.",
"Miscellaneous",
)
]
# -- Magic to run sphinx-apidoc automatically -----------------------------
# See https://github.com/rtfd/readthedocs.org/issues/1139
# on which this is based.
def run_apidoc(_):
"""Call sphinx-apidoc on Bio and BioSQL modules."""
from sphinx.ext.apidoc import main as apidoc_main
apidoc_main(["-e", "-F", "-o", "api/", "../pyani"])
# os.remove("api/thapbi_pict.rst") # replaced with index.rst
def setup(app):
"""Over-ride Sphinx setup to trigger sphinx-apidoc."""
app.connect("builder-inited", run_apidoc)
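# A typical local build using this configuration (a sketch; assumes
# Sphinx and sphinx_rtd_theme are installed, e.g. via pip):
#
#   sphinx-build -b html docs/ docs/_build/html
#
# The run_apidoc hook above regenerates the api/ stubs on each build.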
| mit |
grpc/grpc | src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py | 9 | 27324 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
import grpc.experimental
from tests.unit import test_common
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
def __init__(self):
self._condition = threading.Condition()
self._paused = False
self._fail = False
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = response_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
class _Service(
collections.namedtuple('_Service', (
'servicer_methods',
'server',
'stub',
))):
"""A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
def _CreateService():
"""Provides a servicer backend and a stub.
Returns:
A _Service with which to test RPCs.
"""
servicer_methods = _ServicerMethods()
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iterator, context):
return servicer_methods.StreamingInputCall(request_iterator,
context)
def FullDuplexCall(self, request_iterator, context):
return servicer_methods.FullDuplexCall(request_iterator, context)
def HalfDuplexCall(self, request_iterator, context):
return servicer_methods.HalfDuplexCall(request_iterator, context)
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
"""Provides a servicer backend that fails to implement methods and its stub.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
pass
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
def _streaming_input_request_iterator():
for _ in range(3):
request = request_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request():
request = request_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
def testImportAttributes(self):
# check that we can access the generated module and its members.
self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService()
self.assertIsNotNone(service.servicer_methods)
self.assertIsNotNone(service.server)
self.assertIsNotNone(service.stub)
service.server.stop(None)
def testIncompleteServicer(self):
service = _CreateIncompleteService()
request = request_pb2.SimpleRequest(response_size=13)
with self.assertRaises(grpc.RpcError) as exception_context:
service.stub.UnaryCall(request)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNIMPLEMENTED)
service.server.stop(None)
def testUnaryCall(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
response = service.stub.UnaryCall(request)
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFuture(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response = response_future.result()
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFutureExpired(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testUnaryCallFutureCancelled(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response_future.cancel()
self.assertTrue(response_future.cancelled())
self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testUnaryCallFutureFailed(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.fail():
response_future = service.stub.UnaryCall.future(request)
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingOutputCall(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
expected_responses = service.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingOutputCallExpired(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.pause():
responses = service.stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingOutputCallCancelled(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testStreamingOutputCallFailed(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.fail():
responses = service.stub.StreamingOutputCall(request)
self.assertIsNotNone(responses)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingInputCall(self):
service = _CreateService()
response = service.stub.StreamingInputCall(
_streaming_input_request_iterator())
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFuture(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response = response_future.result()
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFutureExpired(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(response_future.exception().code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingInputCallFutureCancelled(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
service.server.stop(None)
def testStreamingInputCallFutureFailed(self):
service = _CreateService()
with service.servicer_methods.fail():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testFullDuplexCall(self):
service = _CreateService()
responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
expected_responses = service.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testFullDuplexCallExpired(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.pause():
responses = service.stub.FullDuplexCall(
request_iterator, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testFullDuplexCallCancelled(self):
service = _CreateService()
request_iterator = _full_duplex_request_iterator()
responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testFullDuplexCallFailed(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.fail():
responses = service.stub.FullDuplexCall(request_iterator)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testHalfDuplexCall(self):
service = _CreateService()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
expected_responses = service.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testHalfDuplexCallWedged(self):
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
service = _CreateService()
with wait():
responses = service.stub.HalfDuplexCall(
half_duplex_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
@unittest.skipIf(sys.version_info < (3, 6),
                 "Unsupported on Python versions before 3.6.")
class SimpleStubsPluginTest(unittest.TestCase):
servicer_methods = _ServicerMethods()
class Servicer(service_pb2_grpc.TestServiceServicer):
def UnaryCall(self, request, context):
return SimpleStubsPluginTest.servicer_methods.UnaryCall(
request, context)
def StreamingOutputCall(self, request, context):
return SimpleStubsPluginTest.servicer_methods.StreamingOutputCall(
request, context)
def StreamingInputCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.StreamingInputCall(
request_iterator, context)
def FullDuplexCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.FullDuplexCall(
request_iterator, context)
def HalfDuplexCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.HalfDuplexCall(
request_iterator, context)
def setUp(self):
super(SimpleStubsPluginTest, self).setUp()
self._server = test_common.test_server()
service_pb2_grpc.add_TestServiceServicer_to_server(
self.Servicer(), self._server)
self._port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._target = 'localhost:{}'.format(self._port)
def tearDown(self):
self._server.stop(None)
super(SimpleStubsPluginTest, self).tearDown()
def testUnaryCall(self):
request = request_pb2.SimpleRequest(response_size=13)
response = service_pb2_grpc.TestService.UnaryCall(
request,
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_response = self.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallInsecureSugar(self):
request = request_pb2.SimpleRequest(response_size=13)
response = service_pb2_grpc.TestService.UnaryCall(request,
self._target,
insecure=True,
wait_for_ready=True)
expected_response = self.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testStreamingOutputCall(self):
request = _streaming_output_request()
expected_responses = self.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
responses = service_pb2_grpc.TestService.StreamingOutputCall(
request,
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testStreamingInputCall(self):
response = service_pb2_grpc.TestService.StreamingInputCall(
_streaming_input_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_response = self.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testFullDuplexCall(self):
responses = service_pb2_grpc.TestService.FullDuplexCall(
_full_duplex_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_responses = self.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testHalfDuplexCall(self):
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service_pb2_grpc.TestService.HalfDuplexCall(
half_duplex_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_responses = self.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
class ModuleMainTest(unittest.TestCase):
"""Test case for running `python -m grpc_tools.protoc`.
"""
def test_clean_output(self):
if sys.executable is None:
raise unittest.SkipTest(
"Running on a interpreter that cannot be invoked from the CLI.")
proto_dir_path = os.path.join("src", "proto")
test_proto_path = os.path.join(proto_dir_path, "grpc", "testing",
"empty.proto")
streams = tuple(tempfile.TemporaryFile() for _ in range(2))
work_dir = tempfile.mkdtemp()
try:
invocation = (sys.executable, "-m", "grpc_tools.protoc",
"--proto_path", proto_dir_path, "--python_out",
work_dir, "--grpc_python_out", work_dir,
test_proto_path)
proc = subprocess.Popen(invocation,
stdout=streams[0],
stderr=streams[1])
proc.wait()
outs = []
for stream in streams:
stream.seek(0)
self.assertEqual(0, len(stream.read()))
self.assertEqual(0, proc.returncode)
        finally:
            shutil.rmtree(work_dir)
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 |
mpeuster/estate | experiments/scale-down-hack/pox/pox/messenger/__init__.py | 37 | 19551 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The POX Messenger system.
The Messenger system is a way to build services in POX that can be
consumed by external clients.
Sometimes a controller might need to interact with the outside world.
Sometimes you need to integrate with an existing piece of software and
maybe you don't get to choose how you communicate with it. Other times,
you have the opportunity and burden of rolling your own. The Messenger
system is meant to help you with the latter case.
In short, channels are a system for communicating between POX and
external programs by exchanging messages encoded in JSON. It is intended
to be quite general, both in the communication models it supports and in
the transports it supports (as of this writing, it supports a
straightforward TCP socket transport and an HTTP transport). Any
service written to use the Messenger should theoretically be usable via
any transport.
*Connections* are somehow established when a client connects via some
*Transport*. The server can individually send messages to a specific client.
A client can send messages to a *Channel* on the server. A client can also
become a member of a channel, after which it will receive any messages
the server sends to that channel. There is always a default channel with
no name.
Channels can either be permanent or temporary. Temporary channels are
automatically destroyed when they no longer contain any members.
"""
from pox.lib.revent.revent import *
from pox.core import core as core
import json
import time
import random
import hashlib
from base64 import b32encode
log = core.getLogger()
# JSON decoder used by default
defaultDecoder = json.JSONDecoder()
class ChannelJoin (Event):
""" Fired on a channel when a client joins. """
def __init__ (self, connection, channel, msg = {}):
Event.__init__(self)
self.con = connection
self.channel = channel
self.msg = msg
class ConnectionClosed (Event):
""" Fired on a connection when it closes. """
def __init__ (self, connection):
Event.__init__(self)
self.con = connection
class ChannelLeave (Event):
""" Fired on a channel when a client leaves. """
def __init__ (self, connection, channel):
Event.__init__(self)
self.con = connection
self.channel = channel
class ChannelCreate (Event):
""" Fired on a Nexus when a channel is created. """
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class ChannelDestroy (Event):
"""
Fired on the channel and its Nexus right before a channel is destroyed.
Set .keep = True to keep the channel after all.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
self.keep = False
class ChannelDestroyed (Event):
"""
Fired on the channel and its Nexus right after a channel is destroyed.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class MissingChannel (Event):
"""
  Fired on a Nexus when a message has been received for a non-existent channel.
You can create the channel in response to this.
"""
def __init__ (self, connection, channel_name, msg):
Event.__init__(self)
self.con = connection
self.channel_name = channel_name
self.msg = msg
class MessageReceived (Event):
"""
  Fired by a channel when a message has been received.
Always fired on the Connection itself. Also fired on the corresponding
Channel object as specified by the CHANNEL key.
The listener looks like:
def _handle_MessageReceived (event, msg):
"""
def __init__ (self, connection, channel, msg):
Event.__init__(self)
self.con = connection
self.msg = msg
self.channel = channel
def is_to_channel (self, channel):
"""
Returns True if this message is to the given channel
"""
if isinstance(channel, Channel):
channel = channel.name
if channel == self.channel: return True
if channel in self.channel: return True
return False
def _invoke (self, handler, *args, **kw):
# Special handling -- pass the message
return handler(self, self.msg, *args, **kw)
def _get_nexus (nexus):
if nexus is None: nexus = "MessengerNexus"
if isinstance(nexus, str):
if not core.hasComponent(nexus):
#TODO: Wait for channel Nexus
s = "MessengerNexus %s is not available" % (nexus,)
log.error(s)
raise RuntimeError(s)
return getattr(core, nexus)
assert isinstance(nexus, MessengerNexus)
return nexus
class Transport (object):
def __init__ (self, nexus):
self._nexus = _get_nexus(nexus)
def _forget (self, connection):
""" Forget about a connection """
raise RuntimeError("Not implemented")
class Connection (EventMixin):
"""
Superclass for Connections.
This could actually be a bit thinner, if someone wants to clean it up.
Maintains the state and handles message parsing and dispatch for a
single connection.
"""
_eventMixin_events = set([
MessageReceived,
ConnectionClosed,
])
def __init__ (self, transport):
"""
transport is the source of the connection (e.g, TCPTransport).
"""
EventMixin.__init__(self)
self._is_connected = True
self._transport = transport
self._newlines = False
    # Transports that don't do their own encapsulation can use _rx_raw(),
# which uses this. (Such should probably be broken into a subclass.)
self._buf = bytes()
key,num = self._transport._nexus.generate_session()
self._session_id,self._session_num = key,num
def _send_welcome (self):
"""
Send a message to a client so they know they're connected
"""
self.send({"CHANNEL":"","cmd":"welcome","session_id":self._session_id})
def _close (self):
"""
Called internally to shut the connection down.
"""
if self._is_connected is False: return
self._transport._forget(self)
self._is_connected = False
for name,chan in self._transport._nexus._channels.items():
chan._remove_member(self)
self.raiseEventNoErrors(ConnectionClosed, self)
#self._transport._nexus.raiseEventNoErrors(ConnectionClosed, self)
def send (self, whatever):
"""
Send data over the connection.
It will first be encoded into JSON, and optionally followed with
a newline. Ultimately, it will be passed to send_raw() to actually
be sent.
"""
if self._is_connected is False: return False
s = json.dumps(whatever, default=str)
if self._newlines: s += "\n"
self.send_raw(s)
return True
def send_raw (self, data):
"""
This method should actually send data out over the connection.
Subclasses need to implement this.
"""
raise RuntimeError("Not implemented")
@property
def is_connected (self):
"""
True if this Connection is still connected.
"""
return self._is_connected
def _rx_message (self, msg):
"""
Raises events when a complete message is available.
Subclasses may want to call this when they have a new message
    available. See _rx_raw().
"""
e = self.raiseEventNoErrors(MessageReceived,self,msg.get('CHANNEL'),msg)
self._transport._nexus._rx_message(self, msg)
def _rx_raw (self, data):
"""
If your subclass receives a stream instead of discrete messages, this
    method can parse out individual messages and call _rx_message() when
it has full messages.
"""
if len(data) == 0: return
if len(self._buf) == 0:
if data[0].isspace():
self._buf = data.lstrip()
else:
self._buf = data
else:
self._buf += data
while len(self._buf) > 0:
try:
msg, l = defaultDecoder.raw_decode(self._buf)
except:
# Need more data before it's a valid message
# (.. or the stream is corrupt and things will never be okay
# ever again)
return
self._buf = self._buf[l:]
if len(self._buf) != 0 and self._buf[0].isspace():
self._buf = self._buf.lstrip()
self._rx_message(msg)
def __str__ (self):
"""
Subclasses should implement better versions of this.
"""
return "<%s/%s/%i>" % (self.__class__.__name__, self._session_id,
self._session_num)
def close (self):
"""
Close the connection.
"""
self._close()
class Channel (EventMixin):
"""
Allows one to easily listen to only messages that have a CHANNEL key
with a specific name.
Generally you will not create these classes directly, but by calling
getChannel() on the ChannelNexus.
"""
_eventMixin_events = set([
MessageReceived,
    ChannelJoin, # Immediately when a connection goes up
ChannelLeave, # When a connection goes down
ChannelDestroy,
ChannelDestroyed,
])
def __init__ (self, name, nexus = None, temporary = False):
"""
name is the name for the channel (i.e., the value for the messages'
CHANNEL key).
nexus is the specific MessengerNexus with which this channel is to be
associated (defaults to core.MessengerNexus).
"""
EventMixin.__init__(self)
assert isinstance(name, basestring)
self._name = name
self._nexus = _get_nexus(nexus)
self._nexus._channels[name] = self
self.temporary = temporary
self._members = set() # Member Connections
@property
def name (self):
return self._name
def _destroy (self):
""" Remove channel """
e = self.raiseEvent(ChannelDestroy, self)
if e:
if e.keep: return False
self._nexus.raiseEvent(e)
if e.keep: return False
del self._nexus._channels[self._name]
# We can't just do the follow because then listeners
# can't tell if the channel is now empty...
#for sub in set(self._members):
# sub.raiseEvent(ChannelLeave, sub, self)
#
#self._members.clear()
# .. so do the following really straightforward...
for sub in set(self._members):
self._remove_member(sub, allow_destroy = False)
e = ChannelDestroyed(self)
self.raiseEvent(e)
self._nexus.raiseEvent(e)
def _add_member (self, con, msg = {}):
if con in self._members: return
self._members.add(con)
self.raiseEvent(ChannelJoin, con, self, msg)
def _remove_member (self, con, allow_destroy = True):
if con not in self._members: return
self._members.remove(con)
self.raiseEvent(ChannelLeave, con, self)
if not allow_destroy: return
if self.temporary is True:
if len(self._members) == 0:
self._destroy()
def send (self, msg):
d = dict(msg)
d['CHANNEL'] = self._name
for r in list(self._members):
if not r.is_connected: continue
r.send(d)
def __str__ (self):
return "<Channel " + self.name + ">"
def reply (_msg, **kw):
if not isinstance(_msg, dict):
# We'll also take an event...
_msg = _msg.msg
kw['CHANNEL'] = _msg.get('CHANNEL')
if 'XID' in _msg: kw['XID'] = _msg.get('XID')
return kw
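# Example (a sketch): inside a MessageReceived handler,
#   event.con.send(reply(event, cmd = "pong"))
# echoes the CHANNEL (and XID, if present) of the request so the client
# can correlate the response.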
class ChannelBot (object):
"""
A very simple framework for writing "bots" that respond to messages
on a channel.
"""
def __str__ (self):
return "<%s@%s>" % (self.__class__.__name__, self.channel)
def __init__ (self, channel, nexus = None, weak = False, extra = {}):
self._startup(channel, nexus, weak, extra)
def _startup (self, channel, nexus = None, weak = False, extra = {}):
self._nexus = _get_nexus(nexus)
if isinstance(channel, Channel):
self.channel = channel
else:
self.channel = self._nexus.get_channel(channel, create=True)
self.listeners = self.channel.addListeners(self, weak = weak)
self.prefixes = None
self._init(extra)
if self.prefixes is None:
self.prefixes = []
for n in dir(self):
if n.startswith("_exec_"):
n = n.split("_")[2]
self.prefixes.append(n)
def _handle_ChannelDestroyed (self, event):
self.channel.removeListeners(self.listeners)
self._destroyed()
def _handle_ChannelJoin (self, event):
self._join(event, event.con, event.msg)
def _handle_ChannelLeave (self, event):
self._leave(event.con, len(self.channel._members) == 0)
def _handle_MessageReceived (self, event, msg):
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_%s_%s" % (prefix, str(event.msg[prefix]))
if hasattr(self, cmd):
getattr(self, cmd)(event)
return #TODO: Return val?
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_" + prefix
if hasattr(self, cmd):
getattr(self, cmd)(event, msg[prefix])
return #TODO: Return val?
self._unhandled(event)
def _unhandled (self, event):
""" Called when no command found """
pass
def _join (self, event, connection, msg):
""" Called when a connection joins """
pass
def _leave (self, connection, empty):
"""
Called when a connection leaves
If channel now has no members, empty is True
"""
pass
def _destroyed (self):
""" Called when channel is destroyed """
pass
def _init (self, extra):
"""
Called during initialization
'extra' is any additional information passed in when initializing
the bot. In particular, this may be the message that goes along
with its invitation into a channel.
"""
pass
def reply (__self, __event, **kw):
"""
Unicast reply to a specific message.
"""
__event.con.send(reply(__event, **kw))
def send (__self, __msg={}, **kw):
"""
Send a message to all members of this channel.
"""
m = {}
m.update(__msg)
m.update(kw)
__self.channel.send(m)
class DefaultChannelBot (ChannelBot):
def _init (self, extra):
self._bots = {}
def add_bot (self, bot, name = None):
"""
Registers a bot (an instance of ChannelBot) so that it can be
invited to other channels.
"""
assert issubclass(bot, ChannelBot)
if name is None:
name = bot.__name__
self._bots[name] = bot
def _exec_newlines_False (self, event):
event.con._newlines = False
def _exec_newlines_True (self, event):
event.con._newlines = True
def _exec_cmd_invite (self, event):
"""
Invites a bot that has been registered with add_bot() to a channel.
Note that you can invite a bot to an empty (new) temporary channel.
It will stay until the first member leaves.
"""
botname = event.msg.get('bot')
botclass = self._bots.get(botname)
channel = event.msg.get('channel')
new_channel = False
if channel is None:
new_channel = True
channel = self._gen_channel_name(event.msg.get("prefix", "temp"))
chan = self._nexus.get_channel(channel, create=True, temporary=True)
if chan is None:
#TODO: send an error
log.warning("A bot was invited to a nonexistent channel (%s)"
% (channel,))
return
if botclass is None:
#TODO: send an error
log.warning("A nonexistent bot (%s) was invited to a channel"
% (botname,))
return
bot = botclass(channel, self._nexus)
if new_channel:
self.reply(event, new_channel = new_channel)
def _unhandled (self, event):
log.warn("Default channel got unknown command: "
+ str(event.msg.get('cmd')))
def _gen_channel_name (self, prefix = "temp"):
""" Makes up a channel name """
prefix += "_"
import random
while True:
# Sloppy
r = random.randint(1, 100000)
n = prefix + str(r)
      if n not in self._nexus._channels:
break
return n
def _exec_cmd_new_channel (self, event):
""" Generates a new channel with random name """
prefix = event.msg.get('prefix', 'temp')
n = self._gen_channel_name(prefix)
ch = self._nexus.get_channel(n, create=True, temporary=True)
ch._add_member(event.con, event.msg)
self.reply(event, new_channel = n)
def _exec_cmd_join_channel (self, event):
""" Joins/creates a channel """
temp = event.msg.get('temporary', True) # Default temporary!
ch = self._nexus.get_channel(event.msg['channel'], temporary=temp)
if ch is None: return
ch._add_member(event.con, event.msg)
def _exec_cmd_leave_channel (self, event):
ch = self._nexus.get_channel(event.msg['channel'])
if ch is None: return
ch._remove_member(event.con)
def _exec_test (self, event, value):
log.info("Default channel got: " + str(value))
self.reply(event, test = value.upper())
class MessengerNexus (EventMixin):
"""
Transports, Channels, etc. are all associated with a MessengerNexus.
Typically, there is only one, and it is registered as
pox.core.MessengerNexus
"""
_eventMixin_events = set([
MissingChannel, # When a msg arrives to nonexistent channel
ChannelDestroy,
ChannelDestroyed,
ChannelCreate,
])
def __init__ (self):
EventMixin.__init__(self)
self._channels = {} # name -> Channel
self.default_bot = DefaultChannelBot("", self)
self._next_ses = 1
self._session_salt = str(time.time())
def generate_session (self):
"""
Return a new session ID tuple (key, num)
The key is a unique and not-trivial-to-guess alphanumeric value
associated with the session.
The num is a unique numerical value associated with the session.
"""
r = self._next_ses
self._next_ses += 1
key = str(random.random()) + str(time.time()) + str(r)
key += str(id(key)) + self._session_salt
key = b32encode(hashlib.md5(key).digest()).upper().replace('=','')
def alphahex (r):
""" base 16 on digits 'a' through 'p' """
r=hex(r)[2:].lower()
return ''.join(chr((10 if ord(x) >= 97 else 49) + ord(x)) for x in r)
key = alphahex(r) + key
return key,r
def get_channel (self, name, create = True, temporary = False):
if name is None: name = ""
if name in self._channels:
return self._channels[name]
elif create:
c = Channel(name, self, temporary = temporary)
self.raiseEvent(ChannelCreate, c)
return c
else:
return None
def _rx_message (self, con, msg):
"""
Dispatches messages to listeners of this nexus and to its Channels.
Called by Connections.
"""
ret = False
assert isinstance(msg, dict)
if isinstance(msg, dict):
channels = msg.get('CHANNEL')
if channels is None:
channels = [""]
if not isinstance(channels, list):
channels = [channels]
for cname in channels:
channel = self.get_channel(cname, create=False)
if channel is None:
e = self.raiseEvent(MissingChannel, con, cname, msg)
if e is not None: cname = e.channel_name
channel = self.get_channel(cname, create=False)
if channel is not None:
#print "raise on", channel
channel.raiseEvent(MessageReceived, con, channel, msg)
ret = True
return ret
def launch ():
core.registerNew(MessengerNexus)
| apache-2.0 |
gingerboy92/android_kernel_xiaomi_msm8916---messedup | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
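# An illustrative test specification (a sketch, not a shipped testcase).
# Records are "command:opcode:threadid:data"; "C" issues a command, "T"
# tests a status value once, "W" waits until it matches:
#
#   C: schedfifo: 0: 80
#   C: locknowait: 0: 0
#   W: locked: 0: 0
#   C: unlock: 0: 0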
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
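# For example, analyse('4', ["M", "eq", 4], '0') extracts the mutex-state
# digit at position 0 and compares it to 4 ("locked" above), returning 1
# for a match. Illustrative only.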
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
le9i0nx/ansible | lib/ansible/plugins/lookup/together.py | 57 | 2150 | # (c) 2013, Bradley Young <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
    lookup: together
author: Bradley Young <[email protected]>
version_added: '1.3'
    short_description: merges lists into synchronized list
description:
- Creates a list with the iterated elements of the supplied lists
- "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
      - This is basically the same as the 'zip_longest' filter and Python function
- Any 'unbalanced' elements will be substituted with 'None'
options:
_terms:
description: list of lists to merge
required: True
"""
EXAMPLES = """
- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
debug:
msg: "{{ item.0 }} and {{ item.1 }}"
with_together:
- ['a', 'b', 'c', 'd']
- [1, 2, 3, 4]
"""
RETURN = """
_list:
    description: synchronized list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import zip_longest
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
Replace any empty spots in 2nd array with None:
[1, 2], [3] -> [1, 3], [2, None]
"""
def _lookup_variables(self, terms):
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_together requires at least one element in each list")
return [self._flatten(x) for x in zip_longest(*my_list, fillvalue=None)]
| gpl-3.0 |
jmwright/cadquery-x | gui/libs/future/backports/email/feedparser.py | 82 | 22736 | # Copyright (C) 2004-2006 Python Software Foundation
# Authors: Baxter, Wouters and Warsaw
# Contact: [email protected]
"""FeedParser - An email feed parser.
The feed parser implements an interface for incrementally parsing an email
message, line by line. This has advantages for certain applications, such as
those reading email messages off a socket.
FeedParser.feed() is the primary interface for pushing new data into the
parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
The other advantage of this parser is that it will never raise a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import object, range, super
from future.utils import implements_iterator, PY3
__all__ = ['FeedParser', 'BytesFeedParser']
import re
from future.backports.email import errors
from future.backports.email import message
from future.backports.email._policybase import compat32
NLCRE = re.compile('\r\n|\r|\n')
NLCRE_bol = re.compile('(\r\n|\r|\n)')
NLCRE_eol = re.compile('(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile('(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'
NeedMoreData = object()
# @implements_iterator
class BufferedSubFile(object):
"""A file-ish object that can have new data loaded into it.
You can also push and pop line-matching predicates onto a stack. When the
current predicate matches the current line, a false EOF response
(i.e. empty string) is returned instead. This lets the parser adhere to a
simple abstraction -- it parses until EOF closes the current message.
"""
def __init__(self):
# The last partial line pushed into this object.
self._partial = ''
# The list of full, pushed lines, in reverse order
self._lines = []
# The stack of false-EOF checking predicates.
self._eofstack = []
# A flag indicating whether the file has been closed or not.
self._closed = False
def push_eof_matcher(self, pred):
self._eofstack.append(pred)
def pop_eof_matcher(self):
return self._eofstack.pop()
def close(self):
# Don't forget any trailing partial line.
self._lines.append(self._partial)
self._partial = ''
self._closed = True
def readline(self):
if not self._lines:
if self._closed:
return ''
return NeedMoreData
# Pop the line off the stack and see if it matches the current
# false-EOF predicate.
line = self._lines.pop()
# RFC 2046, section 5.1.2 requires us to recognize outer level
# boundaries at any level of inner nesting. Do this, but be sure it's
# in the order of most to least nested.
for ateof in self._eofstack[::-1]:
if ateof(line):
# We're at the false EOF. But push the last line back first.
self._lines.append(line)
return ''
return line
def unreadline(self, line):
# Let the consumer push a line back into the buffer.
assert line is not NeedMoreData
self._lines.append(line)
def push(self, data):
"""Push some new data into this object."""
# Handle any previous leftovers
data, self._partial = self._partial + data, ''
# Crack into lines, but preserve the newlines on the end of each
parts = NLCRE_crack.split(data)
# The *ahem* interesting behaviour of re.split when supplied grouping
# parentheses is that the last element of the resulting list is the
# data after the final RE. In the case of a NL/CR terminated string,
# this is the empty string.
self._partial = parts.pop()
#GAN 29Mar09 bugs 1555570, 1721862 Confusion at 8K boundary ending with \r:
# is there a \n to follow later?
if not self._partial and parts and parts[-1].endswith('\r'):
self._partial = parts.pop(-2)+parts.pop()
# parts is a list of strings, alternating between the line contents
# and the eol character(s). Gather up a list of lines after
# re-attaching the newlines.
lines = []
for i in range(len(parts) // 2):
lines.append(parts[i*2] + parts[i*2+1])
self.pushlines(lines)
def pushlines(self, lines):
# Reverse and insert at the front of the lines.
self._lines[:0] = lines[::-1]
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if line == '':
raise StopIteration
return line
class FeedParser(object):
"""A feed-style parser of email."""
def __init__(self, _factory=message.Message, **_3to2kwargs):
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
else: policy = compat32
"""_factory is called with no arguments to create a new message obj
The policy keyword specifies a policy object that controls a number of
aspects of the parser's operation. The default policy maintains
backward compatibility.
"""
self._factory = _factory
self.policy = policy
try:
_factory(policy=self.policy)
self._factory_kwds = lambda: {'policy': self.policy}
except TypeError:
# Assume this is an old-style factory
self._factory_kwds = lambda: {}
self._input = BufferedSubFile()
self._msgstack = []
if PY3:
self._parse = self._parsegen().__next__
else:
self._parse = self._parsegen().next
self._cur = None
self._last = None
self._headersonly = False
# Non-public interface for supporting Parser's headersonly flag
def _set_headersonly(self):
self._headersonly = True
def feed(self, data):
"""Push more data into the parser."""
self._input.push(data)
self._call_parse()
def _call_parse(self):
try:
self._parse()
except StopIteration:
pass
def close(self):
"""Parse all remaining data and return the root message object."""
self._input.close()
self._call_parse()
root = self._pop_message()
assert not self._msgstack
# Look for final set of defects
if root.get_content_maintype() == 'multipart' \
and not root.is_multipart():
defect = errors.MultipartInvariantViolationDefect()
self.policy.handle_defect(root, defect)
return root
def _new_message(self):
msg = self._factory(**self._factory_kwds())
if self._cur and self._cur.get_content_type() == 'multipart/digest':
msg.set_default_type('message/rfc822')
if self._msgstack:
self._msgstack[-1].attach(msg)
self._msgstack.append(msg)
self._cur = msg
self._last = msg
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
def _parsegen(self):
# Create a new message and start by parsing headers.
self._new_message()
headers = []
# Collect the headers, searching for a line that doesn't match the RFC
# 2822 header or continuation pattern (including an empty line).
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
if not headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if not NLCRE.match(line):
defect = errors.MissingHeaderBodySeparatorDefect()
self.policy.handle_defect(self._cur, defect)
self._input.unreadline(line)
break
headers.append(line)
# Done with the headers, so parse them and figure out what we're
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
# necessary in the older parser, which could raise errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
if self._cur.get_content_type() == 'message/delivery-status':
# message/delivery-status contains blocks of headers separated by
# a blank line. We'll represent each header block as a separate
# nested message object, but the processing is a bit different
# than standard message/* types because there is no body for the
# nested messages. A blank line separates the subparts.
while True:
self._input.push_eof_matcher(NLCRE.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
msg = self._pop_message()
# We need to pop the EOF matcher in order to tell if we're at
# the end of the current file, not the end of the last block
# of message headers.
self._input.pop_eof_matcher()
# The input stream must be sitting at the newline or at the
# EOF. We want to see if we're at the end of this subpart, so
# first consume the blank line, then test the next line to see
# if we're at this subpart's EOF.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
if line == '':
break
# Not at EOF so this is a line we're going to need.
self._input.unreadline(line)
return
if self._cur.get_content_maintype() == 'message':
            # The message claims to be a message/* type, so what follows is
            # another RFC 2822 message.
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
self._pop_message()
return
if self._cur.get_content_maintype() == 'multipart':
boundary = self._cur.get_boundary()
if boundary is None:
# The message /claims/ to be a multipart but it has not
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
defect = errors.NoBoundaryInMultipartDefect()
self.policy.handle_defect(self._cur, defect)
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
            # Make sure a valid content transfer encoding was specified per
            # RFC 2045:6.4 (composite types must be 7bit, 8bit or binary).
if (self._cur.get('content-transfer-encoding', '8bit').lower()
not in ('7bit', '8bit', 'binary')):
defect = errors.InvalidMultipartContentTransferEncodingDefect()
self.policy.handle_defect(self._cur, defect)
# Create a line match predicate which matches the inter-part
# boundary as well as the end-of-multipart boundary. Don't push
# this onto the input stream until we've scanned past the
# preamble.
separator = '--' + boundary
boundaryre = re.compile(
'(?P<sep>' + re.escape(separator) +
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
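            # For boundary 'XYZ' this matches both delimiter forms, e.g.
            #   '--XYZ\n'   -> inter-part boundary ('end' group is None)
            #   '--XYZ--\n' -> closing boundary ('end' group is '--')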
capturing_preamble = True
preamble = []
linesep = False
close_boundary_seen = False
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
mo = boundaryre.match(line)
if mo:
# If we're looking at the end boundary, we're done with
# this multipart. If there was a newline at the end of
# the closing boundary, then we need to initialize the
# epilogue with the empty string (see below).
if mo.group('end'):
close_boundary_seen = True
linesep = mo.group('linesep')
break
# We saw an inter-part boundary. Were we in the preamble?
if capturing_preamble:
if preamble:
# According to RFC 2046, the last newline belongs
# to the boundary.
lastline = preamble[-1]
eolmo = NLCRE_eol.search(lastline)
if eolmo:
preamble[-1] = lastline[:-len(eolmo.group(0))]
self._cur.preamble = EMPTYSTRING.join(preamble)
capturing_preamble = False
self._input.unreadline(line)
continue
# We saw a boundary separating two parts. Consume any
# multiple boundary lines that may be following. Our
# interpretation of RFC 2046 BNF grammar does not produce
# body parts within such double boundaries.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
mo = boundaryre.match(line)
if not mo:
self._input.unreadline(line)
break
# Recurse to parse this subpart; the input stream points
# at the subpart's first line.
self._input.push_eof_matcher(boundaryre.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
# Because of RFC 2046, the newline preceding the boundary
# separator actually belongs to the boundary, not the
# previous subpart's payload (or epilogue if the previous
# part is a multipart).
if self._last.get_content_maintype() == 'multipart':
epilogue = self._last.epilogue
if epilogue == '':
self._last.epilogue = None
elif epilogue is not None:
mo = NLCRE_eol.search(epilogue)
if mo:
end = len(mo.group(0))
self._last.epilogue = epilogue[:-end]
else:
payload = self._last._payload
if isinstance(payload, str):
mo = NLCRE_eol.search(payload)
if mo:
payload = payload[:-len(mo.group(0))]
self._last._payload = payload
self._input.pop_eof_matcher()
self._pop_message()
# Set the multipart up for newline cleansing, which will
# happen if we're in a nested multipart.
self._last = self._cur
else:
                    # We must still be capturing the preamble.
assert capturing_preamble
preamble.append(line)
# We've seen either the EOF or the end boundary. If we're still
# capturing the preamble, we never saw the start boundary. Note
# that as a defect and store the captured text as the payload.
if capturing_preamble:
defect = errors.StartBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
self._cur.set_payload(EMPTYSTRING.join(preamble))
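                # Note: the loop below only drains the remaining input; the
                # lines are not collected, so the epilogue ends up as the
                # empty string.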
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# If we're not processing the preamble, then we might have seen
# EOF without seeing that end boundary...that is also a defect.
if not close_boundary_seen:
defect = errors.CloseBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
return
# Everything from here to the EOF is epilogue. If the end boundary
# ended in a newline, we'll need to make sure the epilogue isn't
# None
if linesep:
epilogue = ['']
else:
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
epilogue.append(line)
# Any CRLF at the front of the epilogue is not technically part of
# the epilogue. Also, watch out for an empty string epilogue,
# which means a single newline.
if epilogue:
firstline = epilogue[0]
bolmo = NLCRE_bol.match(firstline)
if bolmo:
epilogue[0] = firstline[len(bolmo.group(0)):]
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# Otherwise, it's some non-multipart type, so the entire rest of the
# file contents becomes the payload.
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
def _parse_headers(self, lines):
# Passed a list of lines that make up the headers for the current msg
lastheader = ''
lastvalue = []
for lineno, line in enumerate(lines):
# Check for continuation
if line[0] in ' \t':
if not lastheader:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
defect = errors.FirstHeaderLineIsContinuationDefect(line)
self.policy.handle_defect(self._cur, defect)
continue
lastvalue.append(line)
continue
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
lastheader, lastvalue = '', []
# Check for envelope header, i.e. unix-from
if line.startswith('From '):
if lineno == 0:
# Strip off the trailing newline
mo = NLCRE_eol.search(line)
if mo:
line = line[:-len(mo.group(0))]
self._cur.set_unixfrom(line)
continue
elif lineno == len(lines) - 1:
# Something looking like a unix-from at the end - it's
# probably the first line of the body, so push back the
# line and stop.
self._input.unreadline(line)
return
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
# There will always be a colon, because if there wasn't the part of
# the parser that calls us would have started parsing the body.
i = line.find(':')
        assert i > 0, "_parse_headers fed line with no : and no leading WS"
lastheader = line[:i]
lastvalue = [line]
# Done with all the lines, so handle the last header.
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
class BytesFeedParser(FeedParser):
"""Like FeedParser, but feed accepts bytes."""
def feed(self, data):
super().feed(data.decode('ascii', 'surrogateescape'))
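        # e.g. BytesFeedParser().feed(b'Subject: hi\r\n\r\n') behaves like
        # FeedParser().feed('Subject: hi\r\n\r\n'); surrogateescape keeps any
        # non-ASCII bytes recoverable when the payload is re-encoded.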
| lgpl-3.0 |
MarkTheF4rth/youtube-dl | youtube_dl/extractor/youtube.py | 20 | 92251 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
encode_dict,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
orderedSet,
parse_duration,
remove_start,
smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
ISO3166Utils,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'Email': username,
'GALX': galx,
'Passwd': password,
'PersistentCookie': 'yes',
'_utf8': '霱',
'bgresponse': 'js_disabled',
'checkConnection': '',
'checkedDomains': 'youtube',
'dnConn': '',
'pstMsg': '0',
'rmShown': '1',
'secTok': '',
'signIn': 'Sign in',
'timeStmp': '',
'service': 'youtube',
'uilel': '3',
'hl': 'en_US',
}
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
                self._downloader.report_warning(
                    'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
                    '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
tfa_form_strs.update({
'Pin': tfa_code,
'TrustDevice': 'on',
})
tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
if tfa_results is False:
return False
if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
return False
if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus # or vid.plus/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240},
'6': {'ext': 'flv', 'width': 450, 'height': 270},
'13': {'ext': '3gp'},
'17': {'ext': '3gp', 'width': 176, 'height': 144},
'18': {'ext': 'mp4', 'width': 640, 'height': 360},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720},
'34': {'ext': 'flv', 'width': 640, 'height': 360},
'35': {'ext': 'flv', 'width': 854, 'height': 480},
'36': {'ext': '3gp', 'width': 320, 'height': 240},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
'43': {'ext': 'webm', 'width': 640, 'height': 360},
'44': {'ext': 'webm', 'width': 854, 'height': 480},
'45': {'ext': 'webm', 'width': 1280, 'height': 720},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080},
'59': {'ext': 'mp4', 'width': 854, 'height': 480},
'78': {'ext': 'mp4', 'width': 854, 'height': 480},
# 3d videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
# Apple HTTP Live Streaming
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'description': 'md5:782e8651347686cba06e58f71ab51773',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia'
}
},
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20120724',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'mp4',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:33',
},
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
}
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}],
'params': {
'skip_download': True,
},
},
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
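        # Cache the function as a permutation: run it over a string of unique
        # characters (chr(0), chr(1), ...) and record, for each output
        # position, the input index its character came from. Replaying that
        # index list reproduces the function without re-downloading the
        # player. This assumes the signature function only reorders/selects
        # characters, which the cached lambda above relies on as well.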
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
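            # e.g. _genslice(2, 6, 1) -> 's[2:7]' and
            #      _genslice(5, 1, -1) -> 's[5:0:-1]'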
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
player_config = json.loads(mobj.group(1))
try:
args = player_config['args']
caption_url = args['ttsurl']
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
        # An extractor error can be raised by the download process if there
        # are no automatic captions but there are subtitles
except (KeyError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
@classmethod
def extract_id(cls, url):
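        # e.g. extract_id('https://www.youtube.com/watch?v=BaW_jenozKc')
        # -> 'BaW_jenozKc' (the 11-character video ID is capture group 2)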
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
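        # Illustrative (hypothetical) manifest excerpt: '#'-prefixed lines
        # are tags and are filtered out; the remaining lines are variant URLs
        # carrying an itag path component, e.g.
        #   #EXT-X-STREAM-INF:BANDWIDTH=1280000
        #   https://manifest.googlevideo.com/.../itag/22/...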
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _parse_dash_manifest(
self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
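        # Encrypted signatures are embedded in the manifest URL as an
        # '/s/<sig>' path component; rewrite it to '/signature/<decrypted>'.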
dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
dash_doc = self._download_xml(
dash_manifest_url, video_id,
note='Downloading DASH manifest',
errnote='Could not download DASH manifest',
fatal=fatal)
if dash_doc is False:
return []
formats = []
for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
mime_type = a.attrib.get('mimeType')
for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
if mime_type == 'text/vtt':
# TODO implement WebVTT downloading
pass
elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
f = {
'format_id': format_id,
'url': video_url,
'width': int_or_none(r.attrib.get('width')),
'height': int_or_none(r.attrib.get('height')),
'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
'asr': int_or_none(r.attrib.get('audioSamplingRate')),
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
if segment_list is not None:
f.update({
'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
'protocol': 'http_dash_segments',
})
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == format_id)
except StopIteration:
full_info = self._formats.get(format_id, {}).copy()
full_info.update(f)
codecs = r.attrib.get('codecs')
if codecs:
if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
full_info['vcodec'] = codecs
elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
full_info['acodec'] = codecs
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
        # Extract the original video URL from a redirecting URL (e.g. age
        # verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
            # We simulate access to the video from www.youtube.com/v/{video_id};
            # this page can be viewed without logging in to YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse.urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
if mobj:
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
            # We also try looking in get_video_info since it may contain a
            # different dashmpd URL that points to a DASH manifest with a
            # possibly different itag set (some itags are missing from the
            # DASH manifest pointed to by the webpage's dashmpd, some from
            # the DASH manifest pointed to by get_video_info's dashmpd).
            # The general idea is to take a union of the itags of both DASH
            # manifests (for an example of a video with such 'manifest
            # behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get('use_cipher_signature') != ['True']:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
break
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
for feed in multifeed_metadata_list.split(','):
feed_data = compat_parse_qs(feed)
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
        # We first try to get a high-quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
video_webpage, 'upload date', default=None)
if upload_date:
                upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
                    # Some itags are not included in the DASH manifest, so the
                    # corresponding formats will lack metadata (see
                    # https://github.com/rg3/youtube-dl/pull/5993). Try to
                    # extract metadata from the url_encoded_fmt_stream_map
                    # entry instead.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
'filesize': int_or_none(url_data.get('clen', [None])[0]),
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
}
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, ext = kind_ext
dct['ext'] = ext
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
                                codecs = [c.strip() for c in codecs.split(',')]
                                if len(codecs) == 2:
                                    # The codecs parameter conventionally lists
                                    # the video codec first, e.g.
                                    # 'avc1.64001F, mp4a.40.2'.
                                    acodec, vcodec = codecs[1], codecs[0]
else:
acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
dct.update({
'acodec': acodec,
'vcodec': vcodec,
})
if format_id in self._formats:
dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for dash_manifest_url in dash_mpds:
dash_formats = {}
try:
for df in self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
                    # Additional DASH manifests may return HTTP Error 403, so
                    # allow them to fail without a bug-report message once at
                    # least one DASH manifest has succeeded. This is a temporary
                    # workaround to reduce the burst of bug reports until we
                    # figure out the reason and whether it can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
(
(?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
}, {
'note': 'embedded',
'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
        'playlist_mincount': 21,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
        # Mixes are generated from a single video;
        # the playlist id is just 'RD' + the seed video id
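        # e.g. (illustrative) playlist_id 'RDdQw4w9WgXcQ' -> seed video 'dQw4w9WgXcQ' (last 11 chars)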
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
raise ExtractorError(
'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
# Extract the video ids from the playlist pages
def _entries():
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.finditer(self._VIDEO_RE, content_html)
                # We remove duplicates and the link with index 0,
                # which is not the first video of the playlist
new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
for vid_id in new_ids:
yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
return self.playlist_result(_entries(), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
return self._extract_playlist(playlist_id)
class YoutubeChannelIE(InfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
}
}]
@staticmethod
def extract_videos_from_page(page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page):
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._TEMPLATE_URL % channel_id
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_playlist_id = self._search_regex(
r'data-channel-external-id="([^"]+)"',
channel_page, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
            # The videos are contained in a single page;
            # the ajax pages can't be used because they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
def _entries():
more_widget_html = content_html = channel_page
for pagenum in itertools.count(1):
for video_id, video_title in self.extract_videos_from_page(content_html):
yield self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
mobj = re.search(
r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), channel_id,
'Downloading page #%s' % (pagenum + 1),
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(_entries(), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'title': 'TheLinuxFoundation',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and it would match.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit; for example, a search for
    # 'python' returns more than 8,000,000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
for pagenum in itertools.count(1):
url_query = {
'search_query': query.encode('utf-8'),
'page': pagenum,
'spf': 'navigate',
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page')
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = self._ids_to_results(orderedSet(re.findall(
r'href="/watch\?v=(.{11})', html_content)))
videos += new_videos
if not new_videos or len(videos) > limit:
break
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
[r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(InfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
entries = [
self.url_result(
'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
for season in m_seasons
]
title = self._og_search_title(webpage, fatal=False)
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'entries': entries,
}
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            # The 'recommended' feed has an infinite 'load more' and each new
            # portion serves the same videos in a (sometimes) slightly different
            # order, so we check for uniqueness and break when a portion has
            # no new videos
new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
if not new_ids:
break
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(
self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'
_TESTS = [] # override PlaylistIE tests
def _real_extract(self, url):
return self._extract_playlist('WL')
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| unlicense |
torte/lymph | setup.py | 2 | 2729 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
with open('README.rst') as f:
description = f.read()
install_requires = [
'docopt>=0.6.1',
'kazoo>=1.3.1',
'kombu>=3.0.16',
'gevent',
'msgpack-python>=0.4.0',
'psutil>=2.1.1',
'PyYAML>=3.11',
'pyzmq>=14.3.0',
'redis>=2.9.1',
'setproctitle>=1.1.8',
'six>=1.6',
'Werkzeug>=0.10.4',
'blessings>=1.5.1',
'netifaces>=0.10.4',
'mock>=1.0.1',
'PyHamcrest>=1.8.2',
'pytz',
'iso8601>=0.1.10',
]
if sys.version_info.major == 2:
install_requires.append('Monotime>=1.0')
elif sys.version_info.major == 3:
install_requires.remove('gevent')
install_requires.append('gevent>=1.1a2')
setup(
name='lymph',
url='http://github.com/deliveryhero/lymph/',
version='0.8.0-dev',
namespace_packages=['lymph'],
packages=find_packages(),
license=u'Apache License (2.0)',
author=u'Delivery Hero Holding GmbH',
maintainer=u'Johannes Dollinger',
maintainer_email=u'[email protected]',
description=u'a service framework',
long_description=description,
include_package_data=True,
install_requires=install_requires,
extras_require={
'sentry': ['raven'],
'newrelic': ['newrelic'],
},
entry_points={
'console_scripts': ['lymph = lymph.cli.main:main'],
'lymph.cli': [
'discover = lymph.cli.discover:DiscoverCommand',
'emit = lymph.cli.emit:EmitCommand',
'help = lymph.cli.help:HelpCommand',
'inspect = lymph.cli.inspect:InspectCommand',
'instance = lymph.cli.service:InstanceCommand',
'list = lymph.cli.list:ListCommand',
'node = lymph.cli.service:NodeCommand',
'request = lymph.cli.request:RequestCommand',
'shell = lymph.cli.shell:ShellCommand',
'subscribe = lymph.cli.subscribe:SubscribeCommand',
'tail = lymph.cli.tail:TailCommand',
'config = lymph.cli.config:ConfigCommand',
],
'nose.plugins.0.10': ['lymph = lymph.testing.nose:LymphPlugin'],
'pytest11': ['lymph = lymph.testing.pytest'],
'kombu.serializers': [
'lymph-json = lymph.serializers.kombu:json_serializer_args',
'lymph-msgpack = lymph.serializers.kombu:msgpack_serializer_args',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
)
| apache-2.0 |
websbydrew/TestPython | Python Files/Exercise Files/Ch5/htmlparsing_finished.py | 2 | 1751 | #
# Example file for parsing and processing HTML
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
# import the HTMLParser module
from HTMLParser import HTMLParser
metacount = 0
# create a subclass and override the handler methods
class MyHTMLParser(HTMLParser):
# function to handle an opening tag in the doc
# this will be called when the closing ">" of the tag is reached
def handle_starttag(self, tag, attrs):
global metacount
print "Encountered a start tag:", tag
if tag == "meta":
metacount += 1
        pos = self.getpos() # returns a tuple indicating line and character
print "At line: ", pos[0], " position ", pos[1]
        if len(attrs) > 0:
print "\tAttributes:"
for a in attrs:
print "\t", a[0],"=",a[1]
# function to handle the ending tag
def handle_endtag(self, tag):
print "Encountered an end tag:", tag
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
# function to handle character and text data (tag contents)
def handle_data(self, data):
print "Encountered some data:", data
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
# function to handle the processing of HTML comments
def handle_comment(self, data):
print "Encountered comment:", data
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
def main():
# instantiate the parser and feed it some HTML
parser = MyHTMLParser()
# open the sample HTML file and read it
f = open("samplehtml.html")
if f.mode == "r":
contents = f.read() # read the entire file
parser.feed(contents)
print "%d meta tags encountered" % metacount
if __name__ == "__main__":
    main()
| gpl-3.0 |
coderbone/SickRage | lib/sqlalchemy/inspection.py | 79 | 3103 | # sqlalchemy/inspect.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`,
and others. The other is that the return value of :func:`.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
.. versionadded:: 0.8 The :func:`.inspect` system is introduced
as of version 0.8.
"""
from . import util, exc
_registrars = util.defaultdict(list)
def inspect(subject, raiseerr=True):
"""Produce an inspection object for the given target.
The returned value in some cases may be the
same object as the one given, such as if a
:class:`.Mapper` object is passed. In other
cases, it will be an instance of the registered
inspection type for the given object, such as
if an :class:`.engine.Engine` is passed, an
:class:`.Inspector` object is returned.
:param subject: the subject to be inspected.
:param raiseerr: When ``True``, if the given subject
does not
correspond to a known SQLAlchemy inspected type,
:class:`sqlalchemy.exc.NoInspectionAvailable`
is raised. If ``False``, ``None`` is returned.
"""
type_ = type(subject)
for cls in type_.__mro__:
if cls in _registrars:
reg = _registrars[cls]
if reg is True:
return subject
ret = reg(subject)
if ret is not None:
break
else:
reg = ret = None
if raiseerr and (
reg is None or ret is None
):
raise exc.NoInspectionAvailable(
"No inspection system is "
"available for object of type %s" %
type_)
return ret
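# Usage sketch (engine URL illustrative): passing an Engine returns an
# Inspector, while passing a mapped class returns its Mapper.
#
#     from sqlalchemy import create_engine, inspect
#     insp = inspect(create_engine("sqlite://"))
#     insp.get_table_names()   # [] for an empty database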
def _inspects(*types):
def decorate(fn_or_cls):
for type_ in types:
if type_ in _registrars:
raise AssertionError(
"Type %s is already "
"registered" % type_)
_registrars[type_] = fn_or_cls
return fn_or_cls
return decorate
def _self_inspects(cls):
_inspects(cls)(True)
return cls
| gpl-3.0 |
cgstudiomap/cgstudiomap | main/eggs/phonenumbers-7.1.1-py2.7.egg/phonenumbers/data/region_RE.py | 11 | 1850 | """Auto-generated file, do not edit by hand. RE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_RE = PhoneMetadata(id='RE', country_code=262, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[268]\\d{8}', possible_number_pattern='\\d{9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='262\\d{6}', possible_number_pattern='\\d{9}', example_number='262161234'),
mobile=PhoneNumberDesc(national_number_pattern='6(?:9[23]|47)\\d{6}', possible_number_pattern='\\d{9}', example_number='692123456'),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{7}', possible_number_pattern='\\d{9}', example_number='801234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='89[1-37-9]\\d{6}', possible_number_pattern='\\d{9}', example_number='891123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='8(?:1[019]|2[0156]|84|90)\\d{6}', possible_number_pattern='\\d{9}', example_number='810123456'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([268]\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', national_prefix_formatting_rule='0\\1')],
main_country_for_code=True,
leading_digits='262|6[49]|8')
| agpl-3.0 |
nijel/weblate | weblate/trans/validators.py | 1 | 1538 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from weblate.checks.flags import Flags
def validate_filemask(val):
"""Validate that filemask contains *."""
if "*" not in val:
raise ValidationError(
_("Filemask does not contain * as a language placeholder!")
)
def validate_autoaccept(val):
"""Validate correct value for autoaccept."""
if val == 1:
raise ValidationError(
_(
"A value of 1 is not allowed for autoaccept as "
"it would permit users to vote on their own suggestions."
)
)
def validate_check_flags(val):
"""Validate check influencing flags."""
flags = Flags(val)
flags.validate()
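# Usage sketch (flag names illustrative of the syntax Flags accepts):
#
#     validate_check_flags("max-length:100, ignore-same")  # passes
#     validate_check_flags("no-such-flag")                 # raises ValidationError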
| gpl-3.0 |
alistairlow/tensorflow | tensorflow/python/ops/string_ops.py | 33 | 5311 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors.
See the @{$python/string_ops} guide.
@@string_to_hash_bucket_fast
@@string_to_hash_bucket_strong
@@string_to_hash_bucket
@@reduce_join
@@string_join
@@string_split
@@substr
@@as_string
@@encode_base64
@@decode_base64
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import deprecation
# pylint: enable=wildcard-import
def string_split(source, delimiter=" ", skip_empty=True): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `delimiter` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
    delimiter: `0-D` string `Tensor`, the delimiter string; it may be empty
      or contain multiple bytes, each of which is a potential split point.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
# pylint: disable=protected-access
indices, values, shape = gen_string_ops._string_split(
source, delimiter=delimiter, skip_empty=skip_empty)
# pylint: enable=protected-access
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
def _reduce_join_reduction_dims(x, axis, reduction_indices):
"""Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
def reduce_join(inputs, axis=None,
keep_dims=False,
separator="",
name=None,
reduction_indices=None):
reduction_indices = _reduce_join_reduction_dims(
inputs, axis, reduction_indices)
return gen_string_ops.reduce_join(
inputs=inputs,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator,
name=name)
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
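# Behavior sketch (values illustrative): with inputs = [["a", "b"], ["c", "d"]],
# reduce_join(inputs, axis=0) evaluates to ["ac", "bd"] and
# reduce_join(inputs, axis=1) to ["ab", "cd"]; with no axis, all dimensions
# are reduced from last to first, yielding "abcd".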
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
| apache-2.0 |
Widiot/simpleblog | venv/lib/python3.5/site-packages/pygments/plugin.py | 25 | 1721 | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def iter_entry_points(group_name):
try:
import pkg_resources
except ImportError:
return []
return pkg_resources.iter_entry_points(group_name)
def find_plugin_lexers():
for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
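# Usage sketch: enumerate the lexer classes contributed by installed plugins
# (yields nothing when pkg_resources is unavailable):
#
#     for lexer_cls in find_plugin_lexers():
#         print(lexer_cls.name)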
def find_plugin_formatters():
for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| mit |
antonvino/inmoov-basic | hexapod_scripts_base/maestro_lib.py | 1 | 13426 | ###########################################################################################
# Filename:
# Device.py
###########################################################################################
# Project Authors:
# Juhapekka Piiroinen
# Brian Wu
#
# Changes:
# June 14, 2010 by Juhapekka Piiroinen - changes committed to svn
# - added comments for the device commands according to the manual from Pololu
# - added latest draft code for rotating base servo (Parallax Continuous Rotating Servo)
# - note! you should be able to clear error flags with .get_errors function according to the manual
# - renamed CameraDriver to LegacyCameraDriver as Brian Wu has done better one
# - integrated batch of changes provided by Brian Wu
#
# June 11, 2010 by Brian Wu - Changes committed thru email
# - Decoupling the implementation from the program
#
# April 19, 2010 by Juhapekka Piiroinen
# - Initial Release
#
# Email:
# [email protected]
#
# License:
# GNU/GPLv3
#
# Description:
# A python-wrapper for Pololu Micro Maestro 6-Channel USB Servo Controller
#
############################################################################################
# /!\ Notes /!\
# You will have to enable _USB Dual Port_ mode from the _Pololu Maestro Control Center_.
#
############################################################################################
# Device Documentation is available @ http://www.pololu.com/docs/pdf/0J40/maestro.pdf
############################################################################################
# (C) 2010 Juhapekka Piiroinen
# Brian Wu
############################################################################################
import serial
import time
def log(*msgline):
for msg in msgline:
print msg,
print
class Device(object):
def __init__(self,con_port="COM6",ser_port="COM7",timeout=1): #/dev/ttyACM0 and /dev/ttyACM1 for Linux
############################
# lets introduce and init the main variables
self.con = None
self.ser = None
self.isInitialized = False
############################
        # let's connect the Command Port
try:
self.con = serial.Serial(con_port,timeout=timeout,baudrate=9600)
self.con.close()
self.con.open()
self.con.baudrate = 9600
log("Link to Command Port -", con_port, "- successful")
except serial.serialutil.SerialException, e:
print e
log("Link to Command Port -", con_port, "- failed")
if self.con:
#####################
#If your Maestro's serial mode is "UART, detect baud rate", you must first send it the baud rate indication byte 0xAA on
#the RX line before sending any commands. The 0xAA baud rate indication byte can be the first byte of a Pololu protocol
#command.
#http://www.pololu.com/docs/pdf/0J40/maestro.pdf - page 35
# self.con.baudrate = 9600
# self.con.write(chr(0xAA))
# self.con.flush()
# log("Baud rate indication byte 0xAA sent!")
pass
###################################
        # let's connect the TTL Port
try:
self.ser = serial.Serial(ser_port,timeout=timeout,baudrate=9600)
self.ser.close()
self.ser.open()
self.ser.baudrate = 9600
log("Link to TTL Port -", ser_port, "- successful")
except serial.serialutil.SerialException, e:
print e
log("Link to TTL Port -", ser_port, "- failed!")
self.isInitialized = (self.con!=None and self.ser!=None)
if (self.isInitialized):
err_flags = self.get_errors()
log("Device error flags read (",err_flags,") and cleared")
log("Device initialized:",self.isInitialized)
###########################################################################################################################
## common write function for handling all write related tasks
def write(self,*data):
if not self.isInitialized: log("Not initialized"); return
if not self.ser.writable():
log("Device not writable")
return
for d in data:
self.ser.write(chr(d))
self.ser.flush()
###########################################################################################################################
## Go Home
# Compact protocol: 0xA2
# --
# This command sends all servos and outputs to their home positions, just as if an error had occurred. For servos and
# outputs set to "Ignore", the position will be unchanged.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def go_home(self):
if not self.isInitialized: log("Not initialized"); return
self.write(0xA2)
###########################################################################################################################
## Set Target
# Compact protocol: 0x84, channel number, target low bits, target high bits
# --
# The lower 7 bits of the third data byte represent bits 0-6 of the target (the lower 7 bits), while the lower 7 bits of the
# fourth data byte represent bits 7-13 of the target. The target is a non-negative integer.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_target(self,servo,value):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(value,32)
self.write(0x84,servo,lowbits << 2,highbits)
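        # Derivation (not from the manual text above): the device expects the
        # target in quarter-microseconds as two 7-bit bytes, and
        # highbits*128 + lowbits*4 == value*4, so `value` here is expressed in
        # microseconds (e.g. set_target(0, 1500) sends a raw target of 6000).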
###########################################################################################################################
## Set Speed
# Compact protocol: 0x87, channel number, speed low bits, speed high bits
# --
# This command limits the speed at which a servo channel's output value changes. The speed limit is given in units of (0.25 us)/(10 ms)
# --
# For example, the command 0x87, 0x05, 0x0C, 0x01 sets
# the speed of servo channel 5 to a value of 140, which corresponds to a speed of 3.5 us/ms. What this means is that if
# you send a Set Target command to adjust the target from, say, 1000 us to 1350 us, it will take 100 ms to make that
# adjustment. A speed of 0 makes the speed unlimited, so that setting the target will immediately affect the position. Note
# that the actual speed at which your servo moves is also limited by the design of the servo itself, the supply voltage, and
# mechanical loads; this parameter will not help your servo go faster than what it is physically capable of.
# --
# At the minimum speed setting of 1, the servo output takes 40 seconds to move from 1 to 2 ms.
# The speed setting has no effect on channels configured as inputs or digital outputs.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_speed(self,servo,speed):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(speed,32)
self.write(0x87,servo,lowbits << 2,highbits)
###########################################################################################################################
## Set Acceleration
# Compact protocol: 0x89, channel number, acceleration low bits, acceleration high bits
# --
# This command limits the acceleration of a servo channel's output. The acceleration limit is a value from 0 to 255 in units of (0.25 us)/(10 ms)/(80 ms),
# --
# A value of 0 corresponds to no acceleration limit. An acceleration limit causes the speed of a servo to slowly ramp up until it reaches the maximum speed, then
# to ramp down again as position approaches target, resulting in a relatively smooth motion from one point to another.
# With acceleration and speed limits, only a few target settings are required to make natural-looking motions that would
# otherwise be quite complicated to produce.
# --
# At the minimum acceleration setting of 1, the servo output takes about 3 seconds to move smoothly from a target of 1 ms to a target of 2 ms.
# The acceleration setting has no effect on channels configured as inputs or digital outputs.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_acceleration(self,servo,acceleration):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(acceleration,32)
self.write(0x89,servo,lowbits << 2,highbits)
###########################################################################################################################
## Get Position
# Compact protocol: 0x90, channel number
# Response: position low 8 bits, position high 8 bits
# --
# This command allows the device communicating with the Maestro to get the position value of a channel. The position
# is sent as a two-byte response immediately after the command is received.
# --
# If the specified channel is configured as a servo, this position value represents the current pulse width that the Maestro
# is transmitting on the channel, reflecting the effects of any previous commands, speed and acceleration limits, or scripts
# running on the Maestro.
# --
# If the channel is configured as a digital output, a position value less than 6000 means the Maestro is driving the line low,
# while a position value of 6000 or greater means the Maestro is driving the line high.
# --
# If the channel is configured as an input, the position represents the voltage measured on the channel. The inputs on
# channels 0-11 are analog: their values range from 0 to 1023, representing voltages from 0 to 5 V. The inputs on channels
# 12-23 are digital: their values are either exactly 0 or exactly 1023.
# --
# Note that the formatting of the position in this command differs from the target/speed/acceleration formatting in the
# other commands. Since there is no restriction on the high bit, the position is formatted as a standard little-endian two-
# byte unsigned integer. For example, a position of 2567 corresponds to a response 0x07, 0x0A.
# --
# Note that the position value returned by this command is equal to four times the number displayed in the Position box
# in the Status tab of the Maestro Control Center.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_position(self,servo):
if not self.isInitialized: log("Not initialized"); return None
self.write(0x90,servo)
data = self.ser.read(2)
if data:
return (ord(data[0])+(ord(data[1])<<8))/4
else:
return None
###########################################################################################################################
## Get Moving State
# Compact protocol: 0x93
# Response: 0x00 if no servos are moving, 0x01 if servos are moving
# --
# This command is used to determine whether the servo outputs have reached their targets or are still changing, limited
# by speed or acceleration settings. Using this command together with the Set Target command, you can initiate several
# servo movements and wait for all the movements to finish before moving on to the next step of your program.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_moving_state(self):
if not self.isInitialized: log("Not initialized"); return None
self.write(0x93)
data = self.ser.read(1)
if data:
return ord(data[0])
else:
return None
###########################################################################################################################
## Get Errors
# Compact protocol: 0xA1
# --
# Response: error bits 0-7, error bits 8-15
# --
# Use this command to examine the errors that the Maestro has detected.
# --
# The error register is sent as a two-byte response immediately after the command is received,
# then all the error bits are cleared. For most applications using serial control, it is a good idea to check errors continuously
# and take appropriate action if errors occur.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_errors(self):
if not self.isInitialized: log("Not initialized"); return None
self.write(0xA1)
data = self.ser.read(2)
if data:
return ord(data[0])+(ord(data[1])<<8)
else:
return None
###########################################################################################################################
## a helper function for Set Target
def wait_until_at_target(self):
while (self.get_moving_state()):
time.sleep(0.1)
###########################################################################################################################
    ## Let's close and clean up when we are done
def __del__(self):
if (self.ser):
self.ser.close()
if (self.con):
self.con.close()
del(self.ser)
del(self.con)
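###########################################################################################################################
## Usage sketch -- not part of the original driver. The port names, channel
## number and target values below are assumptions; adjust them to your setup.
if __name__ == "__main__":
    dev = Device(con_port="COM6", ser_port="COM7")  # /dev/ttyACM0-1 on Linux
    if dev.isInitialized:
        dev.set_speed(0, 40)        # limit the speed of channel 0
        dev.set_target(0, 1500)     # ~1500 us pulse width (servo center)
        dev.wait_until_at_target()
        log("channel 0 position:", dev.get_position(0), "us")
        dev.go_home()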
| mit |
erickt/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/mongodb/fields/json.py | 44 | 2251 | """
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
import six
import datetime
from decimal import Decimal
from django.conf import settings
from django.utils import simplejson
from mongoengine.fields import StringField
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
assert isinstance(value, dict)
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(txt, parse_float=Decimal, encoding=settings.DEFAULT_CHARSET)
assert isinstance(value, dict)
return value
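# Behavior sketch (values illustrative):
#   dumps({'price': Decimal('9.99')})  ->  '{"price": "9.99"}'   (Decimal -> str)
#   loads('{"price": 9.99}')           ->  {'price': Decimal('9.99')}  (parse_float)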
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONField(StringField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
StringField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if not value:
return {}
elif isinstance(value, six.string_types):
res = loads(value)
assert isinstance(res, dict)
return JSONDict(**res)
else:
return value
def get_db_prep_save(self, value):
"""Convert our JSON object to a string before we save"""
if not value:
return super(JSONField, self).get_db_prep_save("")
else:
return super(JSONField, self).get_db_prep_save(dumps(value))
| apache-2.0 |
yongshengwang/hue | desktop/core/src/desktop/api2.py | 21 | 4908 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import tempfile
import time
import StringIO
import zipfile
from django.core import management
from django.shortcuts import redirect
from django.utils import html
from desktop.lib.django_util import JsonResponse
from desktop.lib.export_csvxls import make_response
from desktop.lib.i18n import smart_str
from desktop.models import Document2, Document
from django.http import HttpResponse
LOG = logging.getLogger(__name__)
def get_document(request):
if request.GET.get('id'):
doc = Document2.objects.get(id=request.GET['id'])
else:
doc = Document2.objects.get(uuid=request.GET['uuid'])
response = _massage_doc_for_json(doc, request.user, with_data=request.GET.get('with_data'))
return JsonResponse(response)
def _massage_doc_for_json(document, user, with_data=False):
massaged_doc = {
'id': document.id,
'uuid': document.uuid,
'owner': document.owner.username,
'type': html.conditional_escape(document.type),
'name': html.conditional_escape(document.name),
'description': html.conditional_escape(document.description),
'isMine': document.owner == user,
'lastModified': document.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(document.last_modified.timetuple()),
'version': document.version,
'is_history': document.is_history,
# tags
# dependencies
}
if with_data:
massaged_doc['data'] = document.data_dict
return massaged_doc
def export_documents(request):
if request.GET.get('documents'):
selection = json.loads(request.GET.get('documents'))
else:
selection = json.loads(request.POST.get('documents'))
# If non admin, only export documents the user owns
docs = Document2.objects
if not request.user.is_superuser:
docs = docs.filter(owner=request.user)
docs = docs.filter(id__in=selection).order_by('-id')
doc_ids = docs.values_list('id', flat=True)
f = StringIO.StringIO()
if doc_ids:
doc_ids = ','.join(map(str, doc_ids))
management.call_command('dumpdata', 'desktop.Document2', primary_keys=doc_ids, indent=2, use_natural_keys=True, verbosity=2, stdout=f)
if request.GET.get('format') == 'json':
return JsonResponse(f.getvalue(), safe=False)
elif request.GET.get('format') == 'zip':
zfile = zipfile.ZipFile(f, 'w')
zfile.writestr("hue.json", f.getvalue())
for doc in docs:
if doc.type == 'notebook':
try:
from spark.models import Notebook
zfile.writestr("notebook-%s-%s.txt" % (doc.name, doc.id), smart_str(Notebook(document=doc).get_str()))
except Exception, e:
print e
LOG.exception(e)
zfile.close()
response = HttpResponse(content_type="application/zip")
response["Content-Length"] = len(f.getvalue())
response['Content-Disposition'] = 'attachment; filename="hue-documents.zip"'
response.write(f.getvalue())
return response
else:
return make_response(f.getvalue(), 'json', 'hue-documents')
def import_documents(request):
if request.FILES.get('documents'):
documents = request.FILES['documents'].read()
else:
documents = json.loads(request.POST.get('documents'))
documents = json.loads(documents)
docs = []
for doc in documents:
if not request.user.is_superuser:
doc['fields']['owner'] = [request.user.username]
owner = doc['fields']['owner'][0]
doc['fields']['tags'] = []
if Document2.objects.filter(uuid=doc['fields']['uuid'], owner__username=owner).exists():
doc['pk'] = Document2.objects.get(uuid=doc['fields']['uuid'], owner__username=owner).pk
else:
doc['pk'] = None
docs.append(doc)
f = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
f.write(json.dumps(docs))
f.flush()
stdout = StringIO.StringIO()
try:
management.call_command('loaddata', f.name, stdout=stdout)
except Exception, e:
return JsonResponse({'message': smart_str(e)})
Document.objects.sync()
if request.POST.get('redirect'):
return redirect(request.POST.get('redirect'))
else:
return JsonResponse({'message': stdout.getvalue()})
| apache-2.0 |
apanju/GMIO_Odoo | addons/stock_picking_wave/__init__.py | 374 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mimoralea/king-pong | multicnet.py | 1 | 6980 | import tensorflow as tf
import cv2
import numpy as np
class MultilayerConvolutionalNetwork:
"""
This class manages the deep neural network
that will be used by the agent to learn
and extrapolate the state space
"""
def __init__(self, input_width, input_height, nimages, nchannels):
self.session = tf.InteractiveSession()
self.input_width = input_width
self.input_height = input_height
self.nimages = nimages
self.nchannels = nchannels
self.a = tf.placeholder("float", [None, self.nchannels])
self.y = tf.placeholder("float", [None])
self.input_image, self.y_conv, self.h_fc1, self.train_step = self.build_network()
self.session.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
def weight_variable(self, shape, stddev = 0.01):
"""
        Initialize weights with a slight amount of noise to
        break symmetry and prevent zero gradients
"""
initial = tf.truncated_normal(shape, stddev = stddev)
return tf.Variable(initial)
def bias_variable(self, shape, value = 0.01):
"""
Initialize ReLU neurons with slight positive initial
bias to avoid dead neurons
"""
initial = tf.constant(value, shape=shape)
return tf.Variable(initial)
def conv2d(self, x, W, stride = 1):
"""
        We use zero-padded ('SAME') convolutions so that, at
        stride 1, the output is the same size as the input
"""
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(self, x):
"""
Our pooling is plain old max pooling over 2x2 blocks
"""
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1], padding = "SAME")
def build_weights_biases(self, weights_shape):
"""
Build the weights and bias of a convolutional layer
"""
return self.weight_variable(weights_shape), \
self.bias_variable(weights_shape[-1:])
def convolve_relu_pool(self, nn_input, weights_shape, stride = 4, pool = True):
"""
Convolve the input to the network with the weight tensor,
add the bias, apply the ReLU function and finally max pool
"""
W_conv, b_conv = self.build_weights_biases(weights_shape)
h_conv = tf.nn.relu(self.conv2d(nn_input, W_conv, stride) + b_conv)
if not pool:
return h_conv
return self.max_pool_2x2(h_conv)
def build_network(self):
"""
Sets up the deep neural network
"""
        # the input is going to be reshaped to an
        # 80x80 stack of 4 greyscale frames (one per channel)
input_image = tf.placeholder("float", [None, self.input_width,
self.input_height, self.nimages])
# create the first convolutional layers
h_pool1 = self.convolve_relu_pool(input_image, [8, 8, self.nimages, 32])
h_conv2 = self.convolve_relu_pool(h_pool1, [4, 4, 32, 64], 2, False)
h_conv3 = self.convolve_relu_pool(h_conv2, [3, 3, 64, 64], 1, False)
# create the densely connected layers
W_fc1, b_fc1 = self.build_weights_biases([5 * 5 * 64, 512])
h_conv3_flat = tf.reshape(h_conv3, [-1, 5 * 5 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# finally add the readout layer
W_fc2, b_fc2 = self.build_weights_biases([512, self.nchannels])
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
readout_action = tf.reduce_sum(tf.mul(readout, self.a), reduction_indices=1)
cost_function = tf.reduce_mean(tf.square(self.y - readout_action))
train_step = tf.train.AdamOptimizer(1e-8).minimize(cost_function)
return input_image, readout, h_fc1, train_step
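    # Shape arithmetic for the layers above (a sketch, assuming the usual
    # 80x80 input with 4 stacked frames):
    #   conv 8x8, stride 4, SAME padding: 80 / 4 = 20  -> 20x20x32
    #   max pool 2x2, stride 2:           20 / 2 = 10  -> 10x10x32
    #   conv 4x4, stride 2, SAME padding: 10 / 2 = 5   ->  5x5x64
    #   conv 3x3, stride 1, SAME padding:  5 / 1 = 5   ->  5x5x64
    # which is why the dense layer flattens to 5 * 5 * 64 = 1600 inputs.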
def train(self, value_batch, action_batch, state_batch):
"""
Does the actual training step
"""
self.train_step.run(feed_dict = {
self.y : value_batch,
self.a : action_batch,
self.input_image : state_batch
})
def save_variables(self, a_file, h_file, stack):
"""
Saves neural network weight variables for
debugging purposes
"""
readout_t = self.readout_act(stack)
a_file.write(",".join([str(x) for x in readout_t]) + '\n')
h_file.write(",".join([str(x) for x in self.h_fc1.eval(
feed_dict={self.input_image:[stack]})[0]]) + '\n')
def save_percepts(self, path, x_t1):
"""
Saves an image array to visualize
how the image is compressed before saving
"""
cv2.imwrite(path, np.rot90(x_t1))
def save_network(self, directory, iteration):
"""
Saves the progress of the agent
for further use later on
"""
self.saver.save(self.session, directory + '/network', global_step = iteration)
def attempt_restore(self, directory):
"""
        Restores the latest saved checkpoint if
        available
"""
checkpoint = tf.train.get_checkpoint_state(directory)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
return checkpoint.model_checkpoint_path
def preprocess_percepts(self, x_t1_colored, reshape = True):
"""
        The raw image array is shrunk down, stripped of
        color, and thresholded to black and white. Also
        reshapes it to 3 dimensions if needed
"""
x_t1_resized = cv2.resize(x_t1_colored, (self.input_width, self.input_height))
x_t1_greyscale = cv2.cvtColor(x_t1_resized, cv2.COLOR_BGR2GRAY)
ret, x_t1 = cv2.threshold(x_t1_greyscale, 1, 255, cv2.THRESH_BINARY)
"""
import time
timestamp = int(time.time())
cv2.imwrite("percepts/%d-color.png" % timestamp,
np.rot90(x_t1_colored))
cv2.imwrite("percepts/%d-resized.png" % timestamp,
np.rot90(x_t1_resized))
cv2.imwrite("percepts/%d-greyscale.png" % timestamp,
np.rot90(x_t1_greyscale))
cv2.imwrite("percepts/%d-bandw.png" % timestamp,
np.rot90(x_t1))
"""
if not reshape:
return x_t1
return np.reshape(x_t1, (80, 80, 1))
def readout_act(self, stack):
"""
        Gets the network's action values
        for a given stack of images
"""
stack = [stack] if hasattr(stack, 'shape') and len(stack.shape) == 3 else stack
return self.y_conv.eval(feed_dict = {self.input_image: stack})
def select_best_action(self, stack):
"""
Selects the action with the
highest value
"""
return np.argmax(self.readout_act(stack))
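# Minimal usage sketch, kept as comments so that importing this module has
# no side effects. `raw_bgr_image` is a hypothetical BGR frame captured
# from the game:
#
#     net = MultilayerConvolutionalNetwork(80, 80, 4, 3)
#     frame = net.preprocess_percepts(raw_bgr_image)   # -> 80x80x1
#     stack = np.concatenate([frame] * 4, axis=2)      # -> 80x80x4
#     action = net.select_best_action(stack)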
def main():
print('This module should be imported')
pass
if __name__ == "__main__":
main()
| mit |
scipy/scipy | scipy/signal/tests/test_max_len_seq.py | 12 | 3106 | import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from pytest import raises as assert_raises
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
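# Property being exercised below: an order-n maximum length sequence,
# mapped to +/-1 values, has a circular autocorrelation of 2**n - 1 at lag
# zero and exactly -1 at every other lag. For example, with n = 3 the
# sequence has length 7, an impulse value of 7, and a steady state of -1.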
class TestMLS:
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
assert_array_equal(max_len_seq(10, length=0)[0], [])
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
                    # First, make sure we got all 1's or -1's
err_msg = "mls had non binary terms"
assert_array_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
# Test via circular cross-correlation, which is just mult.
# in the frequency domain with one signal conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
out_len = 2**nbits - 1
# impulse amplitude == test_len
err_msg = "mls impulse has incorrect value"
assert_allclose(tester[0], out_len, err_msg=err_msg)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
assert_allclose(tester[1:], np.full(out_len - 1, -1),
err_msg=err_msg)
# let's do the split thing using a couple options
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
assert_array_equal(orig_m, new_m)
| bsd-3-clause |
wilsonkichoi/zipline | zipline/data/data_portal.py | 1 | 64491 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
import bcolz
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import Asset, Future, Equity
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.data.us_equity_loader import (
USEquityDailyHistoryLoader,
USEquityMinuteHistoryLoader,
)
from zipline.utils import tradingcalendar
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price", "last_traded"
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    That is, the aggregation slides forward during the course of the
    simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their
    respective methods.
"""
def __init__(self, market_opens, minute_reader):
self._market_opens = market_opens
self._minute_reader = minute_reader
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
        # When the requested dt's date is different from the cached date,
        # the cache is flushed, so that the cache entries do not grow
        # unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
date = dt.date()
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != date:
market_open = self._market_opens.loc[date]
cache = self._caches[field] = (dt.date(), market_open, {})
_, market_open, entries = cache
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
        for the day. If there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = max(last_max, np.nanmax(window))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
window_min = np.nanmin(window)
if pd.isnull(window_min):
val = last_min
else:
val = min(last_min, window_min)
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
normalized_dt = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_dt, True):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes(
[asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes([asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
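# Usage sketch for the aggregator above (comments only; `market_opens` is
# assumed to come from the trading environment's open_and_closes table and
# `minute_reader` from a minute bar reader, as wired up in DataPortal):
#
#     aggregator = DailyHistoryAggregator(market_opens, minute_reader)
#     partial_highs = aggregator.highs([asset], dt)  # highs so far today
#
# Each call advances the per-asset cache to `dt`, so repeated calls within
# the same session only have to read the newly elapsed minutes.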
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
env : TradingEnvironment
The trading environment for the simulation. This includes the trading
calendar and benchmark data.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentWriter, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
"""
def __init__(self,
env,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None):
self.env = env
self.views = {}
self._asset_finder = env.asset_finder
self._carrays = {
'open': {},
'high': {},
'low': {},
'close': {},
'volume': {},
'sid': {},
}
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._equity_daily_reader = equity_daily_reader
if self._equity_daily_reader is not None:
self._equity_history_loader = USEquityDailyHistoryLoader(
self.env,
self._equity_daily_reader,
self._adjustment_reader
)
self._equity_minute_reader = equity_minute_reader
self._future_daily_reader = future_daily_reader
self._future_minute_reader = future_minute_reader
self._first_trading_day = first_trading_day
if self._equity_minute_reader is not None:
self._equity_daily_aggregator = DailyHistoryAggregator(
self.env.open_and_closes.market_open,
self._equity_minute_reader)
self._equity_minute_history_loader = USEquityMinuteHistoryLoader(
self.env,
self._equity_minute_reader,
self._adjustment_reader
)
self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \
self._equity_minute_reader._ohlc_inverse
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.env.days_in_range(
start=sim_params.period_start,
end=sim_params.period_end
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _open_minute_file(self, field, asset):
sid_str = str(int(asset))
try:
carray = self._carrays[field][sid_str]
except KeyError:
carray = self._carrays[field][sid_str] = \
self._get_ctable(asset)[field]
return carray
def _get_ctable(self, asset):
sid = int(asset)
if isinstance(asset, Future):
if self._future_minute_reader.sid_path_func is not None:
path = self._future_minute_reader.sid_path_func(
self._future_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._future_minute_reader.rootdir, sid)
elif isinstance(asset, Equity):
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
else:
# TODO: Figure out if assets should be allowed if neither, and
# why this code path is being hit.
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
return bcolz.open(path, mode='r')
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
        If there is a trade on the dt, the answer is the dt provided.
"""
if data_frequency == 'minute':
return self._equity_minute_reader.get_last_traded_dt(asset, dt)
elif data_frequency == 'daily':
return self._equity_daily_reader.get_last_traded_dt(asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and isinstance(asset, Asset))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
            ``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
if self._is_extra_source(asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and dt > asset.end_date) or \
(data_frequency == "minute" and
normalize_date(dt) > asset.end_date):
if field == "volume":
return 0
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
day_to_use = dt
day_to_use = normalize_date(day_to_use)
return self._get_daily_data(asset, field, day_to_use)
else:
if isinstance(asset, Future):
return self._get_minute_spot_value_future(
asset, field, dt)
else:
if field == "last_traded":
return self._equity_minute_reader.get_last_traded_dt(
asset, dt
)
elif field == "price":
return self._get_minute_spot_value(asset, "close", dt,
True)
else:
return self._get_minute_spot_value(asset, field, dt)
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
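    # Worked example for the composition above (hypothetical values): a
    # 2-for-1 split between dt and perspective_dt is stored with ratio 0.5,
    # so a price field is scaled by 0.5 while a volume field uses
    # split_adj_factor, i.e. 1.0 / 0.5 = 2.0. A cash dividend with ratio
    # 0.95 then composes multiplicatively: 0.5 * 0.95 = 0.475.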
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value_future(self, asset, column, dt):
# Futures bcolz files have 1440 bars per day (24 hours), 7 days a week.
# The file attributes contain the "start_dt" and "last_dt" fields,
# which represent the time period for this bcolz file.
# The start_dt is midnight of the first day that this future started
# trading.
# figure out the # of minutes between dt and this asset's start_dt
start_date = self._get_asset_start_date(asset)
minute_offset = int((dt - start_date).total_seconds() / 60)
if minute_offset < 0:
# asking for a date that is before the asset's start date, no dice
return 0.0
# then just index into the bcolz carray at that offset
carray = self._open_minute_file(column, asset)
result = carray[minute_offset]
# if there's missing data, go backwards until we run out of file
while result == 0 and minute_offset > 0:
minute_offset -= 1
result = carray[minute_offset]
if column != 'volume':
# FIXME switch to a futures reader
return result * 0.001
else:
return result
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
result = self._equity_minute_reader.get_value(
asset.sid, dt, column
)
if column == "volume":
if result == 0:
return 0
elif not ffill or not np.isnan(result):
# if we're not forward filling, or we found a result, return it
return result
# we are looking for price, and didn't find one. have to go hunting.
last_traded_dt = \
self._equity_minute_reader.get_last_traded_dt(asset, dt)
if last_traded_dt is pd.NaT:
# no last traded dt, bail
return np.nan
# get the value as of the last traded dt
result = self._equity_minute_reader.get_value(
asset.sid,
last_traded_dt,
column
)
if np.isnan(result):
return np.nan
if dt == last_traded_dt or dt.date() == last_traded_dt.date():
return result
# the value we found came from a different day, so we have to adjust
        # the data if there are any adjustments across that day boundary
return self.get_adjusted_value(
asset, column, last_traded_dt,
dt, "minute", spot_value=result
)
def _get_daily_data(self, asset, column, dt):
if column == "last_traded":
last_traded_dt = \
self._equity_daily_reader.get_last_traded_dt(asset, dt)
if pd.isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
val = self._equity_daily_reader.spot_price(asset, dt, column)
if val == -1:
if column == "volume":
return 0
else:
return np.nan
else:
return val
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = self._equity_daily_reader.spot_price(
asset, found_dt, "close"
)
if value != -1:
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= tradingcalendar.trading_day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.env.trading_days
end_loc = self.env.trading_days.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[bar_count].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
days_for_window = self._get_days_for_window(end_dt.date(), bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
future_data = []
eq_assets = []
for asset in assets:
if isinstance(asset, Future):
future_data.append(self._get_history_daily_window_future(
asset, days_for_window, end_dt, field_to_use
))
else:
eq_assets.append(asset)
eq_data = self._get_history_daily_window_equities(
eq_assets, days_for_window, end_dt, field_to_use
)
if future_data:
# TODO: This case appears to be uncovered by testing.
            data = np.concatenate((eq_data, np.array(future_data).T), axis=1)
else:
data = eq_data
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_future(self, asset, days_for_window,
end_dt, column):
# Since we don't have daily bcolz files for futures (yet), use minute
# bars to calculate the daily values.
data = []
data_groups = []
# get all the minutes for the days NOT including today
for day in days_for_window[:-1]:
minutes = self.env.market_minutes_for_day(day)
values_for_day = np.zeros(len(minutes), dtype=np.float64)
for idx, minute in enumerate(minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_day[idx] = minute_val
data_groups.append(values_for_day)
# get the minutes for today
last_day_minutes = pd.date_range(
start=self.env.get_open_and_close(end_dt)[0],
end=end_dt,
freq="T"
)
values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64)
for idx, minute in enumerate(last_day_minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_last_day[idx] = minute_val
data_groups.append(values_for_last_day)
for group in data_groups:
if len(group) == 0:
continue
if column == 'volume':
data.append(np.sum(group))
elif column == 'open':
data.append(group[0])
elif column == 'close':
data.append(group[-1])
elif column == 'high':
data.append(np.amax(group))
elif column == 'low':
data.append(np.amin(group))
return data
def _get_history_daily_window_equities(
self, assets, days_for_window, end_dt, field_to_use):
ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0
if ends_at_midnight:
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day, use daily data for the whole range.
return self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._equity_daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._equity_daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._equity_daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._equity_daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._equity_daily_aggregator.volumes(
assets, end_dt)
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
mm = self.env.market_minutes
end_loc = mm.get_loc(end_dt)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
suggested_start_day = (mm[bar_count] + self.env.trading_day).date()
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day,
)
minutes_for_window = mm[start_loc:end_loc + 1]
asset_minute_data = self._get_minute_window_for_assets(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
        assets : list of zipline.data.Asset objects
            The assets whose data is desired.
        end_dt: pd.Timestamp
            The end of the desired window of data.
        bar_count: int
            The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS:
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
dt_to_fill = df.index[0]
perspective_dt = df.index[-1]
assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]
for missing_loc in assets_with_leading_nan:
asset = assets[missing_loc]
previous_dt = self.get_last_traded_dt(
asset, dt_to_fill, data_frequency)
if pd.isnull(previous_dt):
continue
previous_value = self.get_adjusted_value(
asset,
field,
previous_dt,
perspective_dt,
data_frequency,
)
df.iloc[0, missing_loc] = previous_value
df.fillna(method='ffill', inplace=True)
for asset in df.columns:
if df.index[-1] >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
series = df[asset]
series[series.index.normalize() > asset.end_date] = np.NaN
return df
def _get_minute_window_for_assets(self, assets, field, minutes_for_window):
"""
        Internal method that gets a window of adjusted minute data for the
        given assets and specified date range. Used to support the history
        API method for minute bars.
        Missing bars are filled with NaN.
        Parameters
        ----------
        assets : iterable of Asset
            The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
if isinstance(assets, Future):
return self._get_minute_window_for_future([assets], field,
minutes_for_window)
else:
# TODO: Make caller accept assets.
window = self._get_minute_window_for_equities(assets, field,
minutes_for_window)
return window
def _get_minute_window_for_future(self, asset, field, minutes_for_window):
# THIS IS TEMPORARY. For now, we are only exposing futures within
# equity trading hours (9:30 am to 4pm, Eastern). The easiest way to
# do this is to simply do a spot lookup for each desired minute.
return_data = np.zeros(len(minutes_for_window), dtype=np.float64)
for idx, minute in enumerate(minutes_for_window):
return_data[idx] = \
self._get_minute_spot_value_future(asset, field, minute)
# Note: an improvement could be to find the consecutive runs within
# minutes_for_window, and use them to read the underlying ctable
# more efficiently.
# Once futures are on 24-hour clock, then we can just grab all the
# requested minutes in one shot from the ctable.
# no adjustments for futures, yay.
return return_data
def _get_minute_window_for_equities(
self, assets, field, minutes_for_window):
return self._equity_minute_history_loader.history(assets,
minutes_for_window,
field)
def _apply_all_adjustments(self, data, asset, dts, field,
price_adj_factor=1.0):
"""
Internal method that applies all the necessary adjustments on the
given data array.
The adjustments are:
- splits
- if field != "volume":
- mergers
- dividends
        - multiplication by price_adj_factor, if one is given
        - all values rounded to 3 digits after the decimal point.
Parameters
----------
data : np.array
The data to be adjusted.
asset: Asset
The asset whose data is being adjusted.
dts: pd.DateTimeIndex
The list of minutes or days representing the desired window.
field: string
The field whose values are in the data array.
price_adj_factor: float
Factor with which to adjust OHLC values.
Returns
-------
None. The data array is modified in place.
"""
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
),
data,
dts,
field != 'volume'
)
if field != 'volume':
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
),
data,
dts,
True
)
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS"
),
data,
dts,
True
)
if price_adj_factor is not None:
data *= price_adj_factor
np.around(data, 3, out=data)
def _get_daily_window_for_sids(
self, assets, field, days_in_window, extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
        assets : iterable of Asset
            The assets whose data is desired.
        days_in_window: pandas.DatetimeIndex
            The days making up the desired window of data.
field: string
The specific field to return. "open", "high", "close_price", etc.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)))
else:
return_array = np.zeros((bar_count, len(assets)))
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._equity_history_loader.history(assets,
days_in_window,
field)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
@staticmethod
def _apply_adjustments_to_window(adjustments_list, window_data,
dts_in_window, multiply):
if len(adjustments_list) == 0:
return
# advance idx to the correct spot in the adjustments list, based on
# when the window starts
idx = 0
while idx < len(adjustments_list) and dts_in_window[0] >\
adjustments_list[idx][0]:
idx += 1
# if we've advanced through all the adjustments, then there's nothing
# to do.
if idx == len(adjustments_list):
return
while idx < len(adjustments_list):
adjustment_to_apply = adjustments_list[idx]
if adjustment_to_apply[0] > dts_in_window[-1]:
break
range_end = dts_in_window.searchsorted(adjustment_to_apply[0])
if multiply:
window_data[0:range_end] *= adjustment_to_apply[1]
else:
window_data[0:range_end] /= adjustment_to_apply[1]
idx += 1
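    # Worked example (hypothetical): with dts_in_window covering days
    # d0..d4 and a single adjustment (d2, 0.5), searchsorted returns
    # range_end = 2, so only window_data[0:2] -- the bars strictly before
    # the effective date -- is scaled; bars from d2 onward are already
    # quoted in post-adjustment terms.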
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, sids, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
sids : container
Sids for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(int, float)]
List of splits, where each split is a (sid, ratio) tuple.
"""
if self._adjustment_reader is None or not sids:
return {}
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in sids]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
# bars is the number of days desired. we have to translate that
# into the number of minutes we want.
# we get all the minutes for the last (bars - 1) days, then add
# all the minutes so far today. the +2 is to account for ignoring
# today, and the previous day, in doing the math.
previous_day = self.env.previous_trading_day(ending_minute)
days = self.env.days_in_range(
self.env.add_trading_days(-days_count + 2, previous_day),
previous_day,
)
minutes_count = \
sum(210 if day in self.env.early_closes else 390 for day in days)
# add the minutes for today
today_open = self.env.get_open_and_close(ending_minute)[0]
minutes_count += \
((ending_minute - today_open).total_seconds() // 60) + 1
return minutes_count
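    # Worked example (hypothetical): with days_count=2 and an ending_minute
    # of 10:00 am on a regular session, the range covers one full prior
    # trading day (390 minutes) plus the 31 minutes elapsed since the
    # 9:30 am open, for 421 bars in total.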
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset], dt, 2, "1d", "price", ffill=True
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = self._get_minute_count_for_transform(
dt, bars
)
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "price", ffill=True
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "volume",
ffill=True
)[asset]
            vol_sum = nansum(volume_arr)
            if vol_sum == 0:
                # note: numpy float division by zero yields inf/nan rather
                # than raising ZeroDivisionError, so guard explicitly
                ret = np.nan
            else:
                ret = nansum(price_arr * volume_arr) / vol_sum
            return ret
| apache-2.0 |
kingvuplus/italysat-enigma3 | tools/host_tools/FormatConverter/satxml.py | 112 | 2759 | import os
from datasource import datasource
from xml.dom import minidom
from xml.dom.minidom import Document
from input import inputText
class satxml(datasource):
def __init__(self, filename = "satellites.xml"):
self.filename = filename
datasource.__init__(self)
if not os.path.isfile(filename):
print "File %s doesn't exist. Creating it." % filename
def getStatus(self):
text = datasource.getStatus(self)
return text
def getCapabilities(self):
return [("set filename", self.setFilename), ("read file", self.read), ("write file", self.write), ("print all", self.printAll)]
def getName(self):
return "satellites.xml"
def setFilename(self):
print "Please give a filename <satellites.xml>:"
filename = inputText()
if filename == "":
self.filename = "satellites.xml"
else:
self.filename = filename
print "Filename set to %s" % self.filename
def read(self):
basicsatxml = minidom.parse(self.filename)
for sat in basicsatxml.firstChild.childNodes:
if sat.nodeType == sat.ELEMENT_NODE and sat.localName == "sat":
print sat.localName
satname = str(sat.getAttribute("name"))
satpos = str(sat.getAttribute("position"))
self.addSat(satname, satpos)
for transponder in sat.childNodes:
if transponder.nodeType == transponder.ELEMENT_NODE and transponder.localName == "transponder":
parameters = {}
paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"]
for param in paramlist:
entry = str(transponder.getAttribute(param))
if entry != "":
parameters[param] = entry
if len(parameters.keys()) > 1:
self.addTransponder(satpos, parameters)
print self.transponderlist
def write(self):
satxml = Document()
satellites = satxml.createElement("satellites")
satxml.appendChild(satellites)
satlist = self.transponderlist.keys()
print self.transponderlist
satlist.sort()
for sat in satlist:
xmlsat = satxml.createElement("sat")
xmlsat.setAttribute("name", self.satnames[sat])
xmlsat.setAttribute("flags", "1")
xmlsat.setAttribute("position", sat)
satellites.appendChild(xmlsat)
transponders = self.transponderlist[sat]
transponders.sort(key = lambda a: a["frequency"])
for transponder in transponders:
xmltransponder = satxml.createElement("transponder")
paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"]
for param in paramlist:
if transponder.has_key(param):
xmltransponder.setAttribute(param, transponder[param])
xmlsat.appendChild(xmltransponder)
prettyxml = satxml.toprettyxml()
print prettyxml
file = open(self.filename, "w")
file.write(prettyxml)
file.close()
| gpl-2.0 |
trinitysoulstars/astralturf | lyriscisssor.py | 1 | 2800 | import nltk
from nltk import FreqDist
import json
import csv
print "* Loading corpus"
#raw = gutenberg.raw('melville-moby_dick.txt')
#raw = gutenberg.raw('bible-kjv.txt')
#raw = gutenberg.raw('tss-lyrics.txt')
lines = []
with open("tss-lyrics.txt", 'r') as raw:
lines = raw.read()
print "* Tokenizing"
tokens = nltk.word_tokenize(lines)
print "* Tagging parts of speech"
# Save this to strip articles later
parts_of_speech = nltk.pos_tag(tokens)
print "* Converting POS list into a dict for lookup"
# TODO -- fix this. this is going to fuck up on homonyms
parts_of_speech = dict(parts_of_speech)
# You can ban other parts of speech by adding their tags to this list.
# You can find out what the part-of-speech tags mean by using code like
# this:
# >>> print nltk.help.upenn_tagset('DT')
# DT: determiner
# all an another any both del each either every half la many much nary
# neither no some such that the them these this those
banned_parts_of_speech = [
'DT',
'IN',
'CC',
'TO',
'PRP',
'PRP$',
]
banned_words = [
'Chorus',
'chorus',
'is',
'has',
'had',
'have',
'there',
'so',
'So',
'on',
'On',
'did',
'am',
    'are',
'Is',
'be',
'my',
'My',
'can',
'Can',
'was',
'of',
'Of',
'OF',
'OH',
'oh',
'Oh',
'the',
'THE',
'The',
'that',
'That',
'when',
'When',
'what',
'What',
'who',
'Who',
'how',
'How',
'his',
'His',
'were',
'Why',
'why',
'then',
'Then',
'Does',
'does',
'O',
'do',
'Do',
'Go',
'go',
]
print "* Stripping stuff we don't want"
# Strip punctuation and banned parts of speech
tokens = [
token for token in tokens if (
# Kill punctuation
token.isalpha() and
# Kill parts of speech that we don't want.
not parts_of_speech[token] in banned_parts_of_speech and
not token in banned_words #and
#len(token) > 4
)
]
print "* Building frequency distribution"
words = FreqDist(tokens)
N = 1000
def showWords(N=1000):
print "* Printing top %i words" % N
f = open('corpus.txt', 'wb')
writer = csv.writer(f)
for i, pair in enumerate(words.items()):
word, count = pair
row = word, count, parts_of_speech[word]
#row = "%r, %r, %r" % (word, count, parts_of_speech[word])
#row = json.dumps([word, count, parts_of_speech[word]], separators=(',',':'))
writer.writerow(row)
print "%r appeared %i times. Its part of speech is %r" % (
word, count, parts_of_speech[word],
)
if i > N:
break
f.close()
return (word, count, parts_of_speech)
showWords()
| agpl-3.0 |
stevekuznetsov/ansible | test/runner/lib/changes.py | 39 | 5484 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason):
"""
:type branch: str
:type reason: str
"""
message = 'Invalid branch: %s\n%s' % (branch, reason)
super(InvalidBranch, self).__init__(message)
self.branch = branch
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
def __init__(self, message):
"""
:type message: str
"""
super(ChangeDetectionNotSupported, self).__init__(message)
class ShippableChanges(object):
"""Change information for Shippable build."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
try:
self.branch = os.environ['BRANCH']
self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
self.commit = os.environ['COMMIT']
self.project_id = os.environ['PROJECT_ID']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.is_tag:
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
if self.is_pr:
self.paths = sorted(git.get_diff_names([self.branch]))
else:
merge_runs = self.get_merge_runs(self.project_id, self.branch)
last_successful_commit = self.get_last_successful_commit(merge_runs)
if last_successful_commit:
self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
else:
# tracked files (including unchanged)
self.paths = sorted(git.get_file_names(['--cached']))
def get_merge_runs(self, project_id, branch):
"""
:type project_id: str
:type branch: str
:rtype: list[dict]
"""
params = dict(
isPullRequest='false',
projectIds=project_id,
branch=branch,
)
client = HttpClient(self.args, always=True)
response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
return response.json()
@staticmethod
def get_last_successful_commit(merge_runs):
"""
:type merge_runs: dict | list[dict]
:rtype: str
"""
if 'id' in merge_runs and merge_runs['id'] == 4004:
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return None
merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
known_commits = set()
last_successful_commit = None
for merge_run in merge_runs:
commit_sha = merge_run['commitSha']
if commit_sha not in known_commits:
known_commits.add(commit_sha)
if merge_run['statusCode'] == 30:
last_successful_commit = commit_sha
return last_successful_commit
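# Illustrative example (hypothetical run data): for merge runs created in
# order on distinct commits A (statusCode 30), B (80), C (30), this returns
# C, the most recently created commit whose first run succeeded (30 marks
# success).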
class LocalChanges(object):
"""Change information for local work."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
self.current_branch = git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(git.get_diff_names([]))
@staticmethod
def is_official_branch(name):
"""
:type name: str
:rtype: bool
"""
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
| gpl-3.0 |
MwanzanFelipe/rockletonfortune | lib/django/db/backends/postgresql/version.py | 632 | 1517 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL #.#.#
# EnterpriseDB #.#
# PostgreSQL #.# beta#
# PostgreSQL #.#beta#
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
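# Worked examples (consistent with the get_version docstring below):
# _parse_version('PostgreSQL 9.3.4') -> 9 * 10000 + 3 * 100 + 4 = 90304
# _parse_version('PostgreSQL 9.1beta2') -> 9 * 10000 + 1 * 100 = 90100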
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 90304 for 9.3.4. The last two digits will be 00 in the case of
releases (e.g., 90400 for 'PostgreSQL 9.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
| bsd-3-clause |
abtink/openthread | tests/scripts/thread-cert/border_router/test_multi_thread_networks.py | 1 | 5473 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies bi-directional connectivity across multiple Thread networks.
#
# Topology:
# -------------(eth)----------------
# | |
# BR1 BR2
# | |
# ROUTER1 ROUTER2
#
# Thread Net1 Thread Net2
#
BR1 = 1
ROUTER1 = 2
BR2 = 3
ROUTER2 = 4
CHANNEL1 = 18
CHANNEL2 = 19
class MultiThreadNetworks(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR1: {
'name': 'BR_1',
'allowlist': [ROUTER1],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL1,
'router_selection_jitter': 1,
},
ROUTER1: {
'name': 'Router_1',
'allowlist': [BR1],
'version': '1.2',
'channel': CHANNEL1,
'router_selection_jitter': 1,
},
BR2: {
'name': 'BR_2',
'allowlist': [ROUTER2],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL2,
'router_selection_jitter': 1,
},
ROUTER2: {
'name': 'Router_2',
'allowlist': [BR2],
'version': '1.2',
'channel': CHANNEL2,
'router_selection_jitter': 1,
},
}
def test(self):
self.nodes[BR1].start()
self.simulator.go(5)
self.assertEqual('leader', self.nodes[BR1].get_state())
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[ROUTER1].get_state())
self.nodes[BR2].start()
self.simulator.go(5)
self.assertEqual('leader', self.nodes[BR2].get_state())
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[ROUTER2].get_state())
self.collect_ipaddrs()
logging.info("BR1 addrs: %r", self.nodes[BR1].get_addrs())
logging.info("ROUTER1 addrs: %r", self.nodes[ROUTER1].get_addrs())
logging.info("BR2 addrs: %r", self.nodes[BR2].get_addrs())
logging.info("ROUTER2 addrs: %r", self.nodes[ROUTER2].get_addrs())
self.assertTrue(len(self.nodes[BR1].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[ROUTER1].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[BR2].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[ROUTER2].get_prefixes()) == 1)
br1_omr_prefix = self.nodes[BR1].get_prefixes()[0]
br2_omr_prefix = self.nodes[BR2].get_prefixes()[0]
self.assertNotEqual(br1_omr_prefix, br2_omr_prefix)
# Each BR should independently register an external route for the on-link prefix
# and OMR prefix in another Thread Network.
self.assertTrue(len(self.nodes[BR1].get_routes()) == 2)
self.assertTrue(len(self.nodes[ROUTER1].get_routes()) == 2)
self.assertTrue(len(self.nodes[BR2].get_routes()) == 2)
self.assertTrue(len(self.nodes[ROUTER2].get_routes()) == 2)
br1_external_routes = self.nodes[BR1].get_routes()
br2_external_routes = self.nodes[BR2].get_routes()
br1_external_routes.sort()
br2_external_routes.sort()
self.assertNotEqual(br1_external_routes, br2_external_routes)
self.assertTrue(len(self.nodes[ROUTER1].get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(len(self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(self.nodes[ROUTER1].ping(self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
self.assertTrue(self.nodes[ROUTER2].ping(self.nodes[ROUTER1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
praekelt/django-football365 | football365/migrations/0001_initial.py | 1 | 1330 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Call'
db.create_table('football365_call', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
('call_type', self.gf('django.db.models.fields.CharField')(max_length=32)),
('football365_service_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('football365', ['Call'])
def backwards(self, orm):
# Deleting model 'Call'
db.delete_table('football365_call')
models = {
'football365.call': {
'Meta': {'ordering': "('title',)", 'object_name': 'Call'},
'call_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'football365_service_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['football365'] | bsd-3-clause |
albatrossandco/brubeck_cms | brubeck/articles/views.py | 1 | 4971 | # Imports from standard libraries
from datetime import date, datetime, time, timedelta
# Imports from Django
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.contrib.sites.models import Site
from django.http import Http404, HttpResponsePermanentRedirect
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.cache import cache_page
# Imports from Brubeck
from brubeck.articles.models import Article, Correction
from brubeck.mapping.views import detail as map_detail
@cache_page(60 * 5)
def archive(request, year=None, month=None, day=None, page=1):
"""
Shows a paginated list of articles by year, month or day.
Arguments:
'year'
Optional.
'month'
Optional.
'day'
Optional.
'page'
Optional.
Context:
'archive_name'
A string with the name of the archive to display.
'archive_page'
A Paginator.page instance with object_list containing the articles
to display.
'next_page_url', 'previous_page_url'
URLs for the next and previous archive pages.
"""
try:
page = int(page)
if year:
year = int(year)
if month:
month = int(month)
if day:
day = int(day)
except ValueError:
raise Http404
site = Site.objects.get_current()
try:
articles = Article.get_published.filter(section__publication__site=site)
except:
raise Http404
if not year:
articles = articles
archive_name = "Article archive"
elif not month:
articles = articles.filter(pub_date__year=year)
archive_name = "Articles from %s" % year
elif not day:
articles = articles.filter(pub_date__year=year, pub_date__month=month)
archive_name = "Articles from %s" % date(year, month, 1).strftime("%B %Y")
else:
articles = articles.filter(pub_date=date(year, month, day))
archive_name = "Articles from %s" % date(year, month, day).strftime("%B %d, %Y")
paginator = Paginator(articles, 20)
try:
archive_page = paginator.page(page)
except (EmptyPage, InvalidPage):
raise Http404
url_base = '/stories/'
if year:
url_base += '%s/' % year
if month:
url_base += '%s/' % month
if day:
url_base += '%s/' % day
next_page_url = '%sp%s/' % (url_base, page + 1)
previous_page_url = '%sp%s/' % (url_base, page - 1)
page = {
'archive_name': archive_name,
'archive_page': archive_page,
'next_page_url': next_page_url,
'previous_page_url': previous_page_url
}
return render_to_response('articles/archive.html', page, context_instance=RequestContext(request))
@cache_page(60 * 5)
def detail(request, year=None, month=None, day=None, slug=None, mode=None):
"""
Shows a particular article or its associated photos and graphics.
"""
site = Site.objects.get_current()
try:
article = Article.get_published.filter(section__publication__site=site).filter(pub_date__year=int(year), pub_date__month=int(month), pub_date__day=int(day)).get(slug=slug)
except Article.DoesNotExist:
raise Http404
images = []
images.extend(article.photos.all())
images.extend(article.editorial_cartoons.all())
images.extend(article.graphics.all())
multimedia = []
multimedia.extend(article.videos.all())
multimedia.extend(article.slideshows.all())
multimedia.extend(article.audio_clips.all())
multimedia.extend(article.podcast_episodes.all())
if article.map:
map_data = map_detail(request, slug=article.map.slug, mode='context')
else:
map_data = None
if article.type == 'column':
try:
article.mugshot = article.byline[0].mugshot
except:
article.mugshot = None
else:
article.mugshot = None
article.attached_audio = False
for item in article.attached_files.all():
if item.get_file_extension() == 'mp3':
article.attached_audio = True
page = {
'article': article,
'images': images,
'map_data': map_data,
'multimedia': multimedia
}
if mode == 'images':
return render_to_response('articles/detail_images.html', page, context_instance=RequestContext(request))
else:
return render_to_response('articles/detail.html', page, context_instance=RequestContext(request))
def corrections(request):
"""
Shows corrections from the past two weeks.
"""
TWOWEEKSAGO = date.today() - timedelta(14)
corrections = Correction.objects.filter(date_corrected__gte=TWOWEEKSAGO)
page = {
'corrections': corrections
}
return render_to_response('articles/correction_list.html', page, context_instance=RequestContext(request))
| bsd-3-clause |
Kami/libcloud | libcloud/common/dimensiondata.py | 3 | 58274 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimension Data Common Components
"""
from base64 import b64encode
from time import sleep
# TODO: use distutils.version once Travis CI fixes the pylint issue with version
# from distutils.version import LooseVersion
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse
from libcloud.compute.base import Node
from libcloud.utils.py3 import basestring
from libcloud.utils.xml import findtext
from libcloud.compute.types import LibcloudError, InvalidCredsError
# Roadmap / TODO:
#
# 1.0 - Copied from OpSource API, named provider details.
# setup a few variables to represent all of the DimensionData cloud namespaces
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
GENERAL_NS = NAMESPACE_BASE + "/general"
BACKUP_NS = NAMESPACE_BASE + "/backup"
# API 2.0 Namespaces and URNs
TYPES_URN = "urn:didata.com:api:cloud:types"
# API end-points
API_ENDPOINTS = {
'dd-na': {
'name': 'North America (NA)',
'host': 'api-na.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au': {
'name': 'Australia (AU)',
'host': 'api-au.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au-gov': {
'name': 'Australia Canberra ACT (AU)',
'host': 'api-canberra.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-af': {
'name': 'Africa (AF)',
'host': 'api-mea.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-latam': {
'name': 'South America (LATAM)',
'host': 'api-latam.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-canada': {
'name': 'Canada (CA)',
'host': 'api-canada.dimensiondata.com',
'vendor': 'DimensionData'
},
'is-na': {
'name': 'North America (NA)',
'host': 'usapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-eu': {
'name': 'Europe (EU)',
'host': 'euapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-au': {
'name': 'Australia (AU)',
'host': 'auapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-af': {
'name': 'Africa (AF)',
'host': 'meaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-ap': {
'name': 'Asia Pacific (AP)',
'host': 'apapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-latam': {
'name': 'South America (LATAM)',
'host': 'latamapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-canada': {
'name': 'Canada (CA)',
'host': 'canadaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'ntta-na': {
'name': 'North America (NA)',
'host': 'cloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-eu': {
'name': 'Europe (EU)',
'host': 'eucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-au': {
'name': 'Australia (AU)',
'host': 'aucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-af': {
'name': 'Africa (AF)',
'host': 'sacloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-ap': {
'name': 'Asia Pacific (AP)',
'host': 'hkcloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'cisco-na': {
'name': 'North America (NA)',
'host': 'iaas-api-na.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-eu': {
'name': 'Europe (EU)',
'host': 'iaas-api-eu.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-au': {
'name': 'Australia (AU)',
'host': 'iaas-api-au.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-af': {
'name': 'Africa (AF)',
'host': 'iaas-api-mea.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-ap': {
'name': 'Asia Pacific (AP)',
'host': 'iaas-api-ap.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-latam': {
'name': 'South America (LATAM)',
'host': 'iaas-api-sa.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-canada': {
'name': 'Canada (CA)',
'host': 'iaas-api-ca.cisco-ccs.com',
'vendor': 'Cisco'
},
'med1-il': {
'name': 'Israel (IL)',
'host': 'api.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-na': {
'name': 'North America (NA)',
'host': 'api-na.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-au': {
'name': 'Australia (AU)',
'host': 'api-au.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-af': {
'name': 'Africa (AF)',
'host': 'api-af.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-latam': {
'name': 'South America (LATAM)',
'host': 'api-sa.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-canada': {
'name': 'Canada (CA)',
'host': 'api-ca.cloud.med-1.com',
'vendor': 'Med-1'
},
'indosat-id': {
'name': 'Indonesia (ID)',
'host': 'iaas-api.indosat.com',
'vendor': 'Indosat'
},
'indosat-na': {
'name': 'North America (NA)',
'host': 'iaas-usapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-eu': {
'name': 'Europe (EU)',
'host': 'iaas-euapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-au': {
'name': 'Australia (AU)',
'host': 'iaas-auapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-af': {
'name': 'Africa (AF)',
'host': 'iaas-afapi.indosat.com',
'vendor': 'Indosat'
},
'bsnl-in': {
'name': 'India (IN)',
'host': 'api.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-na': {
'name': 'North America (NA)',
'host': 'usapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-eu': {
'name': 'Europe (EU)',
'host': 'euapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-au': {
'name': 'Australia (AU)',
'host': 'auapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-af': {
'name': 'Africa (AF)',
'host': 'afapi.bsnlcloud.com',
'vendor': 'BSNL'
}
}
# Default API end-point for the base connection class.
DEFAULT_REGION = 'dd-na'
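# Illustrative lookup: API_ENDPOINTS[DEFAULT_REGION]['host'] yields
# 'api-na.dimensiondata.com'; drivers pass one of these endpoint dicts to
# DimensionDataConnection as conn_kwargs['region'] (see __init__ below).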
BAD_CODE_XML_ELEMENTS = (
('responseCode', SERVER_NS),
('responseCode', TYPES_URN),
('result', GENERAL_NS)
)
BAD_MESSAGE_XML_ELEMENTS = (
('message', SERVER_NS),
('message', TYPES_URN),
('resultDetail', GENERAL_NS)
)
def dd_object_to_id(obj, obj_type, id_value='id'):
"""
Takes in a DD object or string and returns its id.
This is a helper method, as many of our functions can take either an object
or a string, and we need an easy way of converting them.
:param obj: The object (or id string) to get the id for
:type obj: ``object`` or ``str``
:param obj_type: The expected class of the object
:type obj_type: ``type``
:param id_value: The name of the attribute that holds the id
:type id_value: ``str``
:rtype: ``str``
"""
if isinstance(obj, obj_type):
return getattr(obj, id_value)
elif isinstance(obj, (basestring)):
return obj
else:
raise TypeError(
"Invalid type %s looking for basestring or %s"
% (type(obj).__name__, obj_type.__name__)
)
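# Usage sketch (placeholder id; `driver` is an assumed
# DimensionDataNodeDriver instance):
#
#   vlan = driver.ex_get_vlan('example-vlan-id')
#   dd_object_to_id(vlan, DimensionDataVlan)               # -> vlan.id
#   dd_object_to_id('example-vlan-id', DimensionDataVlan)  # -> 'example-vlan-id'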
# TODO: use distutils.version once Travis CI fixes the pylint issue with version
# This is a temporary workaround.
def LooseVersion(version):
return float(version)
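# Note: this float() stand-in only orders single-dot versions such as the
# '2.2'..'2.4' range used here; a hypothetical '2.10' would compare as 2.1
# and sort below '2.4', which is why the TODO above remains.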
class NetworkDomainServicePlan(object):
ESSENTIALS = "ESSENTIALS"
ADVANCED = "ADVANCED"
class DimensionDataRawResponse(RawResponse):
pass
class DimensionDataResponse(XmlResponse):
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError(self.body)
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
body = self.parse_body()
if self.status == httplib.BAD_REQUEST:
for response_code in BAD_CODE_XML_ELEMENTS:
code = findtext(body, response_code[0], response_code[1])
if code is not None:
break
for message in BAD_MESSAGE_XML_ELEMENTS:
message = findtext(body, message[0], message[1])
if message is not None:
break
raise DimensionDataAPIException(code=code,
msg=message,
driver=self.connection.driver)
if self.status != httplib.OK:
raise DimensionDataAPIException(code=self.status,
msg=body,
driver=self.connection.driver)
return self.body
class DimensionDataAPIException(LibcloudError):
def __init__(self, code, msg, driver):
self.code = code
self.msg = msg
self.driver = driver
def __str__(self):
return "%s: %s" % (self.code, self.msg)
def __repr__(self):
return ("<DimensionDataAPIException: code='%s', msg='%s'>" %
(self.code, self.msg))
class DimensionDataConnection(ConnectionUserAndKey):
"""
Connection class for the DimensionData driver
"""
api_path_version_1 = '/oec'
api_path_version_2 = '/caas'
api_version_1 = 0.9
# Earliest version supported
oldest_api_version = '2.2'
# Latest version supported
latest_api_version = '2.4'
# Default api version
active_api_version = '2.4'
_orgId = None
responseCls = DimensionDataResponse
rawResponseCls = DimensionDataRawResponse
allow_insecure = False
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
api_version=None, **conn_kwargs):
super(DimensionDataConnection, self).__init__(
user_id=user_id,
key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url)
if conn_kwargs['region']:
self.host = conn_kwargs['region']['host']
if api_version:
if LooseVersion(api_version) < LooseVersion(
self.oldest_api_version):
msg = 'API Version specified is too old. No longer ' \
'supported. Please upgrade to the latest version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
elif LooseVersion(api_version) > LooseVersion(
self.latest_api_version):
msg = 'Unsupported API Version. The version specified is ' \
'not released yet. Please use the latest supported ' \
'version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
else:
# Overwrite default version using the version user specified
self.active_api_version = api_version
def add_default_headers(self, headers):
headers['Authorization'] = \
('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
self.key))).decode('utf-8'))
headers['Content-Type'] = 'application/xml'
return headers
def request_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s" % (self.api_path_version_1,
self.api_version_1, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_api_2(self, path, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s/%s" % (self.api_path_version_2,
self.active_api_version, path, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def raw_request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers, raw=True)
def request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_2(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def paginated_request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET',
page_size=250):
"""
A paginated request to the MCP2.0 API
This essentially calls out to request_with_orgId_api_2 for each page
and yields the response to make a generator
This generator can be looped through to grab all the pages.
:param action: The resource to access (i.e. 'network/vlan')
:type action: ``str``
:param params: Parameters to give to the action
:type params: ``dict`` or ``None``
:param data: The data payload to be added to the request
:type data: ``str``
:param headers: Additional header to be added to the request
:type headers: ``str`` or ``dict`` or ``None``
:param method: HTTP Method for the request (i.e. 'GET', 'POST')
:type method: ``str``
:param page_size: The size of each page to be returned
Note: Max page size in MCP2.0 is currently 250
:type page_size: ``int``
"""
if params is None:
params = {}
params['pageSize'] = page_size
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
yield resp
if len(resp) <= 0:
return
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
while int(pcount) >= int(psize):
params['pageNumber'] = int(pnumber) + 1
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
yield resp
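# Usage sketch (illustrative action; assumes a connected instance `conn`):
#
#   for page in conn.paginated_request_with_orgId_api_2('network/vlan'):
#       ...  # each `page` is the parsed XML object for one page of results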
def get_resource_path_api_1(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_1, self.api_version_1,
self._get_orgId()))
def get_resource_path_api_2(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_2, self.active_api_version,
self._get_orgId()))
def wait_for_state(self, state, func, poll_interval=2, timeout=60, *args,
**kwargs):
"""
Wait for the function which returns an instance with field status/state
to match.
Keep polling func until one of the desired states is matched
:param state: Either the desired state (`str`) or a `list` of states
:type state: ``str`` or ``list``
:param func: The function to call, e.g. ex_get_vlan. Note: This
function needs to return an object which has ``status``
attribute.
:type func: ``function``
:param poll_interval: The number of seconds to wait between checks
:type poll_interval: `int`
:param timeout: The total number of seconds to wait to reach a state
:type timeout: `int`
:param args: The arguments for func
:type args: Positional arguments
:param kwargs: The arguments for func
:type kwargs: Keyword arguments
:return: Result from the calling function.
"""
cnt = 0
result = None
object_state = None
while cnt < timeout / poll_interval:
result = func(*args, **kwargs)
if isinstance(result, Node):
object_state = result.state
else:
object_state = result.status
if object_state == state or str(object_state) in state:
return result
sleep(poll_interval)
cnt += 1
msg = 'Status check for object %s timed out' % (result)
raise DimensionDataAPIException(code=object_state,
msg=msg,
driver=self.driver)
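# Usage sketch (illustrative driver call and placeholder id; assumes
# ex_get_vlan returns an object exposing a ``status`` attribute):
#
#   vlan = conn.wait_for_state('NORMAL', driver.ex_get_vlan, 5, 600,
#                              'example-vlan-id')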
def _get_orgId(self):
"""
Send the /myaccount API request to DimensionData cloud and parse the
'orgId' from the XML response object. We need the orgId to use most
of the other API functions
"""
if self._orgId is None:
body = self.request_api_1('myaccount').object
self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
return self._orgId
def get_account_details(self):
"""
Get the details of this account
:rtype: :class:`DimensionDataAccountDetails`
"""
body = self.request_api_1('myaccount').object
return DimensionDataAccountDetails(
user_name=findtext(body, 'userName', DIRECTORY_NS),
full_name=findtext(body, 'fullName', DIRECTORY_NS),
first_name=findtext(body, 'firstName', DIRECTORY_NS),
last_name=findtext(body, 'lastName', DIRECTORY_NS),
email=findtext(body, 'emailAddress', DIRECTORY_NS))
class DimensionDataAccountDetails(object):
"""
Dimension Data account class details
"""
def __init__(self, user_name, full_name, first_name, last_name, email):
self.user_name = user_name
self.full_name = full_name
self.first_name = first_name
self.last_name = last_name
self.email = email
class DimensionDataStatus(object):
"""
DimensionData API pending operation status class
action, request_time, user_name, number_of_steps, update_time,
step.name, step.number, step.percent_complete, failure_reason,
"""
def __init__(self, action=None, request_time=None, user_name=None,
number_of_steps=None, update_time=None, step_name=None,
step_number=None, step_percent_complete=None,
failure_reason=None):
self.action = action
self.request_time = request_time
self.user_name = user_name
self.number_of_steps = number_of_steps
self.update_time = update_time
self.step_name = step_name
self.step_number = step_number
self.step_percent_complete = step_percent_complete
self.failure_reason = failure_reason
def __repr__(self):
return (('<DimensionDataStatus: action=%s, request_time=%s, '
'user_name=%s, number_of_steps=%s, update_time=%s, '
'step_name=%s, step_number=%s, '
'step_percent_complete=%s, failure_reason=%s>')
% (self.action, self.request_time, self.user_name,
self.number_of_steps, self.update_time, self.step_name,
self.step_number, self.step_percent_complete,
self.failure_reason))
class DimensionDataNetwork(object):
"""
DimensionData network with location.
"""
def __init__(self, id, name, description, location, private_net,
multicast, status):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.private_net = private_net
self.multicast = multicast
self.status = status
def __repr__(self):
return (('<DimensionDataNetwork: id=%s, name=%s, description=%s, '
'location=%s, private_net=%s, multicast=%s>')
% (self.id, self.name, self.description, self.location,
self.private_net, self.multicast))
class DimensionDataNetworkDomain(object):
"""
DimensionData network domain with location.
"""
def __init__(self, id, name, description, location, status, plan):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.status = status
self.plan = plan
def __repr__(self):
return (('<DimensionDataNetworkDomain: id=%s, name=%s, '
'description=%s, location=%s, status=%s, plan=%s>')
% (self.id, self.name, self.description, self.location,
self.status, self.plan))
class DimensionDataPublicIpBlock(object):
"""
DimensionData Public IP Block with location.
"""
def __init__(self, id, base_ip, size, location, network_domain,
status):
self.id = str(id)
self.base_ip = base_ip
self.size = size
self.location = location
self.network_domain = network_domain
self.status = status
def __repr__(self):
return (('<DimensionDataPublicIpBlock: id=%s, base_ip=%s, '
'size=%s, location=%s, status=%s>')
% (self.id, self.base_ip, self.size, self.location,
self.status))
class DimensionDataServerCpuSpecification(object):
"""
A class that represents the specification of the CPU(s) for a
node
"""
def __init__(self, cpu_count, cores_per_socket, performance):
"""
Instantiate a new :class:`DimensionDataServerCpuSpecification`
:param cpu_count: The number of CPUs
:type cpu_count: ``int``
:param cores_per_socket: The number of cores per socket, the
recommendation is 1
:type cores_per_socket: ``int``
:param performance: The performance type, e.g. HIGHPERFORMANCE
:type performance: ``str``
"""
self.cpu_count = cpu_count
self.cores_per_socket = cores_per_socket
self.performance = performance
def __repr__(self):
return (('<DimensionDataServerCpuSpecification: '
'cpu_count=%s, cores_per_socket=%s, '
'performance=%s>')
% (self.cpu_count, self.cores_per_socket, self.performance))
class DimensionDataServerDisk(object):
"""
A class that represents the disk on a server
"""
def __init__(self, id=None, scsi_id=None, size_gb=None, speed=None,
state=None):
"""
Instantiate a new :class:`DimensionDataServerDisk`
:param id: The id of the disk
:type id: ``str``
:param scsi_id: Representation for scsi
:type scsi_id: ``int``
:param size_gb: Size of the disk
:type size_gb: ``int``
:param speed: Speed of the disk (i.e. STANDARD)
:type speed: ``str``
:param state: State of the disk (i.e. PENDING)
:type state: ``str``
"""
self.id = id
self.scsi_id = scsi_id
self.size_gb = size_gb
self.speed = speed
self.state = state
def __repr__(self):
return (('<DimensionDataServerDisk: '
'id=%s, size_gb=%s>')
% (self.id, self.size_gb))
class DimensionDataServerVMWareTools(object):
"""
A class that represents the VMWareTools for a node
"""
def __init__(self, status, version_status, api_version):
"""
Instantiate a new :class:`DimensionDataServerVMWareTools` object
:param status: The status of VMWare Tools
:type status: ``str``
:param version_status: The status for the version of VMWare Tools
(i.e NEEDS_UPGRADE)
:type version_status: ``str``
:param api_version: The API version of VMWare Tools
:type api_version: ``str``
"""
self.status = status
self.version_status = version_status
self.api_version = api_version
def __repr__(self):
return (('<DimensionDataServerVMWareTools: '
'status=%s, version_status=%s, '
'api_version=%s>')
% (self.status, self.version_status, self.api_version))
class DimensionDataFirewallRule(object):
"""
DimensionData Firewall Rule for a network domain
"""
def __init__(self, id, name, action, location, network_domain,
status, ip_version, protocol, source, destination,
enabled):
self.id = str(id)
self.name = name
self.action = action
self.location = location
self.network_domain = network_domain
self.status = status
self.ip_version = ip_version
self.protocol = protocol
self.source = source
self.destination = destination
self.enabled = enabled
def __repr__(self):
return (('<DimensionDataFirewallRule: id=%s, name=%s, '
'action=%s, location=%s, network_domain=%s, '
'status=%s, ip_version=%s, protocol=%s, source=%s, '
'destination=%s, enabled=%s>')
% (self.id, self.name, self.action, self.location,
self.network_domain, self.status, self.ip_version,
self.protocol, self.source, self.destination,
self.enabled))
class DimensionDataFirewallAddress(object):
"""
The source or destination model in a firewall rule
"""
def __init__(self, any_ip, ip_address, ip_prefix_size,
port_begin, port_end, address_list_id,
port_list_id):
self.any_ip = any_ip
self.ip_address = ip_address
self.ip_prefix_size = ip_prefix_size
self.port_begin = port_begin
self.port_end = port_end
self.address_list_id = address_list_id
self.port_list_id = port_list_id
def __repr__(self):
return (
'<DimensionDataFirewallAddress: any_ip=%s, ip_address=%s, '
'ip_prefix_size=%s, port_begin=%s, port_end=%s, '
'address_list_id=%s, port_list_id=%s>'
% (self.any_ip, self.ip_address, self.ip_prefix_size,
self.port_begin, self.port_end, self.address_list_id,
self.port_list_id))
class DimensionDataNatRule(object):
"""
An IP NAT rule in a network domain
"""
def __init__(self, id, network_domain, internal_ip, external_ip, status):
self.id = id
self.network_domain = network_domain
self.internal_ip = internal_ip
self.external_ip = external_ip
self.status = status
def __repr__(self):
return (('<DimensionDataNatRule: id=%s, status=%s>')
% (self.id, self.status))
class DimensionDataAntiAffinityRule(object):
"""
Anti-Affinity rule for DimensionData
An Anti-Affinity rule ensures that servers in the rule will
not reside on the same VMware ESX host.
"""
def __init__(self, id, node_list):
"""
Instantiate a new :class:`DimensionDataAntiAffinityRule`
:param id: The ID of the Anti-Affinity rule
:type id: ``str``
:param node_list: List of node ids that belong in this rule
:type node_list: ``list`` of ``str``
"""
self.id = id
self.node_list = node_list
def __repr__(self):
return (('<DimensionDataAntiAffinityRule: id=%s>')
% (self.id))
class DimensionDataVlan(object):
"""
DimensionData VLAN.
"""
def __init__(self, id, name, description, location, network_domain,
status, private_ipv4_range_address, private_ipv4_range_size,
ipv6_range_address, ipv6_range_size, ipv4_gateway,
ipv6_gateway):
"""
Initialize an instance of ``DimensionDataVlan``
:param id: The ID of the VLAN
:type id: ``str``
:param name: The name of the VLAN
:type name: ``str``
:param description: Plain text description of the VLAN
:type description: ``str``
:param location: The location (data center) of the VLAN
:type location: ``NodeLocation``
:param network_domain: The Network Domain that owns this VLAN
:type network_domain: :class:`DimensionDataNetworkDomain`
:param status: The status of the VLAN
:type status: :class:`DimensionDataStatus`
:param private_ipv4_range_address: The host address of the VLAN
IP space
:type private_ipv4_range_address: ``str``
:param private_ipv4_range_size: The size (e.g. '24') of the VLAN
as a CIDR range size
:type private_ipv4_range_size: ``int``
:param ipv6_range_address: The host address of the VLAN
IP space
:type ipv6_range_address: ``str``
:param ipv6_range_size: The size (e.g. '32') of the VLAN
as a CIDR range size
:type ipv6_range_size: ``int``
:param ipv4_gateway: The IPv4 default gateway address
:type ipv4_gateway: ``str``
:param ipv6_gateway: The IPv6 default gateway address
:type ipv6_gateway: ``str``
"""
self.id = str(id)
self.name = name
self.location = location
self.description = description
self.network_domain = network_domain
self.status = status
self.private_ipv4_range_address = private_ipv4_range_address
self.private_ipv4_range_size = private_ipv4_range_size
self.ipv6_range_address = ipv6_range_address
self.ipv6_range_size = ipv6_range_size
self.ipv4_gateway = ipv4_gateway
self.ipv6_gateway = ipv6_gateway
def __repr__(self):
return (('<DimensionDataVlan: id=%s, name=%s, '
'description=%s, location=%s, status=%s>')
% (self.id, self.name, self.description,
self.location, self.status))
class DimensionDataPool(object):
"""
DimensionData VIP Pool.
"""
def __init__(self, id, name, description, status, load_balance_method,
health_monitor_id, service_down_action, slow_ramp_time):
"""
Initialize an instance of ``DimensionDataPool``
:param id: The ID of the pool
:type id: ``str``
:param name: The name of the pool
:type name: ``str``
:param description: Plain text description of the pool
:type description: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param load_balance_method: The load balancer method
:type load_balance_method: ``str``
:param health_monitor_id: The ID of the health monitor
:type health_monitor_id: ``str``
:param service_down_action: Action to take when pool is down
:type service_down_action: ``str``
:param slow_ramp_time: The ramp-up time for service recovery
:type slow_ramp_time: ``int``
"""
self.id = str(id)
self.name = name
self.description = description
self.status = status
self.load_balance_method = load_balance_method
self.health_monitor_id = health_monitor_id
self.service_down_action = service_down_action
self.slow_ramp_time = slow_ramp_time
def __repr__(self):
return (('<DimensionDataPool: id=%s, name=%s, '
'description=%s, status=%s>')
% (self.id, self.name, self.description,
self.status))
class DimensionDataPoolMember(object):
"""
DimensionData VIP Pool Member.
"""
def __init__(self, id, name, status, ip, port, node_id):
"""
Initialize an instance of ``DimensionDataPoolMember``
:param id: The ID of the pool member
:type id: ``str``
:param name: The name of the pool member
:type name: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the pool member
:type ip: ``str``
:param port: The port of the pool member
:type port: ``int``
:param node_id: The ID of the associated node
:type node_id: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.port = port
self.node_id = node_id
def __repr__(self):
return (('<DimensionDataPoolMember: id=%s, name=%s, '
'ip=%s, status=%s, port=%s, node_id=%s>')
% (self.id, self.name,
self.ip, self.status, self.port,
self.node_id))
class DimensionDataVIPNode(object):
def __init__(self, id, name, status, ip, connection_limit='10000',
connection_rate_limit='10000'):
"""
Initialize an instance of :class:`DimensionDataVIPNode`
:param id: The ID of the node
:type id: ``str``
:param name: The name of the node
:type name: ``str``
:param status: The status of the node
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the node
:type ip: ``str``
:param connection_limit: The total connection limit for the node
:type connection_limit: ``int``
:param connection_rate_limit: The rate limit for the node
:type connection_rate_limit: ``int``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.connection_limit = connection_limit
self.connection_rate_limit = connection_rate_limit
def __repr__(self):
return (('<DimensionDataVIPNode: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataVirtualListener(object):
"""
DimensionData Virtual Listener.
"""
def __init__(self, id, name, status, ip):
"""
Initialize an instance of :class:`DimensionDataVirtualListener`
:param id: The ID of the listener
:type id: ``str``
:param name: The name of the listener
:type name: ``str``
:param status: The status of the listener
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the listener
:type ip: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
def __repr__(self):
return (('<DimensionDataVirtualListener: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataDefaultHealthMonitor(object):
"""
A default health monitor for a VIP (node, pool or listener)
"""
def __init__(self, id, name, node_compatible, pool_compatible):
"""
Initialize an instance of :class:`DimensionDataDefaultHealthMonitor`
:param id: The ID of the monitor
:type id: ``str``
:param name: The name of the monitor
:type name: ``str``
:param node_compatible: Whether the monitor is capable of monitoring nodes
:type node_compatible: ``bool``
:param pool_compatible: Whether the monitor is capable of monitoring pools
:type pool_compatible: ``bool``
"""
self.id = id
self.name = name
self.node_compatible = node_compatible
self.pool_compatible = pool_compatible
def __repr__(self):
return (('<DimensionDataDefaultHealthMonitor: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataPersistenceProfile(object):
"""
Each Persistence Profile declares the combination of Virtual Listener
type and protocol with which it is
compatible and whether or not it is compatible as a
Fallback Persistence Profile.
"""
def __init__(self, id, name, compatible_listeners, fallback_compatible):
"""
Initialize an instance of :class:`DimensionDataPersistenceProfile`
:param id: The ID of the profile
:type id: ``str``
:param name: The name of the profile
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
:param fallback_compatible: Whether the profile can be used as a fallback
:type fallback_compatible: ``bool``
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
self.fallback_compatible = fallback_compatible
def __repr__(self):
return (('<DimensionDataPersistenceProfile: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataDefaultiRule(object):
"""
A default iRule for a network domain, can be applied to a listener
"""
def __init__(self, id, name, compatible_listeners):
"""
Initialize an instance of :class:`DimensionDataDefaultiRule`
:param id: The ID of the iRule
:type id: ``str``
:param name: The name of the iRule
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
def __repr__(self):
return (('<DimensionDataDefaultiRule: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataVirtualListenerCompatibility(object):
"""
A compatibility preference for a persistence profile or iRule
specifies which virtual listener types this profile or iRule can be
applied to.
"""
def __init__(self, type, protocol):
self.type = type
self.protocol = protocol
def __repr__(self):
return (('<DimensionDataVirtualListenerCompatibility: '
'type=%s, protocol=%s>')
% (self.type, self.protocol))
class DimensionDataBackupDetails(object):
"""
Dimension Data Backup Details represents information about
a targets backups configuration
"""
def __init__(self, asset_id, service_plan, status, clients=None):
"""
Initialize an instance of :class:`DimensionDataBackupDetails`
:param asset_id: Asset identification for backups
:type asset_id: ``str``
:param service_plan: The service plan for backups. i.e (Essentials)
:type service_plan: ``str``
:param status: The overall status of this backup target.
i.e. (unregistered)
:type status: ``str``
:param clients: Backup clients attached to this target
:type clients: ``list`` of :class:`DimensionDataBackupClient`
"""
self.asset_id = asset_id
self.service_plan = service_plan
self.status = status
self.clients = clients
def __repr__(self):
return (('<DimensionDataBackupDetails: id=%s>')
% (self.asset_id))
class DimensionDataBackupClient(object):
"""
An object that represents a backup client
"""
def __init__(self, id, type, status,
schedule_policy, storage_policy, download_url,
alert=None, running_job=None):
"""
Initialize an instance of :class:`DimensionDataBackupClient`
:param id: Unique ID for the client
:type id: ``str``
:param type: The type of client that this client is
:type type: :class:`DimensionDataBackupClientType`
:param status: The state of this particular backup client.
i.e. (Unregistered)
:type status: ``str``
:param schedule_policy: The schedule policy for this client
NOTE: Dimension Data only sends back the name
of the schedule policy, no further details
:type schedule_policy: ``str``
:param storage_policy: The storage policy for this client
NOTE: Dimension Data only sends back the name
of the storage policy, no further details
:type storage_policy: ``str``
:param download_url: The download url for this client
:type download_url: ``str``
:param alert: The alert configured for this backup client (optional)
:type alert: :class:`DimensionDataBackupClientAlert`
:param running_job: The running job for the client (optional)
:type running_job: :class:`DimensionDataBackupClientRunningJob`
"""
self.id = id
self.type = type
self.status = status
self.schedule_policy = schedule_policy
self.storage_policy = storage_policy
self.download_url = download_url
self.alert = alert
self.running_job = running_job
def __repr__(self):
return (('<DimensionDataBackupClient: id=%s>')
% (self.id))
class DimensionDataBackupClientAlert(object):
"""
An alert for a backup client
"""
def __init__(self, trigger, notify_list=None):
"""
Initialize an instance of :class:`DimensionDataBackupClientAlert`
:param trigger: Trigger type for the client, e.g. ON_FAILURE
:type trigger: ``str``
:param notify_list: List of email addresses that are notified
when the alert is fired
:type notify_list: ``list`` of ``str``
"""
self.trigger = trigger
self.notify_list = notify_list if notify_list is not None else []
def __repr__(self):
return (('<DimensionDataBackupClientAlert: trigger=%s>')
% (self.trigger))
class DimensionDataBackupClientRunningJob(object):
"""
A running job for a given backup client
"""
def __init__(self, id, status, percentage=0):
"""
Initialize an instance of :class:`DimensionDataBackupClientRunningJob`
:param id: The unique ID of the job
:type id: ``str``
:param status: The status of the job i.e. Waiting
:type status: ``str``
:param percentage: The percentage completion of the job
:type percentage: ``int``
"""
self.id = id
self.percentage = percentage
self.status = status
def __repr__(self):
return (('<DimensionDataBackupClientRunningJob: id=%s>')
% (self.id))
class DimensionDataBackupClientType(object):
"""
A client type object for backups
"""
def __init__(self, type, is_file_system, description):
"""
Initialize an instance of :class:`DimensionDataBackupClientType`
:param type: The type of client, e.g. (FA.Linux, MySQL, etc.)
:type type: ``str``
:param is_file_system: Whether the client backs up a file system
:type is_file_system: ``bool``
:param description: Description of the client
:type description: ``str``
"""
self.type = type
self.is_file_system = is_file_system
self.description = description
def __repr__(self):
return (('<DimensionDataBackupClientType: type=%s>')
% (self.type))
class DimensionDataBackupStoragePolicy(object):
"""
A representation of a storage policy
"""
def __init__(self, name, retention_period, secondary_location):
"""
Initialize an instance of :class:`DimensionDataBackupStoragePolicy`
:param name: The name of the storage policy i.e. 14 Day Storage Policy
:type name: ``str``
:param retention_period: How long to keep the backup in days
:type retention_period: ``int``
:param secondary_location: The secondary location i.e. Primary
:type secondary_location: ``str``
"""
self.name = name
self.retention_period = retention_period
self.secondary_location = secondary_location
def __repr__(self):
return (('<DimensionDataBackupStoragePolicy: name=%s>')
% (self.name))
class DimensionDataBackupSchedulePolicy(object):
"""
A representation of a schedule policy
"""
def __init__(self, name, description):
"""
Initialize an instance of :class:`DimensionDataBackupSchedulePolicy`
:param name: The name of the policy i.e 12AM - 6AM
:type name: ``str``
:param description: Short summary of the details of the policy
:type description: ``str``
"""
self.name = name
self.description = description
def __repr__(self):
return (('<DimensionDataBackupSchedulePolicy: name=%s>')
% (self.name))
class DimensionDataTag(object):
"""
A representation of a Tag in Dimension Data
A Tag must first have a Tag Key; an asset is then tagged with
a key and an optional value. Tags can be queried later to filter assets
and can also show up on usage reports if so desired.
"""
def __init__(self, asset_type, asset_id, asset_name,
datacenter, key, value):
"""
Initialize an instance of :class:`DimensionDataTag`
:param asset_type: The type of asset. Current asset types:
SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE,
PUBLIC_IP_BLOCK, ACCOUNT
:type asset_type: ``str``
:param asset_id: The GUID of the asset that is tagged
:type asset_id: ``str``
:param asset_name: The name of the asset that is tagged
:type asset_name: ``str``
:param datacenter: The short datacenter name of the tagged asset
:type datacenter: ``str``
:param key: The tagged key
:type key: :class:`DimensionDataTagKey`
:param value: The tagged value
:type value: ``None`` or ``str``
"""
self.asset_type = asset_type
self.asset_id = asset_id
self.asset_name = asset_name
self.datacenter = datacenter
self.key = key
self.value = value
def __repr__(self):
return (('<DimensionDataTag: asset_name=%s, tag_name=%s, value=%s>')
% (self.asset_name, self.key.name, self.value))
class DimensionDataTagKey(object):
"""
A representation of a Tag Key in Dimension Data
A tag key is required to tag an asset
"""
def __init__(self, id, name, description,
value_required, display_on_report):
"""
Initialize an instance of :class:`DimensionDataTagKey`
:param id: GUID of the tag key
:type id: ``str``
:param name: Name of the tag key
:type name: ``str``
:param description: Description of the tag key
:type description: ``str``
:param value_required: If a value is required for this tag key
:type value_required: ``bool``
:param display_on_report: If this tag key should be displayed on
usage reports
:type display_on_report: ``bool``
"""
self.id = id
self.name = name
self.description = description
self.value_required = value_required
self.display_on_report = display_on_report
def __repr__(self):
return (('<DimensionDataTagKey: name=%s>')
% (self.name))
class DimensionDataIpAddressList(object):
"""
DimensionData IP Address list
"""
def __init__(self, id, name, description, ip_version,
ip_address_collection,
state, create_time, child_ip_address_lists=None):
""""
Initialize an instance of :class:`DimensionDataIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
:param description: Description of the IP Address List
:type description: ``str``
:param ip_version: IP version. E.g. IPV4, IPV6
:type ip_version: ``str``
:param ip_address_collection: Collection of DimensionDataIpAddress
:type ip_address_collection: ``List``
:param state: IP Address list state
:type state: ``str``
:param create_time: IP Address List created time
:type create_time: ``date time``
        :param child_ip_address_lists: List of IP address lists to be included
        :type child_ip_address_lists: ``list``
            of :class:`DimensionDataIpAddressList`
"""
self.id = id
self.name = name
self.description = description
self.ip_version = ip_version
self.ip_address_collection = ip_address_collection
self.state = state
self.create_time = create_time
self.child_ip_address_lists = child_ip_address_lists
def __repr__(self):
return ('<DimensionDataIpAddressList: id=%s, name=%s, description=%s, '
'ip_version=%s, ip_address_collection=%s, state=%s, '
'create_time=%s, child_ip_address_lists=%s>'
% (self.id, self.name, self.description, self.ip_version,
self.ip_address_collection, self.state, self.create_time,
self.child_ip_address_lists))
class DimensionDataChildIpAddressList(object):
"""
DimensionData Child IP Address list
"""
def __init__(self, id, name):
""""
Initialize an instance of :class:`DimensionDataChildIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildIpAddressList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataIpAddress(object):
"""
A representation of IP Address in Dimension Data
"""
def __init__(self, begin, end=None, prefix_size=None):
"""
Initialize an instance of :class:`DimensionDataIpAddress`
:param begin: IP Address Begin
:type begin: ``str``
:param end: IP Address end
:type end: ``str``
        :param prefix_size: IP Address prefix size
        :type prefix_size: ``int``
"""
self.begin = begin
self.end = end
self.prefix_size = prefix_size
def __repr__(self):
return ('<DimensionDataIpAddress: begin=%s, end=%s, prefix_size=%s>'
% (self.begin, self.end, self.prefix_size))
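# Illustrative sketch (placeholder values): one begin/end range and one
# CIDR-style entry, collected into an address list together with a child
# list, matching the structures documented above.
#
#     addr_range = DimensionDataIpAddress(begin='10.0.0.4', end='10.0.0.9')
#     addr_cidr = DimensionDataIpAddress(begin='10.0.1.0', prefix_size=24)
#     ip_list = DimensionDataIpAddressList(
#         id='list-1', name='web-tier', description='Web servers',
#         ip_version='IPV4', ip_address_collection=[addr_range, addr_cidr],
#         state='NORMAL', create_time='2016-01-01T00:00:00.000Z',
#         child_ip_address_lists=[
#             DimensionDataChildIpAddressList(id='list-0', name='base')])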
class DimensionDataPortList(object):
"""
DimensionData Port list
"""
def __init__(self, id, name, description, port_collection,
child_portlist_list,
state, create_time):
""""
Initialize an instance of :class:`DimensionDataPortList`
:param id: GUID of the Port List key
:type id: ``str``
:param name: Name of the Port List
:type name: ``str``
:param description: Description of the Port List
:type description: ``str``
:param port_collection: Collection of DimensionDataPort
:type port_collection: ``List``
:param child_portlist_list: Collection of DimensionDataChildPort
:type child_portlist_list: ``List``
:param state: Port list state
:type state: ``str``
:param create_time: Port List created time
:type create_time: ``date time``
"""
self.id = id
self.name = name
self.description = description
self.port_collection = port_collection
self.child_portlist_list = child_portlist_list
self.state = state
self.create_time = create_time
def __repr__(self):
return (
"<DimensionDataPortList: id=%s, name=%s, description=%s, "
"port_collection=%s, child_portlist_list=%s, state=%s, "
"create_time=%s>"
% (self.id, self.name, self.description,
self.port_collection, self.child_portlist_list, self.state,
self.create_time))
class DimensionDataChildPortList(object):
"""
DimensionData Child Port list
"""
def __init__(self, id, name):
""""
Initialize an instance of :class:`DimensionDataChildIpAddressList`
:param id: GUID of the child port list key
:type id: ``str``
:param name: Name of the child port List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildPortList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataPort(object):
"""
A representation of Port in Dimension Data
"""
def __init__(self, begin, end=None):
"""
Initialize an instance of :class:`DimensionDataPort`
:param begin: Port Number Begin
:type begin: ``str``
:param end: Port Number end
:type end: ``str``
"""
self.begin = begin
self.end = end
def __repr__(self):
return ('<DimensionDataPort: begin=%s, end=%s>'
% (self.begin, self.end))
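# Illustrative sketch (placeholder values): a single port and a port range,
# as consumed by the port_collection argument of DimensionDataPortList above.
#
#     http_port = DimensionDataPort(begin='80')
#     high_ports = DimensionDataPort(begin='8000', end='8080')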
class DimensionDataNic(object):
"""
A representation of Network Adapter in Dimension Data
"""
def __init__(self, private_ip_v4=None, vlan=None,
network_adapter_name=None):
"""
Initialize an instance of :class:`DimensionDataNic`
:param private_ip_v4: IPv4
:type private_ip_v4: ``str``
:param vlan: Network VLAN
        :type vlan: :class:`DimensionDataVlan` or ``str``
:param network_adapter_name: Network Adapter Name
:type network_adapter_name: ``str``
"""
self.private_ip_v4 = private_ip_v4
self.vlan = vlan
self.network_adapter_name = network_adapter_name
def __repr__(self):
return ('<DimensionDataNic: private_ip_v4=%s, vlan=%s,'
'network_adapter_name=%s>'
% (self.private_ip_v4, self.vlan, self.network_adapter_name))
| apache-2.0 |
Cuuuurzel/KiPyCalc | sympy/liealgebras/weyl_group.py | 17 | 14811 | # -*- coding: utf-8 -*-
from sympy.core import Basic, Rational
from sympy.core.numbers import igcd
from .cartan_type import CartanType
from sympy.mpmath import fac
from operator import itemgetter
from itertools import groupby
from sympy.matrices import Matrix, eye
class WeylGroup(Basic):
"""
For each semisimple Lie group, we have a Weyl group. It is a subgroup of
the isometry group of the root system. Specifically, it’s the subgroup
that is generated by reflections through the hyperplanes orthogonal to
the roots. Therefore, Weyl groups are reflection groups, and so a Weyl
group is a finite Coxeter group.
"""
def __new__(cls, cartantype):
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def generators(self):
"""
This method creates the generating reflections of the Weyl group for
a given Lie algebra. For a Lie algebra of rank n, there are n
different generating reflections. This function returns them as
a list.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("F4")
>>> c.generators()
['r1', 'r2', 'r3', 'r4']
"""
n = self.cartan_type.rank()
generators = []
for i in range(1, n+1):
reflection = "r"+str(i)
generators.append(reflection)
return generators
def group_order(self):
"""
This method returns the order of the Weyl group.
For types A, B, C, D, and E the order depends on
the rank of the Lie algebra. For types F and G,
the order is fixed.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("D4")
>>> c.group_order()
192.0
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return fac(n+1)
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
return fac(n)*(2**n)
if self.cartan_type.series == "D":
return fac(n)*(2**(n-1))
if self.cartan_type.series == "E":
if n == 6:
return 51840
if n == 7:
return 2903040
if n == 8:
return 696729600
if self.cartan_type.series == "F":
return 1152
if self.cartan_type.series == "G":
return 12
def group_name(self):
"""
This method returns some general information about the Weyl group for
a given Lie algebra. It returns the name of the group and the elements
it acts on, if relevant.
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return "S"+str(n+1) + ": the symmetric group acting on " + str(n+1) + " elements."
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
return "The hyperoctahedral group acting on " + str(2*n) + " elements."
if self.cartan_type.series == "D":
return "The symmetry group of the " + str(n) + "-dimensional demihypercube."
if self.cartan_type.series == "E":
if n == 6:
return "The symmetry group of the 6-polytope."
if n == 7:
return "The symmetry group of the 7-polytope."
if n == 8:
return "The symmetry group of the 8-polytope."
if self.cartan_type.series == "F":
return "The symmetry group of the 24-cell, or icositetrachoron."
if self.cartan_type.series == "G":
return "D6, the dihedral group of order 12, and symmetry group of the hexagon."
def element_order(self, weylelt):
"""
This method returns the order of a given Weyl group element, which should
be specified by the user in the form of products of the generating
reflections, i.e. of the form r1*r2 etc.
        For types A-F, this method currently works by taking the matrix form of
the specified element, and then finding what power of the matrix is the
identity. It then returns this power.
Example
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> b = WeylGroup("B4")
>>> b.element_order('r1*r4*r2')
4
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n+1):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "D":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "E":
a = self.matrix_form(weylelt)
order = 1
while a != eye(8):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "G":
elts = list(weylelt)
reflections = elts[1::3]
m = self.delete_doubles(reflections)
while self.delete_doubles(m) != m:
m = self.delete_doubles(m)
reflections = m
if len(reflections) % 2 == 1:
return 2
elif len(reflections) == 0:
return 1
else:
if len(reflections) == 1:
return 2
else:
m = len(reflections) / 2
lcm = (6 * m)/ igcd(m, 6)
order = lcm / m
return order
if self.cartan_type.series == 'F':
a = self.matrix_form(weylelt)
order = 1
while a != eye(4):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
def delete_doubles(self, reflections):
"""
This is a helper method for determining the order of an element in the
        Weyl group of G2. It takes a Weyl group element and deletes any
        repeated simple reflections in it.
"""
counter = 0
copy = list(reflections)
for elt in copy:
if counter < len(copy)-1:
if copy[counter + 1] == elt:
del copy[counter]
del copy[counter]
counter += 1
return copy
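    # Illustrative behaviour (a single pass may leave new doubles behind,
    # which is why element_order re-applies it in a loop):
    #     delete_doubles(['1', '2', '2', '1'])  ->  ['1', '1']
    #     delete_doubles(['1', '1'])            ->  []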
def matrix_form(self, weylelt):
"""
This method takes input from the user in the form of products of the
generating reflections, and returns the matrix corresponding to the
element of the Weyl group. Since each element of the Weyl group is
a reflection of some type, there is a corresponding matrix representation.
This method uses the standard representation for all the generating
reflections.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> f = WeylGroup("F4")
>>> f.matrix_form('r2*r3')
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1],
[0, 0, 1, 0]])
"""
elts = list(weylelt)
reflections = elts[1::3]
n = self.cartan_type.rank()
if self.cartan_type.series == 'A':
matrixform = eye(n+1)
for elt in reflections:
a = int(elt)
mat = eye(n+1)
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'D':
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a < n:
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
else:
mat[n-2, n-1] = -1
mat[n-2, n-2] = 0
mat[n-1, n-2] = -1
mat[n-1, n-1] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'G':
matrixform = eye(3)
for elt in reflections:
a = int(elt)
if a == 1:
gen1 = Matrix([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
matrixform *= gen1
else:
gen2 = Matrix([[Rational(2, 3), Rational(2, 3), -Rational(1, 3)],
[Rational(2, 3), Rational(-1, 3), Rational(2, 3)], [Rational(-1, 3),
Rational(2, 3), Rational(2, 3)]])
matrixform *= gen2
return matrixform
if self.cartan_type.series == 'F':
matrixform = eye(4)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
matrixform *= mat
elif a == 2:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrixform *= mat
elif a == 3:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
matrixform *= mat
else:
mat = Matrix([[Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2)],
[Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]])
matrixform *= mat
return matrixform
if self.cartan_type.series == 'E':
matrixform = eye(8)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[Rational(3, 4), Rational(1, 4), Rational(1, 4), Rational(1, 4),
Rational(1, 4), Rational(1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(3, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(3, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(3, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(3, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-3, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4)]])
matrixform *= mat
elif a == 2:
mat = eye(8)
mat[0, 0] = 0
mat[0, 1] = -1
mat[1, 0] = -1
mat[1, 1] = 0
matrixform *= mat
else:
mat = eye(8)
mat[a-3, a-3] = 0
mat[a-3, a-2] = 1
mat[a-2, a-3] = 1
mat[a-2, a-2] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'B' or self.cartan_type.series == 'C':
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a == 1:
mat[0, 0] = -1
matrixform *= mat
else:
mat[a - 2, a - 2] = 0
mat[a-2, a-1] = 1
mat[a - 1, a - 2] = 1
mat[a -1, a - 1] = 0
matrixform *= mat
return matrixform
def coxeter_diagram(self):
"""
This method returns the Coxeter diagram corresponding to a Weyl group.
The Coxeter diagram can be obtained from a Lie algebra's Dynkin diagram
by deleting all arrows; the Coxeter diagram is the undirected graph.
The vertices of the Coxeter diagram represent the generating reflections
        of the Weyl group, s_i. An edge is drawn between s_i and s_j if the order
m(i, j) of s_i*s_j is greater than two. If there is one edge, the order
m(i, j) is 3. If there are two edges, the order m(i, j) is 4, and if there
are three edges, the order m(i, j) is 6.
Example
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("B3")
>>> print(c.coxeter_diagram())
0---0===0
1 2 3
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A" or self.cartan_type.series == "D" or self.cartan_type.series == "E":
return self.cartan_type.dynkin_diagram()
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
diag = "---".join("0" for i in range(1, n)) + "===0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
if self.cartan_type.series == "F":
diag = "0---0===0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
if self.cartan_type.series == "G":
diag = "0≡≡≡0\n1 2"
return diag
| mit |
oceanfly/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
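	# Illustrative round trip with the default zoom of 0.5:
	#     us_to_px(2000) == 1.0 and px_to_us(1.0) == 2000.0
	# (us_to_px divides by 10**3 first, so sub-millisecond remainders are
	# truncated by integer division on Python 2.)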
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
Baz2013/blog_demo | leetcode/easy/pascal_triangle.py | 1 | 1246 | # -*- coding:utf-8 -*-
# 118. Pascal's Triangle
# Difficulty: Easy
# Contributors: Admin
# Given numRows, generate the first numRows of Pascal's triangle.
#
# For example, given numRows = 5,
# Return
#
# [
# [1],
# [1,1],
# [1,2,1],
# [1,3,3,1],
# [1,4,6,4,1]
# ]
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = []
if numRows == 0:
return res
elif numRows == 1:
res.append([1])
return res
else:
res.append([1])
res.append([1, 1])
i = 2
while i < numRows:
tmp = list()
tmp.append(1)
pre_lst = res[i - 1]
for n in range(len(pre_lst)):
if n + 1 <= len(pre_lst) - 1:
tmp.append(pre_lst[n] + pre_lst[n + 1])
else:
tmp.append(1)
res.append(tmp)
i += 1
return res
if __name__ == '__main__':
s = Solution()
print s.generate(5)
print s.generate(3)
print s.generate(2)
print s.generate(1)
print s.generate(0)
| gpl-3.0 |
gojira/tensorflow | tensorflow/contrib/coder/python/layers/entropybottleneck.py | 9 | 30102 | # -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Entropy bottleneck layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.coder.python.ops import coder_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
class EntropyBottleneck(base_layer.Layer):
"""Entropy bottleneck layer.
This layer can be used to model the entropy (the amount of information
conveyed) of the tensor passing through it. During training, this can be used
to impose a (soft) entropy constraint on its activations, limiting the amount
of information flowing through the layer. Note that this is distinct from
other types of bottlenecks, which reduce the dimensionality of the space, for
example. Dimensionality reduction does not limit the amount of information,
and does not enable efficient data compression per se.
After training, this layer can be used to compress any input tensor to a
string, which may be written to a file, and to decompress a file which it
previously generated back to a reconstructed tensor (possibly on a different
machine having access to the same model checkpoint). The entropies estimated
during training or evaluation are approximately equal to the average length of
the strings in bits.
The layer implements a flexible probability density model to estimate entropy,
which is described in the appendix of the paper (please cite the paper if you
use this code for scientific work):
"Variational image compression with a scale hyperprior"
Johannes Ballé, David Minnen, Saurabh Singh, Sung Jin Hwang, Nick Johnston
https://arxiv.org/abs/1802.01436
The layer assumes that the input tensor is at least 2D, with a batch dimension
at the beginning and a channel dimension as specified by `data_format`. The
layer trains an independent probability density model for each channel, but
assumes that across all other dimensions, the inputs are i.i.d. (independent
and identically distributed). Because the entropy (and hence, average
codelength) is a function of the densities, this assumption may have a direct
effect on the compression performance.
Because data compression always involves discretization, the outputs of the
layer are generally only approximations of its inputs. During training,
discretization is modeled using additive uniform noise to ensure
differentiability. The entropies computed during training are differential
entropies. During evaluation, the data is actually quantized, and the
entropies are discrete (Shannon entropies). To make sure the approximated
tensor values are good enough for practical purposes, the training phase must
be used to balance the quality of the approximation with the entropy, by
adding an entropy term to the training loss, as in the following example.
Here, we use the entropy bottleneck to compress the latent representation of
an autoencoder. The data vectors `x` in this case are 4D tensors in
`'channels_last'` format (for example, 16x16 pixel grayscale images).
The layer always produces exactly one auxiliary loss and one update op which
are only significant for compression and decompression. To use the compression
feature, the auxiliary loss must be minimized during or after training. After
that, the update op must be executed at least once. Here, we simply attach
them to the main training step.
Training:
```
# Build autoencoder.
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
entropy_bottleneck = EntropyBottleneck()
y_, likelihoods = entropy_bottleneck(y, training=True)
x_ = backward_transform(y_)
# Information content (= predicted codelength) in bits of each batch element
# (note that taking the natural logarithm and dividing by `log(2)` is
# equivalent to taking base-2 logarithms):
bits = tf.reduce_sum(tf.log(likelihoods), axis=(1, 2, 3)) / -np.log(2)
# Squared difference of each batch element:
squared_error = tf.reduce_sum(tf.squared_difference(x, x_), axis=(1, 2, 3))
# The loss is a weighted sum of mean squared error and entropy (average
# information content), where the weight controls the trade-off between
# approximation error and entropy.
main_loss = 0.5 * tf.reduce_mean(squared_error) + tf.reduce_mean(bits)
# Minimize loss and auxiliary loss, and execute update op.
main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
  main_step = main_optimizer.minimize(main_loss)
# 1e-2 is a good starting point for the learning rate of the auxiliary loss,
# assuming Adam is used.
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
  aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
step = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
```
Evaluation:
```
# Build autoencoder.
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
y_, likelihoods = EntropyBottleneck()(y, training=False)
x_ = backward_transform(y_)
# Information content (= predicted codelength) in bits of each batch element:
bits = tf.reduce_sum(tf.log(likelihoods), axis=(1, 2, 3)) / -np.log(2)
# Squared difference of each batch element:
squared_error = tf.reduce_sum(tf.squared_difference(x, x_), axis=(1, 2, 3))
# The loss is a weighted sum of mean squared error and entropy (average
# information content), where the weight controls the trade-off between
# approximation error and entropy.
loss = 0.5 * tf.reduce_mean(squared_error) + tf.reduce_mean(bits)
```
To be able to compress the bottleneck tensor and decompress it in a different
session, or on a different machine, you need three items:
- The compressed representations stored as strings.
- The shape of the bottleneck for these string representations as a `Tensor`,
as well as the number of channels of the bottleneck at graph construction
time.
- The checkpoint of the trained model that was used for compression. Note:
It is crucial that the auxiliary loss produced by this layer is minimized
during or after training, and that the update op is run after training and
minimization of the auxiliary loss, but *before* the checkpoint is saved.
Compression:
```
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
strings = EntropyBottleneck().compress(y)
shape = tf.shape(y)[1:]
```
Decompression:
```
strings = tf.placeholder(tf.string, shape=[None])
shape = tf.placeholder(tf.int32, shape=[3])
entropy_bottleneck = EntropyBottleneck(dtype=tf.float32)
y_ = entropy_bottleneck.decompress(strings, shape, channels=5)
x_ = backward_transform(y_)
```
Here, we assumed that the tensor produced by the forward transform has 5
channels.
The above four use cases can also be implemented within the same session (i.e.
on the same `EntropyBottleneck` instance), for testing purposes, etc., by
calling the object more than once.
Arguments:
init_scale: Float. A scaling factor determining the initial width of the
probability densities. This should be chosen big enough so that the
range of values of the layer inputs roughly falls within the interval
[`-init_scale`, `init_scale`] at the beginning of training.
filters: An iterable of ints, giving the number of filters at each layer of
the density model. Generally, the more filters and layers, the more
expressive is the density model in terms of modeling more complicated
distributions of the layer inputs. For details, refer to the paper
referenced above. The default is `[3, 3, 3]`, which should be sufficient
for most practical purposes.
tail_mass: Float, between 0 and 1. The bottleneck layer automatically
determines the range of input values that should be represented based on
their frequency of occurrence. Values occurring in the tails of the
distributions will be clipped to that range during compression.
`tail_mass` determines the amount of probability mass in the tails which
is cut off in the worst case. For example, the default value of `1e-9`
means that at most 1 in a billion input samples will be clipped to the
range.
optimize_integer_offset: Boolean. Typically, the input values of this layer
are floats, which means that quantization during evaluation can be
performed with an arbitrary offset. By default, the layer determines that
offset automatically. In special situations, such as when it is known that
the layer will receive only full integer values during evaluation, it can
be desirable to set this argument to `False` instead, in order to always
quantize to full integer values.
likelihood_bound: Float. If positive, the returned likelihood values are
ensured to be greater than or equal to this value. This prevents very
large gradients with a typical entropy loss (defaults to 1e-9).
range_coder_precision: Integer, between 1 and 16. The precision of the range
coder used for compression and decompression. This trades off computation
speed with compression efficiency, where 16 is the slowest but most
efficient setting. Choosing lower values may increase the average
codelength slightly compared to the estimated entropies.
data_format: Either `'channels_first'` or `'channels_last'` (default).
trainable: Boolean. Whether the layer should be trained.
name: String. The name of the layer.
dtype: Default dtype of the layer's parameters (default of `None` means use
the type of the first input).
Read-only properties:
init_scale: See above.
filters: See above.
tail_mass: See above.
optimize_integer_offset: See above.
likelihood_bound: See above.
range_coder_precision: See above.
data_format: See above.
name: String. See above.
dtype: See above.
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer. Always contains exactly one
update op, which must be run once after the last training step, before
`compress` or `decompress` is used.
losses: List of losses added by this layer. Always contains exactly one
auxiliary loss, which must be added to the training loss.
Mutable properties:
trainable: Boolean. Whether the layer should be trained.
input_spec: Optional `InputSpec` object specifying the constraints on inputs
that can be accepted by the layer.
"""
def __init__(self, init_scale=10, filters=(3, 3, 3), tail_mass=1e-9,
optimize_integer_offset=True, likelihood_bound=1e-9,
range_coder_precision=16, data_format="channels_last", **kwargs):
super(EntropyBottleneck, self).__init__(**kwargs)
self._init_scale = float(init_scale)
self._filters = tuple(int(f) for f in filters)
self._tail_mass = float(tail_mass)
if not 0 < self.tail_mass < 1:
raise ValueError(
"`tail_mass` must be between 0 and 1, got {}.".format(self.tail_mass))
self._optimize_integer_offset = bool(optimize_integer_offset)
self._likelihood_bound = float(likelihood_bound)
self._range_coder_precision = int(range_coder_precision)
self._data_format = data_format
self._channel_axis(2) # trigger ValueError early
self.input_spec = base_layer.InputSpec(min_ndim=2)
@property
def init_scale(self):
return self._init_scale
@property
def filters(self):
return self._filters
@property
def tail_mass(self):
return self._tail_mass
@property
def optimize_integer_offset(self):
return self._optimize_integer_offset
@property
def likelihood_bound(self):
return self._likelihood_bound
@property
def range_coder_precision(self):
return self._range_coder_precision
@property
def data_format(self):
return self._data_format
def _channel_axis(self, ndim):
try:
return {"channels_first": 1, "channels_last": ndim - 1}[self.data_format]
except KeyError:
raise ValueError("Unsupported `data_format` for {} layer: {}.".format(
self.__class__.__name__, self.data_format))
def _logits_cumulative(self, inputs, stop_gradient):
"""Evaluate logits of the cumulative densities.
Args:
inputs: The values at which to evaluate the cumulative densities, expected
to be a `Tensor` of shape `(channels, 1, batch)`.
stop_gradient: Boolean. Whether to add `array_ops.stop_gradient` calls so
that the gradient of the output with respect to the density model
parameters is disconnected (the gradient with respect to `inputs` is
left untouched).
Returns:
A `Tensor` of the same shape as `inputs`, containing the logits of the
cumulative densities evaluated at the given inputs.
"""
logits = inputs
for i in range(len(self.filters) + 1):
matrix = self._matrices[i]
if stop_gradient:
matrix = array_ops.stop_gradient(matrix)
logits = math_ops.matmul(matrix, logits)
bias = self._biases[i]
if stop_gradient:
bias = array_ops.stop_gradient(bias)
logits += bias
if i < len(self._factors):
factor = self._factors[i]
if stop_gradient:
factor = array_ops.stop_gradient(factor)
logits += factor * math_ops.tanh(logits)
return logits
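  # Shape walk-through (illustrative, not from the original source): with C
  # channels and batch size B, `inputs` is (C, 1, B) and matrix_i has shape
  # (C, filters[i+1], filters[i]), so the channel-wise matmul leaves logits
  # at (C, filters[i+1], B) after layer i; the last layer has one output
  # filter, restoring (C, 1, B).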
def build(self, input_shape):
"""Builds the layer.
Creates the variables for the network modeling the densities, creates the
auxiliary loss estimating the median and tail quantiles of the densities,
and then uses that to create the probability mass functions and the update
op that produces the discrete cumulative density functions used by the range
coder.
Args:
input_shape: Shape of the input tensor, used to get the number of
channels.
Raises:
ValueError: if `input_shape` doesn't specify the length of the channel
dimension.
"""
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._channel_axis(input_shape.ndims)
channels = input_shape[channel_axis].value
if channels is None:
raise ValueError("The channel dimension of the inputs must be defined.")
self.input_spec = base_layer.InputSpec(
ndim=input_shape.ndims, axes={channel_axis: channels})
filters = (1,) + self.filters + (1,)
scale = self.init_scale ** (1 / (len(self.filters) + 1))
# Create variables.
self._matrices = []
self._biases = []
self._factors = []
for i in range(len(self.filters) + 1):
init = np.log(np.expm1(1 / scale / filters[i + 1]))
matrix = self.add_variable(
"matrix_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], filters[i]),
initializer=init_ops.Constant(init))
matrix = nn.softplus(matrix)
self._matrices.append(matrix)
bias = self.add_variable(
"bias_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], 1),
initializer=init_ops.RandomUniform(-.5, .5))
self._biases.append(bias)
if i < len(self.filters):
factor = self.add_variable(
"factor_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], 1),
initializer=init_ops.Zeros())
factor = math_ops.tanh(factor)
self._factors.append(factor)
# To figure out what range of the densities to sample, we need to compute
# the quantiles given by `tail_mass / 2` and `1 - tail_mass / 2`. Since we
# can't take inverses of the cumulative directly, we make it an optimization
# problem:
# `quantiles = argmin(|logit(cumulative) - target|)`
# where `target` is `logit(tail_mass / 2)` or `logit(1 - tail_mass / 2)`.
# Taking the logit (inverse of sigmoid) of the cumulative makes the
# representation of the right target more numerically stable.
# Numerically stable way of computing logits of `tail_mass / 2`
# and `1 - tail_mass / 2`.
target = np.log(2 / self.tail_mass - 1)
# Compute lower and upper tail quantile as well as median.
target = constant_op.constant([-target, 0, target], dtype=self.dtype)
def quantiles_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
assert tuple(shape[1:]) == (1, 3)
init = constant_op.constant(
[[[-self.init_scale, 0, self.init_scale]]], dtype=dtype)
return array_ops.tile(init, (shape[0], 1, 1))
quantiles = self.add_variable(
"quantiles", shape=(channels, 1, 3), dtype=self.dtype,
initializer=quantiles_initializer)
logits = self._logits_cumulative(quantiles, stop_gradient=True)
loss = math_ops.reduce_sum(abs(logits - target))
self.add_loss(loss, inputs=None)
# Save medians for `call`, `compress`, and `decompress`.
self._medians = quantiles[:, :, 1:2]
if not self.optimize_integer_offset:
self._medians = math_ops.round(self._medians)
# Largest distance observed between lower tail quantile and median,
# or between median and upper tail quantile.
minima = math_ops.reduce_max(self._medians - quantiles[:, :, 0:1])
maxima = math_ops.reduce_max(quantiles[:, :, 2:3] - self._medians)
minmax = math_ops.maximum(minima, maxima)
minmax = math_ops.ceil(minmax)
minmax = math_ops.maximum(minmax, 1)
# Sample the density up to `minmax` around the median.
samples = math_ops.range(-minmax, minmax + 1, dtype=self.dtype)
samples += self._medians
half = constant_op.constant(.5, dtype=self.dtype)
# We strip the sigmoid from the end here, so we can use the special rule
# below to only compute differences in the left tail of the sigmoid.
# This increases numerical stability (see explanation in `call`).
lower = self._logits_cumulative(samples - half, stop_gradient=True)
upper = self._logits_cumulative(samples + half, stop_gradient=True)
# Flip signs if we can move more towards the left tail of the sigmoid.
sign = -math_ops.sign(math_ops.add_n([lower, upper]))
pmf = abs(math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
# Add tail masses to first and last bin of pmf, as we clip values for
# compression, meaning that out-of-range values get mapped to these bins.
pmf = array_ops.concat([
math_ops.add_n([pmf[:, 0, :1], math_ops.sigmoid(lower[:, 0, :1])]),
pmf[:, 0, 1:-1],
math_ops.add_n([pmf[:, 0, -1:], math_ops.sigmoid(-upper[:, 0, -1:])]),
], axis=-1)
self._pmf = pmf
cdf = coder_ops.pmf_to_quantized_cdf(
pmf, precision=self.range_coder_precision)
def cdf_getter(*args, **kwargs):
del args, kwargs # ignored
return variable_scope.get_variable(
"quantized_cdf", dtype=dtypes.int32, initializer=cdf,
trainable=False, validate_shape=False, collections=())
# Need to provide a fake shape here since add_variable insists on it.
self._quantized_cdf = self.add_variable(
"quantized_cdf", shape=(channels, 1), dtype=dtypes.int32,
getter=cdf_getter, trainable=False)
update_op = state_ops.assign(
self._quantized_cdf, cdf, validate_shape=False)
self.add_update(update_op, inputs=None)
super(EntropyBottleneck, self).build(input_shape)
def call(self, inputs, training):
"""Pass a tensor through the bottleneck.
Args:
inputs: The tensor to be passed through the bottleneck.
training: Boolean. If `True`, returns a differentiable approximation of
the inputs, and their likelihoods under the modeled probability
densities. If `False`, returns the quantized inputs and their
likelihoods under the corresponding probability mass function. These
quantities can't be used for training, as they are not differentiable,
but represent actual compression more closely.
Returns:
values: `Tensor` with the same shape as `inputs` containing the perturbed
or quantized input values.
likelihood: `Tensor` with the same shape as `inputs` containing the
likelihood of `values` under the modeled probability distributions.
Raises:
ValueError: if `inputs` has different `dtype` or number of channels than
a previous set of inputs the model was invoked with earlier.
"""
inputs = ops.convert_to_tensor(inputs)
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
half = constant_op.constant(.5, dtype=self.dtype)
# Convert to (channels, 1, batch) format by commuting channels to front
# and then collapsing.
order = list(range(ndim))
order.pop(channel_axis)
order.insert(0, channel_axis)
values = array_ops.transpose(inputs, order)
shape = array_ops.shape(values)
values = array_ops.reshape(values, (shape[0], 1, -1))
# Add noise or quantize.
if training:
noise = random_ops.random_uniform(array_ops.shape(values), -half, half)
values = math_ops.add_n([values, noise])
elif self.optimize_integer_offset:
values = math_ops.round(values - self._medians) + self._medians
else:
values = math_ops.round(values)
# Evaluate densities.
# We can use the special rule below to only compute differences in the left
# tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
# for large x, 0 for small x. Subtracting two numbers close to 0 can be done
# with much higher precision than subtracting two numbers close to 1.
lower = self._logits_cumulative(values - half, stop_gradient=False)
upper = self._logits_cumulative(values + half, stop_gradient=False)
# Flip signs if we can move more towards the left tail of the sigmoid.
sign = -math_ops.sign(math_ops.add_n([lower, upper]))
sign = array_ops.stop_gradient(sign)
likelihood = abs(
math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
if self.likelihood_bound > 0:
likelihood_bound = constant_op.constant(
self.likelihood_bound, dtype=self.dtype)
# TODO(jballe): Override gradients.
likelihood = math_ops.maximum(likelihood, likelihood_bound)
# Convert back to input tensor shape.
order = list(range(1, ndim))
order.insert(channel_axis, 0)
values = array_ops.reshape(values, shape)
values = array_ops.transpose(values, order)
likelihood = array_ops.reshape(likelihood, shape)
likelihood = array_ops.transpose(likelihood, order)
if not context.executing_eagerly():
values_shape, likelihood_shape = self.compute_output_shape(inputs.shape)
values.set_shape(values_shape)
likelihood.set_shape(likelihood_shape)
return values, likelihood
def compress(self, inputs):
"""Compress inputs and store their binary representations into strings.
Args:
inputs: `Tensor` with values to be compressed.
Returns:
String `Tensor` vector containing the compressed representation of each
batch element of `inputs`.
"""
with ops.name_scope(self._name_scope()):
inputs = ops.convert_to_tensor(inputs)
if not self.built:
# Check input assumptions set before layer building, e.g. input rank.
self._assert_input_compatibility(inputs)
if self.dtype is None:
self._dtype = inputs.dtype.base_dtype.name
self.build(inputs.shape)
# Check input assumptions set after layer building, e.g. input shape.
if not context.executing_eagerly():
self._assert_input_compatibility(inputs)
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
# Tuple of slices for expanding dimensions of tensors below.
slices = ndim * [None] + [slice(None)]
slices[channel_axis] = slice(None)
slices = tuple(slices)
# Expand dimensions of CDF to input dimensions, keeping the channels along
# the right dimension.
cdf = self._quantized_cdf[slices[1:]]
num_levels = array_ops.shape(cdf)[-1] - 1
# Bring inputs to the right range by centering the range on the medians.
half = constant_op.constant(.5, dtype=self.dtype)
medians = array_ops.squeeze(self._medians, [1, 2])
offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
# Expand offsets to input dimensions and add to inputs.
values = inputs + offsets[slices[:-1]]
# Clip to range and cast to integers. Because we have added .5 above, and
# all values are positive, the cast effectively implements rounding.
values = math_ops.maximum(values, half)
values = math_ops.minimum(
values, math_ops.cast(num_levels, self.dtype) - half)
values = math_ops.cast(values, dtypes.int16)
def loop_body(tensor):
return coder_ops.range_encode(
tensor, cdf, precision=self.range_coder_precision)
strings = functional_ops.map_fn(
loop_body, values, dtype=dtypes.string, back_prop=False)
if not context.executing_eagerly():
strings.set_shape(inputs.shape[:1])
return strings
def decompress(self, strings, shape, channels=None):
"""Decompress values from their compressed string representations.
Args:
strings: A string `Tensor` vector containing the compressed data.
shape: A `Tensor` vector of int32 type. Contains the shape of the tensor
to be decompressed, excluding the batch dimension.
channels: Integer. Specifies the number of channels statically. Needs only
be set if the layer hasn't been built yet (i.e., this is the first input
it receives).
Returns:
The decompressed `Tensor`. Its shape will be equal to `shape` prepended
with the batch dimension from `strings`.
Raises:
ValueError: If the length of `shape` isn't available at graph construction
time.
"""
with ops.name_scope(self._name_scope()):
strings = ops.convert_to_tensor(strings)
shape = ops.convert_to_tensor(shape)
if self.built:
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
if channels is None:
channels = self.input_spec.axes[channel_axis]
else:
if not (shape.shape.is_fully_defined() and shape.shape.ndims == 1):
raise ValueError("`shape` must be a vector with known length.")
ndim = shape.shape[0].value + 1
channel_axis = self._channel_axis(ndim)
input_shape = ndim * [None]
input_shape[channel_axis] = channels
self.build(input_shape)
# Tuple of slices for expanding dimensions of tensors below.
slices = ndim * [None] + [slice(None)]
slices[channel_axis] = slice(None)
slices = tuple(slices)
# Expand dimensions of CDF to input dimensions, keeping the channels along
# the right dimension.
cdf = self._quantized_cdf[slices[1:]]
num_levels = array_ops.shape(cdf)[-1] - 1
def loop_body(string):
return coder_ops.range_decode(
string, shape, cdf, precision=self.range_coder_precision)
outputs = functional_ops.map_fn(
loop_body, strings, dtype=dtypes.int16, back_prop=False)
outputs = math_ops.cast(outputs, self.dtype)
medians = array_ops.squeeze(self._medians, [1, 2])
offsets = math_ops.cast(num_levels // 2, self.dtype) - medians
outputs -= offsets[slices[:-1]]
if not context.executing_eagerly():
outputs_shape = ndim * [None]
outputs_shape[0] = strings.shape[0]
outputs_shape[channel_axis] = channels
outputs.set_shape(outputs_shape)
return outputs
def visualize(self):
"""Multi-channel visualization of densities as images.
    Creates and returns an image summary visualizing the current probability
density estimates. The image contains one row for each channel. Within each
row, the pixel intensities are proportional to probability values, and each
row is centered on the median of the corresponding distribution.
Returns:
The created image summary.
"""
with ops.name_scope(self._name_scope()):
image = self._pmf
image *= 255 / math_ops.reduce_max(image, axis=1, keepdims=True)
image = math_ops.cast(image + .5, dtypes.uint8)
image = image[None, :, :, None]
return summary.image("pmf", image, max_outputs=1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
return input_shape, input_shape
| apache-2.0 |
nevillegrech/stdl | src/STDL/Tests/DependsOnTest.py | 1 | 4539 | import unittest
import sys
sys.path=['..'] + sys.path
from Parser import Parser # code from module you're testing
from DependsOn import *
from Exceptions import *
class SimpleTests(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.parser = Parser()
self.dependsOn = DependsOn()
def testPopulate1(self):
cut=self.dependsOn
cut.tokens=[]
cut.populate()
assert len(cut)==0, len(cut)
assert not cut
def testPopulate2(self):
cut=self.dependsOn
test="""\
dependsOn x1,x2,y1,y2:
<%
c1=new ArrayList();
c1.add(new Point(%x1%,%y1%));
c1.add(new Point(%x2%,%y2%));
%>
<% %>
out:
returns >= (x1+y1) - (x2-y2)
error:
throws > 3"""
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
assert len(cut.subPartitions)==2
assert len(cut)==4,len(cut)
assert len(cut.outPartitionCheck)==1
assert len(cut.errorPartitionCheck)==1
def testPopulateErr(self):
cut=self.dependsOn
test="""\
dependsOn x1,x2,y1,y2:
<%
c1=new ArrayList();
c1.add(new Point(%x1%,%y1%));
c1.add(new Point(%x2%,%y2%));
%>
error:
throws > 3
returns < 2"""
cut.tokens=self.parser.dependsOn.parseString(test)[0]
self.assertRaises(SemanticException,cut.populate)
def getTestSuite1(self):
#Mock Objects
out1=PartitionCheck()
out1.notEmpty, out1.throws, out1.valid, out1.out=True, False, True, True
check1=PartitionCheckItem()
check1.addReturns, check1.code, check1.comparator = True, 3, '>'
check2=PartitionCheckItem()
check2.addReturns, check2.code, check2.comparator = True, 4, '>'
out1.checkItems=[check1,check2]
out2=PartitionCheck()
out2.notEmpty, out2.throws, out2.valid, out2.out=True, False, True, True
out2.checkItems=[check2]
check1=PartitionCheckItem()
check1.addReturns, check1.code, check1.comparator = True, 2, '<'
check2=PartitionCheckItem()
check2.addReturns, check2.code, check2.comparator = True, 4, '<'
dic1=[out1,TestCaseValue(5,0,True,'a'),TestCaseValue(3,0,True,'b'),TestCaseValue(4,0,True,'c')]
dic2=[out2,TestCaseValue(5,0,True,'a'),TestCaseValue(4,0,True,'b'),TestCaseValue(2,1,True,'c')]
return [dic1,dic2]
def testPutValues1(self):
testSuite=self.getTestSuite1()
#DependsOn object
cut=self.dependsOn
test='''\
dependsOn a,b:
d==34
d<a + b
out:
returns > d
'''
cut.inputName='d'
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
cut.valueStart=2
testSuite[0].append(TestCaseValue(index=3))
testSuite[1].append(TestCaseValue(index=3))
cut.putValues(testSuite[0],4)
cut.putValues(testSuite[1],4)
assert testSuite[0][4].value==7,testSuite[0][4]
assert testSuite[1][4].value==8,testSuite[1][4]
assert len(testSuite[0][0].checkItems)==3,len(testSuite[0][0].checkItems)
assert len(testSuite[1][0].checkItems)==2,len(testSuite[1][0].checkItems)
def testPutValues2(self):
testSuite=self.getTestSuite1()
#DependsOn object
cut=self.dependsOn
test='''\
dependsOn a,b:
d<a + b
d==a+2
d==b-2
out:
returns > d
error:
returns < 0
returns < b - a
'''
cut.inputName='d'
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
cut.valueStart=2
testSuite[0].append(TestCaseValue(index=3))
testSuite[1].append(TestCaseValue(index=4))
cut.putValues(testSuite[0],4)
cut.putValues(testSuite[1],4)
assert testSuite[0][4].value==7,testSuite[0][4]
assert testSuite[1][4].value==2,testSuite[1][4]
#Check out partition check here
assert len(testSuite[0][0].checkItems)==3
assert len(testSuite[1][0].checkItems)==2
if __name__ == "__main__":
unittest.main() # run all tests | epl-1.0 |
SukkoPera/Arduino-Sensoria | python/server2.py | 1 | 1152 | #!/usr/bin/env python
import time
import server
import argparse
DEFAULT_NAME = "Server2"
class OutdoorSensor (server.TemperatureSensor):
def __init__ (self):
super (OutdoorSensor, self).__init__ ("OS", "Outdoor Sensor", "20171127")
class OutdoorLight (server.ControlledRelayActuator):
def __init__ (self):
super (OutdoorLight, self).__init__ ("KF", "Outdoor Light", "20171127")
parser = argparse.ArgumentParser ( description = 'Simulate some Sensoria transducers')
parser.add_argument ('-n', '--name', help = 'Server name')
parser.add_argument ('-p', '--port', type = int, default = None, help = 'UDP port to listen on')
parser.add_argument ('-a', '--advertise', type = int, default = None, help = 'Interval between server advertisement messages', metavar = "SECONDS")
args = parser.parse_args ()
os = OutdoorSensor ()
ol = OutdoorLight ()
if args.port:
listener = server.CommandListener (args.name, args.port)
else:
listener = server.CommandListener (args.name)
listener.register_sensor (os)
listener.register_sensor (ol)
if args.advertise:
listener.setAdvertiseInterval (args.advertise)
listener.start ()
while True:
time.sleep (1)
| gpl-3.0 |
ytc301/ec2-auto-scaler | providers/aws.py | 3 | 7385 | import logging
from datetime import timedelta
from datetime import datetime
import time
from boto import ec2
from boto.ec2 import cloudwatch
from errors import ScaleError, InstanceLaunchTimeOut, SpotRequestTimeOut
from providers import Providers
class AWS(Providers):
def __init__(self, access_key_id, secret_access_key, region_name):
self._ec2_conn = ec2.connect_to_region(region_name,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
logging.info('Initialized aws connection to %s' % region_name)
self._cloudwatch_conn = cloudwatch.connect_to_region(region_name,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
logging.info('Initialized cloud watch connection to %s' % region_name)
self._region_name = region_name
def get_connection(self):
return self._ec2_conn
def get_cloudwatch_connection(self):
return self._cloudwatch_conn
def get_instances(self, security_group=None):
conn = self.get_connection()
reservations = conn.get_all_instances()
instances = [inst for resv in reservations
for inst in resv.instances
if inst.state == 'running']
logging.info('Found %s running instances' % len(instances))
if security_group is not None:
logging.info('looking for instances in sg:%s...' % security_group)
instances_in_security_group = []
for inst in instances:
groups = []
for group in inst.groups:
groups.append(group.name)
if security_group in groups:
instances_in_security_group.append(inst)
logging.info('Found %s instances' % len(instances_in_security_group))
return instances_in_security_group
def get_instance_by_id(self, id):
conn = self.get_connection()
reservations = conn.get_all_instances([id])
for resv in reservations:
for instance in resv.instances:
return instance
def wait_for_run(self, instance, timeout=60, interval=5):
trial = timeout / interval
logging.info('Waiting for instance to launch...')
for _ in xrange(trial):
instance.update()
logging.info('Checking... Current State: %s', instance.state)
if instance.state == 'running':
logging.info('Instance running')
break
time.sleep(interval)
else:
logging.error('Cancelling launch due to time out.')
instance.terminate()
raise InstanceLaunchTimeOut()
return instance
def launch_instance(self, instance_properties):
conn = self.get_connection()
resv = conn.run_instances(
instance_properties.ami,
instance_type=instance_properties.type,
security_groups=[instance_properties.security_group],
placement=instance_properties.availability_zone,
key_name=instance_properties.key_pair_name)
for instance in resv.instances:
self.wait_for_run(instance)
conn.create_tags([instance.id],
{'Name': 'auto-' + str(datetime.now())})
instance.update()
return instance
def get_spot_request_by_id(self, id):
conn = self.get_connection()
requests = conn.get_all_spot_instance_requests([id])
for request in requests:
return request
def spot_price(self, instance_properties, hours=6):
conn = self.get_connection()
prices = conn.get_spot_price_history(
start_time=(datetime.utcnow() -
timedelta(hours=hours)).isoformat(),
end_time=datetime.utcnow().isoformat(),
instance_type=instance_properties.type,
product_description='Linux/UNIX',
availability_zone=instance_properties.availability_zone)
spot_price = sum(price.price for price in prices) / len(prices)
logging.info('Spot price seems to be: %s' % spot_price)
return spot_price
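    # Illustrative numbers (made up): with price points [0.10, 0.12, 0.14]
    # in the window, spot_price() returns 0.12; launch_spot_instance() below
    # then bids 3x that average, i.e. 0.36.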
def wait_for_fulfill(self, request, timeout=3000, interval=15):
trial = timeout / interval
logging.info('Waiting for request to complete...')
for _ in xrange(trial):
request = self.get_spot_request_by_id(request.id)
logging.info('Checking... Current State: %s', request.state)
if request.state == 'active':
logging.info('Spot request active')
break
time.sleep(interval)
else:
logging.error('Cancelling spot request due to time out.')
request.cancel()
raise SpotRequestTimeOut()
return request
def launch_spot_instance(self, instance_properties):
conn = self.get_connection()
price = self.spot_price(instance_properties) * 3
logging.info('Requesting spot instance with bid %s ' % price)
requests = conn.request_spot_instances(
price=price,
image_id=instance_properties.ami,
count=1,
instance_type=instance_properties.type,
security_groups=[instance_properties.security_group],
placement=instance_properties.availability_zone,
key_name=instance_properties.key_pair_name)
for request in requests:
request = self.wait_for_fulfill(request)
instance = self.get_instance_by_id(request.instance_id)
self.wait_for_run(instance)
conn.create_tags([instance.id],
{'Name': 'auto-' + str(datetime.now())})
instance.update()
return instance
def cpu_utilization(self, instances, minutes=10):
logging.info('In cpu_utilization()')
logging.info('Getting cloudwatch connection')
conn = self.get_cloudwatch_connection()
stat_sum = 0.0
logging.info('Getting CPU Utilization for instances in list')
for instance in instances:
stats = conn.get_metric_statistics(
period=60,
start_time=datetime.utcnow() -
timedelta(minutes=minutes + 5),
end_time=datetime.utcnow(),
metric_name='CPUUtilization',
namespace='AWS/EC2',
statistics=['Average'],
dimensions={'InstanceId': instance.id})
if stats:
stat_sum += sum(stat['Average'] for stat in stats) / len(stats)
else:
raise ScaleError('Stat seems empty.')
try:
avg_cluster_utilization = stat_sum / len(instances)
except ZeroDivisionError:
raise ScaleError('Cluster has no nodes')
logging.info('Avg cluster utilization is %s' % avg_cluster_utilization)
return avg_cluster_utilization | mit |
sitigrema/sitigrema.github.io | build.py | 1 | 10927 | #!/usr/bin/env python3
import os
import shutil
import json
import yaml
from PIL import Image
from nxtools import *
class GremaProduct():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def data_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".txt")
@property
def image_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".jpg")
@property
def has_image(self):
return os.path.exists(self.image_path)
@property
def meta(self):
group_slug = self.parent.slug
cat_slug = self.parent.parent.slug
return {
"slug" : self.slug,
"title" : self.title,
"group_slug" : group_slug,
"group_title" : self.parent.title,
"cat_slug" : cat_slug,
"cat_title" : self.parent.parent.title,
"has_image" : self.has_image,
"image" : os.path.join("/products", cat_slug, group_slug, "{}.jpg".format(self.slug)) if self.has_image else "false"
}
def build(self, root_dir):
#output_dir = os.path.join(self.site_dir, "products", self.meta["cat_slug"], self.meta["group_slug"])
if not os.path.exists(self.data_path):
logging.warning("{} data file does not exist".format(self.data_path))
return
# read description and pricelist
description = ""
description_done = False
product_text = ""
pricelist = []
for pline in open(self.data_path).readlines():
r = pline.split(":")
if len(r) == 2 and r[1].strip().isdigit():
pricelist.append(r)
continue
if pline.strip() == "":
description_done = True
if not description_done:
description += pline
product_text += pline
description = description.replace("\n", "")
# write file
with open(os.path.join(root_dir, self.meta["slug"] + ".md"), "w") as f:
f.write("---\nlayout: product\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("description : {}\n".format(description))
if pricelist:
f.write("pricing:\n")
for v, c in pricelist:
f.write(" - variant : {}\n".format(v.strip()))
f.write(" price : {}\n".format(c.strip()))
f.write("---\n")
f.write("\n{}\n\n".format(product_text.strip()))
# create images
if self.has_image:
original_image = Image.open(self.image_path)
image_full_path = os.path.join(root_dir, "{}.jpg".format(self.slug))
image_thumb_path = os.path.join(root_dir, "{}_tn.jpg".format(self.slug))
            if not os.path.exists(image_full_path):
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
if not os.path.exists(image_thumb_path):
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
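# The product data files parsed above follow a simple convention
# (illustrative content -- names and prices are made up): description
# lines run until the first blank line, and any 'variant: integer-price'
# line feeds the pricelist.
#
#   A reinforced garden hose for year-round outdoor use.
#
#   25 m: 590
#   50 m: 1090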
class GremaProductGroup():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.products = []
def get_product(self, query):
for product in self.products:
if product.title == query or product.slug == query:
return product
@property
def description(self):
return "TODO: group description"
@property
def data_dir(self):
return self.parent.data_dir
@property
def group_dir(self):
return os.path.join(self.data_dir, self.parent.slug, self.slug)
@property
def site_dir(self):
return self.parent.site_dir
@property
def meta(self):
return {
"title" : self.title,
"slug" : self.slug,
"group_slug" : self.slug, # kvuli zvyraznovani v sidebaru
"cat_slug" : self.parent.slug,
"has_index" : os.path.exists(os.path.join(self.group_dir, "index.txt")),
"has_image" : os.path.exists(os.path.join(self.group_dir, "index.jpg"))
}
@property
def map(self):
result = {key : self.meta[key] for key in self.meta}
result["products"] = [product.meta for product in self.products]
return result
def build(self, root_dir):
group_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(group_dir):
os.makedirs(group_dir)
# Create group index page
with open(os.path.join(group_dir, "index.md"), "w") as f:
f.write("---\nlayout: product_group\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("products:\n")
for product in self.products:
f.write(" - slug: {}\n".format(product.slug))
f.write(" title: {}\n".format(product.title))
f.write(" has_image: {}\n".format(product.has_image))
f.write("---\n\n")
index_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.txt")
if os.path.exists(index_path):
f.write(open(index_path).read())
# Convert index image
index_image_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.jpg")
if os.path.exists(index_image_path):
original_image = Image.open(index_image_path)
image_full_path = os.path.join(group_dir, "index.jpg")
image_thumb_path = os.path.join(group_dir, "index_tn.jpg")
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
# Build products
for product in self.products:
product.build(group_dir)
class GremaCategory():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.load_groups()
def get_product(self, query):
for group in self.groups:
product = group.get_product(query)
if product:
return product
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def map(self):
return {
"title" : self.title,
"slug" : self.slug,
"groups" : [group.map for group in self.groups if (group.products or group.meta["has_index"])]
}
def load_groups(self):
self.groups = []
index_path = os.path.join(self.data_dir, "index-{}.yml".format(self.slug))
if not os.path.exists(index_path):
logging.error("{} does not exist".format(index_path))
return
data = yaml.safe_load(open(index_path))
if not data:
logging.error("No data in {}".format(index_path))
return
for group_title in data.keys():
logging.debug("Creating category {}".format(group_title))
group = GremaProductGroup(self, group_title)
if data[group_title]:
for product_title in data[group_title]:
product = GremaProduct(group, product_title)
group.products.append(product)
self.groups.append(group)
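    # The category index file parsed above ('index-<slug>.yml') maps group
    # titles to lists of product titles, roughly (illustrative names):
    #
    #   Hoses:
    #     - Garden hose 25 m
    #     - Garden hose 50 m
    #   Sprinklers:
    #     - Rotary sprinkler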
def build(self, root_dir):
category_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(category_dir):
os.makedirs(category_dir)
for group in self.groups:
group.build(category_dir)
class GremaSite():
def __init__(self):
self.data_dir = "_source"
self.site_dir = "."
self.load_categories()
def get_product(self, query):
for category in self.categories:
product = category.get_product(query)
if product:
return product
def load_categories(self):
self.categories = []
index_path = os.path.join(self.data_dir, "index.yml")
if not os.path.exists(index_path):
return
for category_title in yaml.safe_load(open(index_path))["categories"]:
category_title = to_unicode(category_title)
self.categories.append(GremaCategory(self, category_title))
def build(self):
product_map = []
root_dir = os.path.join(self.site_dir, "products")
for category in self.categories:
logging.info("Creating category {}".format(category.title))
category.build(root_dir)
cmap = category.map
if cmap["groups"]:
product_map.append(cmap)
product_map_path = os.path.join(self.site_dir, "_data", "products.yml")
with open(product_map_path, 'w') as outfile:
outfile.write(
yaml.dump(product_map)
)
with open("data.json","w") as f:
json.dump(product_map, f)
# Default thumbnail
original_image = Image.open(os.path.join(self.data_dir, "default.png"))
image_full_path = os.path.join(self.site_dir, "static", "default.jpg")
image_thumb_path = os.path.join(self.site_dir, "static", "default_tn.jpg")
image_full = original_image.resize((640, 400), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
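# The generated _data/products.yml (and data.json) is a list of category
# maps shaped roughly like this (illustrative values):
#
#   - title: Irrigation
#     slug: irrigation
#     groups:
#       - title: Hoses
#         slug: hoses
#         products:
#           - slug: garden-hose-25-m
#             title: Garden hose 25 m
#             has_image: true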
def import_products(site, data_dir):
for fname in os.listdir(data_dir):
if os.path.splitext(fname)[1] != ".txt":
continue
product_source_path = os.path.join(data_dir, fname)
base_name = get_base_name(fname)
image_source_path = os.path.join(data_dir, base_name + ".jpg")
product = site.get_product(base_name)
if not product:
continue
product_dir = os.path.dirname(product.data_path)
if not os.path.exists(product_dir):
os.makedirs(product_dir)
shutil.copy2(product_source_path, product.data_path)
if os.path.exists(image_source_path):
shutil.copy2(image_source_path, product.image_path)
if __name__ == "__main__":
grema = GremaSite()
grema.build()
| mit |
wrouesnel/ansible | lib/ansible/module_utils/cloudstack.py | 10 | 23642 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, René Moser <[email protected]>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import time
from ansible.module_utils._text import to_text, to_native
try:
from cs import CloudStack, CloudStackException, read_config
HAS_LIB_CS = True
except ImportError:
HAS_LIB_CS = False
CS_HYPERVISORS = [
'KVM', 'kvm',
'VMware', 'vmware',
'BareMetal', 'baremetal',
'XenServer', 'xenserver',
'LXC', 'lxc',
'HyperV', 'hyperv',
'UCS', 'ucs',
'OVM', 'ovm',
'Simulator', 'simulator',
]
if sys.version_info > (3,):
long = int
def cs_argument_spec():
return dict(
api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
)
def cs_required_together():
return [['api_key', 'api_secret']]
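# Illustrative wiring of these helpers into a module (the extra arguments
# shown are hypothetical; real modules add their own, and AnsibleModule
# comes from ansible.module_utils.basic):
#
#   argument_spec = cs_argument_spec()
#   argument_spec.update(dict(
#       name=dict(required=True),
#       state=dict(choices=['present', 'absent'], default='present'),
#   ))
#   module = AnsibleModule(
#       argument_spec=argument_spec,
#       required_together=cs_required_together(),
#       supports_check_mode=True,
#   )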
class AnsibleCloudStack:
def __init__(self, module):
if not HAS_LIB_CS:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
'diff': {
'before': dict(),
'after': dict()
}
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
# these keys will be compared case sensitive in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
self._cs = None
# Helper for VPCs
self._vpc_networks_ids = None
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.network = None
self.vpc = None
self.zone = None
self.vm = None
self.vm_default_nic = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
self.network_acl = None
@property
def cs(self):
if self._cs is None:
api_config = self.get_api_config()
self._cs = CloudStack(**api_config)
return self._cs
def get_api_config(self):
api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
try:
config = read_config(api_region)
except KeyError:
config = {}
api_config = {
'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
'key': self.module.params.get('api_key') or config.get('key'),
'secret': self.module.params.get('api_secret') or config.get('secret'),
'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
}
self.result.update({
'api_region': api_region,
'api_url': api_config['endpoint'],
'api_key': api_config['key'],
'api_timeout': api_config['timeout'],
'api_http_method': api_config['method'],
})
if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
self.fail_json(msg="Missing api credentials: can not authenticate")
return api_config
def fail_json(self, **kwargs):
self.result.update(kwargs)
self.module.fail_json(**self.result)
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
result = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(value, (int, float, long, complex)):
# ensure we compare the same type
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, float):
current_dict[key] = float(current_dict[key])
elif isinstance(value, long):
current_dict[key] = long(current_dict[key])
elif isinstance(value, complex):
current_dict[key] = complex(current_dict[key])
if value != current_dict[key]:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
result = True
else:
before_value = to_text(current_dict[key])
after_value = to_text(value)
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if before_value != after_value:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
# Test for diff in case insensitive way
elif before_value.lower() != after_value.lower():
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
else:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = None
self.result['diff']['after'][key] = to_text(value)
result = True
return result
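    # Illustrative call from a subclass (field names and values are made
    # up): compare the desired arguments against the current resource and
    # record a change.
    #
    #   args = {'displaytext': 'web-01', 'memory': 1024}
    #   if self.has_changed(args, vm, only_keys=['displaytext', 'memory']):
    #       self.result['changed'] = True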
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def query_api(self, command, **args):
try:
res = getattr(self.cs, command)(**args)
if 'errortext' in res:
self.fail_json(msg="Failed: '%s'" % res['errortext'])
except CloudStackException as e:
self.fail_json(msg='CloudStackException: %s' % to_native(e))
except Exception as e:
self.fail_json(msg=to_native(e))
return res
def get_network_acl(self, key=None):
if self.network_acl is None:
args = {
'name': self.module.params.get('network_acl'),
'vpcid': self.get_vpc(key='id'),
}
network_acls = self.query_api('listNetworkACLLists', **args)
if network_acls:
self.network_acl = network_acls['networkacllist'][0]
self.result['network_acl'] = self.network_acl['name']
if self.network_acl:
return self._get_by_key(key, self.network_acl)
else:
self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
def get_vpc(self, key=None):
"""Return a VPC dictionary or the value of given key of."""
if self.vpc:
return self._get_by_key(key, self.vpc)
vpc = self.module.params.get('vpc')
if not vpc:
vpc = os.environ.get('CLOUDSTACK_VPC')
if not vpc:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
if not vpcs:
self.fail_json(msg="No VPCs available.")
for v in vpcs['vpc']:
if vpc in [v['name'], v['displaytext'], v['id']]:
                # Fail if the identifier matches more than one VPC
                if self.vpc:
                    self.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc)
else:
self.vpc = v
self.result['vpc'] = v['name']
if self.vpc:
return self._get_by_key(key, self.vpc)
self.fail_json(msg="VPC '%s' not found" % vpc)
def is_vpc_network(self, network_id):
"""Returns True if network is in VPC."""
# This is an efficient way to query a lot of networks at a time
if self._vpc_networks_ids is None:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
self._vpc_networks_ids = []
if vpcs:
for vpc in vpcs['vpc']:
for n in vpc.get('network', []):
self._vpc_networks_ids.append(n['id'])
return network_id in self._vpc_networks_ids
def get_network(self, key=None):
"""Return a network dictionary or the value of given key of."""
if self.network:
return self._get_by_key(key, self.network)
network = self.module.params.get('network')
if not network:
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.fail_json(msg="Could not find network for VPC '%s' due missing argument: network" % vpc_name)
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id')
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.fail_json(msg="No networks available.")
for n in networks['network']:
# ignore any VPC network if vpc param is not given
if 'vpcid' in n and not self.get_vpc(key='id'):
continue
if network in [n['displaytext'], n['name'], n['id']]:
self.result['network'] = n['name']
self.network = n
return self._get_by_key(key, self.network)
self.fail_json(msg="Network '%s' not found" % network)
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
project = os.environ.get('CLOUDSTACK_PROJECT')
if not project:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id')
}
projects = self.query_api('listProjects', **args)
if projects:
for p in projects['project']:
if project.lower() in [p['name'].lower(), p['id']]:
self.result['project'] = p['name']
self.project = p
return self._get_by_key(key, self.project)
self.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.fail_json(msg="IP address param 'ip_address' is required")
args = {
'ipaddress': ip_address,
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.query_api('listPublicIpAddresses', **args)
if not ip_addresses:
self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_vm(self, key=None, filter_zone=True):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.fail_json(msg="Virtual machine param 'vm' is required")
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id') if filter_zone else None,
}
vms = self.query_api('listVirtualMachines', **args)
if vms:
for v in vms['virtualmachine']:
if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.vm = v
return self._get_by_key(key, self.vm)
self.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.query_api('listDiskOfferings')
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
if not zone:
zone = os.environ.get('CLOUDSTACK_ZONE')
zones = self.query_api('listZones')
if not zones:
self.fail_json(msg="No zones available. Please create a zone first")
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
self.result['zone'] = self.zone['name']
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone.lower() in [z['name'].lower(), z['id']]:
self.result['zone'] = z['name']
self.zone = z
return self._get_by_key(key, self.zone)
self.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
            return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.query_api('listOsTypes')
if os_types:
for o in os_types['ostype']:
if os_type in [o['description'], o['id']]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.query_api('listHypervisors')
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
account = os.environ.get('CLOUDSTACK_ACCOUNT')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.fail_json(msg="Account must be specified with Domain")
args = {
'name': account,
'domainid': self.get_domain(key='id'),
'listall': True
}
accounts = self.query_api('listAccounts', **args)
if accounts:
self.account = accounts['account'][0]
self.result['account'] = self.account['name']
return self._get_by_key(key, self.account)
self.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
domain = os.environ.get('CLOUDSTACK_DOMAIN')
if not domain:
return None
args = {
'listall': True,
}
domains = self.query_api('listDomains', **args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
self.domain = d
self.result['domain'] = d['path']
return self._get_by_key(key, self.domain)
self.fail_json(msg="Domain '%s' not found" % domain)
def query_tags(self, resource, resource_type):
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
}
tags = self.query_api('listTags', **args)
return self.get_tags(resource=tags, key='tag')
def get_tags(self, resource=None, key='tags'):
existing_tags = []
for tag in resource.get(key) or []:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
'tags': tags,
}
if operation == "create":
response = self.query_api('createTags', **args)
else:
response = self.query_api('deleteTags', **args)
self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.query_api('listCapabilities')
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
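    # Illustrative use from a subclass: most mutating API calls return an
    # async job whose payload is extracted by key (the command and key
    # shown here are examples):
    #
    #   res = self.query_api('deployVirtualMachine', **args)
    #   vm = self.poll_job(res, 'virtualmachine')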
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.items():
if search_key in resource:
self.result[return_key] = resource[search_key]
# Bad bad API does not always return int when it should.
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
self.result[return_key] = int(resource[search_key])
if 'tags' in resource:
self.result['tags'] = resource['tags']
return self.result
| gpl-3.0 |
andrew-aladev/samba-talloc-debug | buildtools/wafadmin/Tools/qt4.py | 16 | 14672 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Qt4 support
If QT4_ROOT is given (absolute path), the configuration will look in it first
This module also demonstrates how to add tasks dynamically (when the build has started)
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os, sys
import ccroot, cxx
import TaskGen, Task, Utils, Runner, Options, Node, Configure
from TaskGen import taskgen, feature, after, extension
from Logs import error
from Constants import *
MOC_H = ['.h', '.hpp', '.hxx', '.hh']
EXT_RCC = ['.qrc']
EXT_UI = ['.ui']
EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C']
class qxx_task(Task.Task):
"A cpp task that may create a moc task dynamically"
before = ['cxx_link', 'static_link']
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.moc_done = 0
def scan(self):
(nodes, names) = ccroot.scan(self)
		# for some reason (variants) the moc node may end up in the list of node deps
for x in nodes:
if x.name.endswith('.moc'):
nodes.remove(x)
names.append(x.relpath_gen(self.inputs[0].parent))
return (nodes, names)
def runnable_status(self):
if self.moc_done:
# if there is a moc task, delay the computation of the file signature
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
# the moc file enters in the dependency calculation
# so we need to recompute the signature when the moc file is present
self.signature()
return Task.Task.runnable_status(self)
else:
# yes, really, there are people who generate cxx files
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
self.add_moc_tasks()
return ASK_LATER
def add_moc_tasks(self):
node = self.inputs[0]
tree = node.__class__.bld
try:
# compute the signature once to know if there is a moc file to create
self.signature()
except KeyError:
# the moc file may be referenced somewhere else
pass
else:
# remove the signature, it must be recomputed with the moc task
delattr(self, 'cache_sig')
moctasks=[]
mocfiles=[]
variant = node.variant(self.env)
try:
tmp_lst = tree.raw_deps[self.unique_id()]
tree.raw_deps[self.unique_id()] = []
except KeyError:
tmp_lst = []
for d in tmp_lst:
if not d.endswith('.moc'): continue
# paranoid check
if d in mocfiles:
error("paranoia owns")
continue
# process that base.moc only once
mocfiles.append(d)
# find the extension (performed only when the .cpp has changes)
base2 = d[:-4]
for path in [node.parent] + self.generator.env['INC_PATHS']:
tree.rescan(path)
vals = getattr(Options.options, 'qt_header_ext', '') or MOC_H
for ex in vals:
h_node = path.find_resource(base2 + ex)
if h_node:
break
else:
continue
break
else:
raise Utils.WafError("no header found for %s which is a moc file" % str(d))
m_node = h_node.change_ext('.moc')
tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), m_node.name)] = h_node
# create the task
task = Task.TaskBase.classes['moc'](self.env, normal=0)
task.set_inputs(h_node)
task.set_outputs(m_node)
generator = tree.generator
generator.outstanding.insert(0, task)
generator.total += 1
moctasks.append(task)
# remove raw deps except the moc files to save space (optimization)
tmp_lst = tree.raw_deps[self.unique_id()] = mocfiles
# look at the file inputs, it is set right above
lst = tree.node_deps.get(self.unique_id(), ())
for d in lst:
name = d.name
if name.endswith('.moc'):
task = Task.TaskBase.classes['moc'](self.env, normal=0)
task.set_inputs(tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), name)]) # 1st element in a tuple
task.set_outputs(d)
generator = tree.generator
generator.outstanding.insert(0, task)
generator.total += 1
moctasks.append(task)
# simple scheduler dependency: run the moc task before others
self.run_after = moctasks
self.moc_done = 1
run = Task.TaskBase.classes['cxx'].__dict__['run']
def translation_update(task):
outs = [a.abspath(task.env) for a in task.outputs]
outs = " ".join(outs)
lupdate = task.env['QT_LUPDATE']
for x in task.inputs:
file = x.abspath(task.env)
cmd = "%s %s -ts %s" % (lupdate, file, outs)
Utils.pprint('BLUE', cmd)
task.generator.bld.exec_command(cmd)
class XMLHandler(ContentHandler):
def __init__(self):
self.buf = []
self.files = []
def startElement(self, name, attrs):
if name == 'file':
self.buf = []
def endElement(self, name):
if name == 'file':
self.files.append(''.join(self.buf))
def characters(self, cars):
self.buf.append(cars)
def scan(self):
"add the dependency on the files referenced in the qrc"
node = self.inputs[0]
parser = make_parser()
curHandler = XMLHandler()
parser.setContentHandler(curHandler)
fi = open(self.inputs[0].abspath(self.env))
parser.parse(fi)
fi.close()
nodes = []
names = []
root = self.inputs[0].parent
for x in curHandler.files:
nd = root.find_resource(x)
if nd: nodes.append(nd)
else: names.append(x)
return (nodes, names)
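# A .qrc file consumed by the scanner above looks like this (illustrative
# resource name):
#
#   <!DOCTYPE RCC><RCC version="1.0">
#   <qresource>
#       <file>images/icon.png</file>
#   </qresource>
#   </RCC>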
@extension(EXT_RCC)
def create_rcc_task(self, node):
"hook for rcc files"
rcnode = node.change_ext('_rc.cpp')
rcctask = self.create_task('rcc', node, rcnode)
cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
self.compiled_tasks.append(cpptask)
return cpptask
@extension(EXT_UI)
def create_uic_task(self, node):
"hook for uic tasks"
uictask = self.create_task('ui4', node)
uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])]
return uictask
class qt4_taskgen(cxx.cxx_taskgen):
def __init__(self, *k, **kw):
cxx.cxx_taskgen.__init__(self, *k, **kw)
self.features.append('qt4')
@extension('.ts')
def add_lang(self, node):
"""add all the .ts file into self.lang"""
self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('qt4')
@after('apply_link')
def apply_qt4(self):
if getattr(self, 'lang', None):
update = getattr(self, 'update', None)
lst=[]
trans=[]
for l in self.to_list(self.lang):
if not isinstance(l, Node.Node):
l = self.path.find_resource(l+'.ts')
t = self.create_task('ts2qm', l, l.change_ext('.qm'))
lst.append(t.outputs[0])
if update:
trans.append(t.inputs[0])
trans_qt4 = getattr(Options.options, 'trans_qt4', False)
if update and trans_qt4:
# we need the cpp files given, except the rcc task we create after
# FIXME may be broken
u = Task.TaskCmd(translation_update, self.env, 2)
u.inputs = [a.inputs[0] for a in self.compiled_tasks]
u.outputs = trans
if getattr(self, 'langname', None):
t = Task.TaskBase.classes['qm2rcc'](self.env)
t.set_inputs(lst)
t.set_outputs(self.path.find_or_declare(self.langname+'.qrc'))
t.path = self.path
k = create_rcc_task(self, t.outputs[0])
self.link_task.inputs.append(k.outputs[0])
self.env.append_value('MOC_FLAGS', self.env._CXXDEFFLAGS)
self.env.append_value('MOC_FLAGS', self.env._CXXINCFLAGS)
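# Illustrative wscript usage of the qt4 feature (names are made up and the
# exact task generator style depends on the waf version in use):
#
#   obj = bld.new_task_gen('cxx', 'program')
#   obj.features.append('qt4')
#   obj.source = 'main.cpp resources.qrc mainwindow.ui'
#   obj.target = 'app'
#   obj.uselib = 'QTCORE QTGUI'
#   obj.lang = 'linguist/app_de'
#   obj.langname = 'app'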
@extension(EXT_QT4)
def cxx_hook(self, node):
# create the compilation task: cpp or cc
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
task = self.create_task('qxx', node, node.change_ext(obj_ext))
self.compiled_tasks.append(task)
return task
def process_qm2rcc(task):
outfile = task.outputs[0].abspath(task.env)
f = open(outfile, 'w')
f.write('<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n')
for k in task.inputs:
f.write(' <file>')
#f.write(k.name)
f.write(k.path_to_parent(task.path))
f.write('</file>\n')
f.write('</qresource>\n</RCC>')
f.close()
b = Task.simple_task_type
b('moc', '${QT_MOC} ${MOC_FLAGS} ${SRC} ${MOC_ST} ${TGT}', color='BLUE', vars=['QT_MOC', 'MOC_FLAGS'], shell=False)
cls = b('rcc', '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath(env)} ${RCC_ST} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', after="qm2rcc", shell=False)
cls.scan = scan
b('ui4', '${QT_UIC} ${SRC} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', shell=False)
b('ts2qm', '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}', color='BLUE', before='qm2rcc', shell=False)
Task.task_type_from_func('qm2rcc', vars=[], func=process_qm2rcc, color='BLUE', before='rcc', after='ts2qm')
def detect_qt4(conf):
env = conf.env
opt = Options.options
qtdir = getattr(opt, 'qtdir', '')
qtbin = getattr(opt, 'qtbin', '')
qtlibs = getattr(opt, 'qtlibs', '')
useframework = getattr(opt, 'use_qt4_osxframework', True)
paths = []
	# the path to qmake has been given explicitly
if qtbin:
paths = [qtbin]
# the qt directory has been given - we deduce the qt binary path
if not qtdir:
qtdir = conf.environ.get('QT4_ROOT', '')
qtbin = os.path.join(qtdir, 'bin')
paths = [qtbin]
# no qtdir, look in the path and in /usr/local/Trolltech
if not qtdir:
paths = os.environ.get('PATH', '').split(os.pathsep)
paths.append('/usr/share/qt4/bin/')
try:
lst = os.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
# keep the highest version
qtdir = '/usr/local/Trolltech/%s/' % lst[0]
qtbin = os.path.join(qtdir, 'bin')
paths.append(qtbin)
# at the end, try to find qmake in the paths given
# keep the one with the highest version
cand = None
prev_ver = ['4', '0', '0']
for qmk in ['qmake-qt4', 'qmake4', 'qmake']:
qmake = conf.find_program(qmk, path_list=paths)
if qmake:
try:
version = Utils.cmd_output([qmake, '-query', 'QT_VERSION']).strip()
except ValueError:
pass
else:
if version:
new_ver = version.split('.')
if new_ver > prev_ver:
cand = qmake
prev_ver = new_ver
if cand:
qmake = cand
else:
conf.fatal('could not find qmake for qt4')
conf.env.QMAKE = qmake
qtincludes = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_HEADERS']).strip()
qtdir = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
qtbin = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep
if not qtlibs:
try:
qtlibs = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_LIBS']).strip() + os.sep
except ValueError:
qtlibs = os.path.join(qtdir, 'lib')
def find_bin(lst, var):
for f in lst:
ret = conf.find_program(f, path_list=paths)
if ret:
env[var]=ret
break
vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split()
find_bin(['uic-qt3', 'uic3'], 'QT_UIC3')
find_bin(['uic-qt4', 'uic'], 'QT_UIC')
if not env['QT_UIC']:
conf.fatal('cannot find the uic compiler for qt4')
try:
version = Utils.cmd_output(env['QT_UIC'] + " -version 2>&1").strip()
except ValueError:
conf.fatal('your uic compiler is for qt3, add uic for qt4 to your path')
version = version.replace('Qt User Interface Compiler ','')
version = version.replace('User Interface Compiler for Qt', '')
if version.find(" 3.") != -1:
conf.check_message('uic version', '(too old)', 0, option='(%s)'%version)
sys.exit(1)
conf.check_message('uic version', '', 1, option='(%s)'%version)
find_bin(['moc-qt4', 'moc'], 'QT_MOC')
find_bin(['rcc'], 'QT_RCC')
find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE')
find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE')
env['UIC3_ST']= '%s -o %s'
env['UIC_ST'] = '%s -o %s'
env['MOC_ST'] = '-o'
env['ui_PATTERN'] = 'ui_%s.h'
env['QT_LRELEASE_FLAGS'] = ['-silent']
vars_debug = [a+'_debug' for a in vars]
try:
conf.find_program('pkg-config', var='pkgconfig', path_list=paths, mandatory=True)
except Configure.ConfigurationError:
for lib in vars_debug+vars:
uselib = lib.upper()
d = (lib.find('_debug') > 0) and 'd' or ''
# original author seems to prefer static to shared libraries
for (pat, kind) in ((conf.env.staticlib_PATTERN, 'STATIC'), (conf.env.shlib_PATTERN, '')):
conf.check_message_1('Checking for %s %s' % (lib, kind))
for ext in ['', '4']:
path = os.path.join(qtlibs, pat % (lib + d + ext))
if os.path.exists(path):
env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
conf.check_message_2('ok ' + path, 'GREEN')
break
path = os.path.join(qtbin, pat % (lib + d + ext))
if os.path.exists(path):
env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
conf.check_message_2('ok ' + path, 'GREEN')
break
else:
conf.check_message_2('not found', 'YELLOW')
continue
break
env.append_unique('LIBPATH_' + uselib, qtlibs)
env.append_unique('CPPPATH_' + uselib, qtincludes)
env.append_unique('CPPPATH_' + uselib, qtincludes + os.sep + lib)
else:
for i in vars_debug+vars:
try:
conf.check_cfg(package=i, args='--cflags --libs --silence-errors', path=conf.env.pkgconfig)
except ValueError:
pass
# the libpaths are set nicely, unfortunately they make really long command-lines
# remove the qtcore ones from qtgui, etc
def process_lib(vars_, coreval):
for d in vars_:
var = d.upper()
if var == 'QTCORE': continue
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if lib in core: continue
accu.append(lib)
env['LIBPATH_'+var] = accu
process_lib(vars, 'LIBPATH_QTCORE')
process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG')
# rpath if wanted
want_rpath = getattr(Options.options, 'want_rpath', 1)
if want_rpath:
def process_rpath(vars_, coreval):
for d in vars_:
var = d.upper()
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if var != 'QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_'+var] = accu
process_rpath(vars, 'LIBPATH_QTCORE')
process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG')
env['QTLOCALE'] = str(env['PREFIX'])+'/share/locale'
def detect(conf):
detect_qt4(conf)
def set_options(opt):
opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
opt.add_option('--header-ext',
type='string',
default='',
help='header extension for moc files',
dest='qt_header_ext')
for i in 'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i, type='string', default='', dest=i)
if sys.platform == "darwin":
opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True)
opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
| gpl-3.0 |
OpenCode/l10n-italy | __unported__/l10n_it_prima_nota_cassa/__openerp__.py | 3 | 1526 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011-2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italian Localisation - Prima Nota Cassa',
'version': '0.1',
'category': 'Localisation/Italy',
'description': """Accounting reports - Prima Nota Cassa - Webkit""",
'author': 'OpenERP Italian Community',
'website': 'http://www.openerp-italia.org',
'license': 'AGPL-3',
"depends" : ['account', 'report_webkit'],
"init_xml" : [
],
"update_xml" : [
'reports.xml',
'wizard/wizard_print_prima_nota_cassa.xml',
],
"demo_xml" : [],
"active": False,
'installable': False
}
| agpl-3.0 |
ethanhlc/streamlink | examples/gst-player.py | 3 | 3875 | #!/usr/bin/env python
from __future__ import print_function
import sys
import gi
gi.require_version("Gst", "1.0")  # must be declared before importing from gi.repository
from gi.repository import GObject as gobject, Gst as gst
from streamlink import Streamlink, StreamError, PluginError, NoPluginError
def exit(msg):
print(msg, file=sys.stderr)
sys.exit()
class StreamlinkPlayer(object):
def __init__(self):
self.fd = None
self.mainloop = gobject.MainLoop()
# This creates a playbin pipeline and using the appsrc source
# we can feed it our stream data
self.pipeline = gst.ElementFactory.make("playbin", None)
self.pipeline.set_property("uri", "appsrc://")
# When the playbin creates the appsrc source it will call
# this callback and allow us to configure it
self.pipeline.connect("source-setup", self.on_source_setup)
# Creates a bus and set callbacks to receive errors
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_eos)
self.bus.connect("message::error", self.on_error)
def exit(self, msg):
self.stop()
exit(msg)
def stop(self):
# Stop playback and exit mainloop
self.pipeline.set_state(gst.State.NULL)
self.mainloop.quit()
# Close the stream
if self.fd:
self.fd.close()
def play(self, stream):
# Attempt to open the stream
try:
self.fd = stream.open()
except StreamError as err:
self.exit("Failed to open stream: {0}".format(err))
# Start playback
self.pipeline.set_state(gst.State.PLAYING)
self.mainloop.run()
def on_source_setup(self, element, source):
# When this callback is called the appsrc expects
# us to feed it more data
source.connect("need-data", self.on_source_need_data)
def on_source_need_data(self, source, length):
# Attempt to read data from the stream
try:
data = self.fd.read(length)
except IOError as err:
self.exit("Failed to read data from stream: {0}".format(err))
# If data is empty it's the end of stream
if not data:
source.emit("end-of-stream")
return
# Convert the Python bytes into a GStreamer Buffer
# and then push it to the appsrc
buf = gst.Buffer.new_wrapped(data)
source.emit("push-buffer", buf)
def on_eos(self, bus, msg):
# Stop playback on end of stream
self.stop()
def on_error(self, bus, msg):
# Print error message and exit on error
error = msg.parse_error()[1]
self.exit(error)
def main():
if len(sys.argv) < 3:
exit("Usage: {0} <url> <quality>".format(sys.argv[0]))
    # Initialize GStreamer (the Gst version requirement is declared at import time above)
gobject.threads_init()
gst.init(None)
# Collect arguments
url = sys.argv[1]
quality = sys.argv[2]
# Create the Streamlink session
streamlink = Streamlink()
# Enable logging
streamlink.set_loglevel("info")
streamlink.set_logoutput(sys.stdout)
# Attempt to fetch streams
try:
streams = streamlink.streams(url)
except NoPluginError:
exit("Streamlink is unable to handle the URL '{0}'".format(url))
except PluginError as err:
exit("Plugin error: {0}".format(err))
if not streams:
exit("No streams found on URL '{0}'".format(url))
# Look for specified stream
if quality not in streams:
exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))
# We found the stream
stream = streams[quality]
# Create the player and start playback
player = StreamlinkPlayer()
# Blocks until playback is done
player.play(stream)
if __name__ == "__main__":
main()
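# Typical invocation (illustrative URL; any Streamlink-supported URL works):
#
#   $ python gst-player.py https://www.twitch.tv/somechannel best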
| bsd-2-clause |
bukalov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/workspace_mock.py | 191 | 1871 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockWorkspace(object):
def find_unused_filename(self, directory, name, extension, search_limit=10):
return "%s/%s.%s" % (directory, name, extension)
def create_zip(self, zip_path, source_path):
self.zip_path = zip_path
self.source_path = source_path
return object() # Something that is not None
| bsd-3-clause |
fujunwei/chromium-crosswalk | ppapi/generators/idl_node.py | 103 | 13144 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST"""
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of The IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_propertynode import IDLPropertyNode
from idl_release import IDLRelease, IDLReleaseMap
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
def __init__(self, name, value):
self.cls = 'ExtAttribute'
self.name = name
self.value = value
def __str__(self):
return '%s=%s' % (self.name, self.value)
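# Example (illustrative): ExtAttribute children are folded into the node's
# property dictionary rather than kept in its child list.
#
#   node = IDLNode('Member', 'file.idl', 1, 0,
#                  [IDLAttribute('NAME', 'PostMessage')])
#   node.GetProperty('NAME')  # -> 'PostMessage'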
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a namepsace and propertynode to
# allow for look-ups. IDLNode is derived from IDLRelease, so it is
# version aware.
#
class IDLNode(IDLRelease):
# Set of object IDLNode types which have a name and belong in the namespace.
NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface',
'Member', 'Param', 'Struct', 'Type', 'Typedef'])
def __init__(self, cls, filename, lineno, pos, children=None):
# Initialize with no starting or ending Version
IDLRelease.__init__(self, None, None)
self.cls = cls
self.lineno = lineno
self.pos = pos
self._filename = filename
self._deps = {}
self.errors = 0
self.namespace = None
self.typelist = None
self.parent = None
self._property_node = IDLPropertyNode()
self._unique_releases = None
# A list of unique releases for this node
self.releases = None
# A map from any release, to the first unique release
self.first_release = None
# self._children is a list of children ordered as defined
self._children = []
# Process the passed in list of children, placing ExtAttributes into the
# property dictionary, and nodes into the local child list in order. In
# addition, add nodes to the namespace if the class is in the NamedSet.
if children:
for child in children:
if child.cls == 'ExtAttribute':
self.SetProperty(child.name, child.value)
else:
self.AddChild(child)
def __str__(self):
name = self.GetName()
if name is None:
name = ''
return '%s(%s)' % (self.cls, name)
def Location(self):
"""Return a file and line number for where this node was defined."""
return '%s(%d)' % (self._filename, self.lineno)
def Error(self, msg):
"""Log an error for this object."""
self.errors += 1
ErrOut.LogLine(self._filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
filenode = self.GetProperty('FILE')
if filenode:
errcnt = filenode.GetProperty('ERRORS')
if not errcnt:
errcnt = 0
filenode.SetProperty('ERRORS', errcnt + 1)
def Warning(self, msg):
"""Log a warning for this object."""
WarnOut.LogLine(self._filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
def GetName(self):
return self.GetProperty('NAME')
def Dump(self, depth=0, comments=False, out=sys.stdout):
"""Dump this object and its children"""
if self.cls in ['Comment', 'Copyright']:
is_comment = True
else:
is_comment = False
# Skip this node if it's a comment, and we are not printing comments
if not comments and is_comment:
return
tab = ''.rjust(depth * 2)
if is_comment:
out.write('%sComment\n' % tab)
for line in self.GetName().split('\n'):
out.write('%s "%s"\n' % (tab, line))
else:
ver = IDLRelease.__str__(self)
if self.releases:
release_list = ': ' + ' '.join(self.releases)
else:
release_list = ': undefined'
out.write('%s%s%s%s\n' % (tab, self, ver, release_list))
if self.typelist:
out.write('%s Typelist: %s\n' % (tab, self.typelist.GetReleases()[0]))
properties = self._property_node.GetPropertyList()
if properties:
out.write('%s Properties\n' % tab)
for p in properties:
if is_comment and p == 'NAME':
# Skip printing the name for comments, since we printed above already
continue
out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p)))
for child in self._children:
child.Dump(depth+1, comments=comments, out=out)
def IsA(self, *typelist):
"""Check if node is of a given type."""
return self.cls in typelist
def GetListOf(self, *keys):
"""Get a list of objects for the given key(s)."""
out = []
for child in self._children:
if child.cls in keys:
out.append(child)
return out
def GetOneOf(self, *keys):
"""Get an object for the given key(s)."""
out = self.GetListOf(*keys)
if out:
return out[0]
return None
def SetParent(self, parent):
self._property_node.AddParent(parent)
self.parent = parent
def AddChild(self, node):
node.SetParent(self)
self._children.append(node)
# Get a list of all children
def GetChildren(self):
return self._children
def GetType(self, release):
if not self.typelist:
return None
return self.typelist.FindRelease(release)
def GetDeps(self, release, visited=None):
visited = visited or set()
# If this release is not valid for this object, then done.
if not self.IsRelease(release) or self.IsA('Comment', 'Copyright'):
return set([])
# If we have cached the info for this release, return the cached value
deps = self._deps.get(release, None)
if deps is not None:
return deps
# If we are already visited, then return
if self in visited:
return set([self])
# Otherwise, build the dependency list
visited |= set([self])
deps = set([self])
# Get child deps
for child in self.GetChildren():
deps |= child.GetDeps(release, visited)
visited |= set(deps)
# Get type deps
typeref = self.GetType(release)
if typeref:
deps |= typeref.GetDeps(release, visited)
self._deps[release] = deps
return deps
def GetVersion(self, release):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetVersion(release)
def GetUniqueReleases(self, releases):
"""Return the unique set of first releases corresponding to input
Since we are returning the corresponding 'first' version for a
release, we may return a release version prior to the one in the list."""
my_min, my_max = self.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
return []
out = set()
for rel in releases:
remapped = self.first_release[rel]
if not remapped:
continue
out |= set([remapped])
# Cache the most recent set of unique_releases
self._unique_releases = sorted(out)
return self._unique_releases
def LastRelease(self, release):
# Get the most recent release from the most recently generated set of
# cached unique releases.
if self._unique_releases and self._unique_releases[-1] > release:
return False
return True
def GetRelease(self, version):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetRelease(version)
def _GetReleaseList(self, releases, visited=None):
visited = visited or set()
if not self.releases:
# If we are unversionable, then return first available release
if self.IsA('Comment', 'Copyright', 'Label'):
self.releases = []
return self.releases
# Generate the first and if deprecated within this subset, the
# last release for this node
my_min, my_max = self.GetMinMax(releases)
if my_max != releases[-1]:
my_releases = set([my_min, my_max])
else:
my_releases = set([my_min])
r = self.GetRelease(self.GetProperty('version'))
if not r in my_releases:
my_releases |= set([r])
# Break cycle if we reference ourselves
if self in visited:
return [my_min]
visited |= set([self])
# Files inherit all their releases from items in the file
if self.IsA('AST', 'File'):
my_releases = set()
# Visit all children
child_releases = set()
# Exclude sibling results from parent visited set
cur_visits = visited
for child in self._children:
child_releases |= set(child._GetReleaseList(releases, cur_visits))
visited |= set(child_releases)
# Visit my type
type_releases = set()
if self.typelist:
type_list = self.typelist.GetReleases()
for typenode in type_list:
type_releases |= set(typenode._GetReleaseList(releases, cur_visits))
type_release_list = sorted(type_releases)
if my_min < type_release_list[0]:
type_node = type_list[0]
self.Error('requires %s in %s which is undefined at %s.' % (
type_node, type_node._filename, my_min))
for rel in child_releases | type_releases:
if rel >= my_min and rel <= my_max:
my_releases |= set([rel])
self.releases = sorted(my_releases)
return self.releases
def BuildReleaseMap(self, releases):
unique_list = self._GetReleaseList(releases)
_, my_max = self.GetMinMax(releases)
self.first_release = {}
last_rel = None
for rel in releases:
if rel in unique_list:
last_rel = rel
self.first_release[rel] = last_rel
if rel == my_max:
last_rel = None
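# Worked example (hypothetical): with releases ['M13', 'M14', 'M15'],
# unique_list ['M13', 'M15'] and my_max 'M15', the loop produces
# first_release = {'M13': 'M13', 'M14': 'M13', 'M15': 'M15'}, i.e. each
# release collapses to the first release whose definition it shares.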
def SetProperty(self, name, val):
self._property_node.SetProperty(name, val)
def GetProperty(self, name):
return self._property_node.GetProperty(name)
def GetPropertyLocal(self, name):
return self._property_node.GetPropertyLocal(name)
def NodeIsDevOnly(self):
"""Returns true iff a node is only in dev channel."""
return self.GetProperty('dev_version') and not self.GetProperty('version')
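# For example, a node carrying only a 'dev_version' property and no released
# 'version' is dev-channel-only, which DevInterfaceMatchesStable uses below.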
def DevInterfaceMatchesStable(self, release):
"""Returns true if an interface has an equivalent stable version."""
assert self.IsA('Interface')
for child in self.GetListOf('Member'):
unique = child.GetUniqueReleases([release])
if not unique or not child.InReleases([release]):
continue
if child.NodeIsDevOnly():
return False
return True
#
# IDLFile
#
# A specialized version of IDLNode which tracks errors and warnings.
#
class IDLFile(IDLNode):
def __init__(self, name, children, errors=0):
attrs = [IDLAttribute('NAME', name),
IDLAttribute('ERRORS', errors)]
if not children:
children = []
IDLNode.__init__(self, 'File', name, 1, 0, attrs + children)
# TODO(teravest): Why do we set release map like this here? This looks
# suspicious...
self.release_map = IDLReleaseMap([('M13', 1.0, 'stable')])
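# Sketch of the effect: every file starts with a single stable entry mapping
# milestone 'M13' to version 1.0, so GetRelease()/GetVersion() lookups on an
# otherwise unannotated file resolve against that baseline.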
#
# Tests
#
def StringTest():
errors = 0
name_str = 'MyName'
text_str = 'MyNode(%s)' % name_str
name_node = IDLAttribute('NAME', name_str)
node = IDLNode('MyNode', 'no file', 1, 0, [name_node])
if node.GetName() != name_str:
ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str))
errors += 1
if node.GetProperty('NAME') != name_str:
ErrOut.Log('Failed to get name property.')
errors += 1
if str(node) != text_str:
ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str))
errors += 1
if not errors:
InfoOut.Log('Passed StringTest')
return errors
def ChildTest():
errors = 0
child = IDLNode('child', 'no file', 1, 0)
parent = IDLNode('parent', 'no file', 1, 0, [child])
if child.parent != parent:
ErrOut.Log('Failed to connect parent.')
errors += 1
if [child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren.')
errors += 1
if child != parent.GetOneOf('child'):
ErrOut.Log('Failed GetOneOf(child)')
errors += 1
if parent.GetOneOf('bogus'):
ErrOut.Log('Failed GetOneOf(bogus)')
errors += 1
if not parent.IsA('parent'):
ErrOut.Log('Expecting parent type')
errors += 1
parent = IDLNode('parent', 'no file', 1, 0, [child, child])
if [child, child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren2.')
errors += 1
if not errors:
InfoOut.Log('Passed ChildTest')
return errors
def Main():
errors = StringTest()
errors += ChildTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
yaroslavprogrammer/django | tests/forms_tests/tests/test_media.py | 131 | 45513 | # -*- coding: utf-8 -*-
from django.forms import TextInput, Media, CharField, Form, MultiWidget
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
STATIC_URL=None,
MEDIA_URL='http://media.example.com/media/',
)
class FormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
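# Illustrative sketch: Media(js=('/a.js',)) + Media(js=('/a.js', '/b.js'))
# renders /a.js once followed by /b.js, preserving first-seen order.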
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
@override_settings(
STATIC_URL='http://media.example.com/static/',
MEDIA_URL='http://media.example.com/media/',
)
class StaticFormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
| bsd-3-clause |
UK992/servo | tests/wpt/web-platform-tests/webdriver/tests/take_screenshot/user_prompts.py | 13 | 3735 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_png, assert_success
from tests.support.inline import inline
def take_screenshot(session):
return session.transport.send(
"GET", "session/{session_id}/screenshot".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
value = assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert_png(value)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
Spiderlover/Toontown | otp/launcher/DummyLauncherBase.py | 5 | 4454 | from pandac.PandaModules import *
from direct.showbase.MessengerGlobal import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.EventManagerGlobal import *
from direct.task.TaskManagerGlobal import *
from direct.task.Task import Task
class DummyLauncherBase:
def __init__(self):
self.logPrefix = ''
self._downloadComplete = False
self.phaseComplete = {}
for phase in self.LauncherPhases:
self.phaseComplete[phase] = 0
self.firstPhase = self.LauncherPhases[0]
self.finalPhase = self.LauncherPhases[-1]
self.launcherFileDbHash = HashVal()
self.serverDbFileHash = HashVal()
self.setPandaErrorCode(0)
self.setServerVersion('dev')
def isDummy(self):
return 1
def startFakeDownload(self):
if ConfigVariableBool('fake-downloads', 0).getValue():
duration = ConfigVariableDouble('fake-download-duration', 60).getValue()
self.fakeDownload(duration)
else:
for phase in self.LauncherPhases:
self.phaseComplete[phase] = 100
self.downloadDoneTask(None)
return
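# Usage sketch: with `fake-downloads 1` and e.g. `fake-download-duration 60`
# in the config, each remaining phase animates over 60 seconds; otherwise all
# phases are reported complete immediately via downloadDoneTask.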
def isTestServer(self):
return base.config.GetBool('is-test-server', 0)
def setPhaseCompleteArray(self, newPhaseComplete):
self.phaseComplete = newPhaseComplete
def setPhaseComplete(self, phase, percent):
self.phaseComplete[phase] = percent
def getPhaseComplete(self, phase):
return self.phaseComplete[phase] >= 100
def setPandaWindowOpen(self):
self.windowOpen = 1
def setPandaErrorCode(self, code):
self.pandaErrorCode = code
def getPandaErrorCode(self):
return self.pandaErrorCode
def setDisconnectDetailsNormal(self):
self.disconnectCode = 0
self.disconnectMsg = 'normal'
def setDisconnectDetails(self, newCode, newMsg):
self.disconnectCode = newCode
self.disconnectMsg = newMsg
def setServerVersion(self, version):
self.ServerVersion = version
def getServerVersion(self):
return self.ServerVersion
def getIsNewInstallation(self):
return base.config.GetBool('new-installation', 0)
def setIsNotNewInstallation(self):
pass
def getLastLogin(self):
if hasattr(self, 'lastLogin'):
return self.lastLogin
return ''
def setLastLogin(self, login):
self.lastLogin = login
def setUserLoggedIn(self):
self.userLoggedIn = 1
def setPaidUserLoggedIn(self):
self.paidUserLoggedIn = 1
def getGameServer(self):
return '206.16.11.19'
def getAccountServer(self):
return ''
def getDeployment(self):
return 'US'
def getBlue(self):
return None
def getPlayToken(self):
return None
def getDISLToken(self):
return None
def fakeDownloadPhaseTask(self, task):
percentComplete = min(100, int(round(task.time / float(task.timePerPhase) * 100)))
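# e.g. with timePerPhase=60 and task.time=30 this yields
# min(100, int(round(30 / 60.0 * 100))) == 50.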
self.setPhaseComplete(task.phase, percentComplete)
messenger.send('launcherPercentPhaseComplete', [task.phase,
percentComplete,
0,
0])
if percentComplete >= 100.0:
messenger.send('phaseComplete-' + str(task.phase))
return Task.done
else:
return Task.cont
def downloadDoneTask(self, task):
self._downloadComplete = True
messenger.send('launcherAllPhasesComplete')
return Task.done
def fakeDownload(self, timePerPhase):
self.phaseComplete = {1: 100,
2: 100,
3: 0,
3.5: 0,
4: 0,
5: 0,
5.5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
11: 0,
12: 0,
13: 0}
phaseTaskList = []
firstPhaseIndex = self.LauncherPhases.index(self.firstPhase)
for phase in self.LauncherPhases[firstPhaseIndex:]:
phaseTask = Task(self.fakeDownloadPhaseTask, 'phaseDownload' + str(phase))
phaseTask.timePerPhase = timePerPhase
phaseTask.phase = phase
phaseTaskList.append(phaseTask)
phaseTaskList.append(Task(self.downloadDoneTask))
downloadSequence = Task.sequence(*phaseTaskList)
taskMgr.remove('downloadSequence')
taskMgr.add(downloadSequence, 'downloadSequence')
| mit |
schets/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
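# X is now a (100, 2) array: column 0 steps from -6 towards 4*pi - 6 with
# Gaussian jitter, and column 1 follows 3*sin(x) plus noise.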
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
randynobx/ansible | lib/ansible/modules/network/panos/panos_lic.py | 78 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
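# pan.xapi can raise PanXapiError even when a license operation succeeded, so
# the helpers below inspect xapi.xml_document to distinguish a benign
# "Successfully" response from a real "Invalid Auth Code" failure.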
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
miconof/headphones | lib/unidecode/x06a.py | 252 | 4674 | data = (
'Di ', # 0x00
'Zhuang ', # 0x01
'Le ', # 0x02
'Lang ', # 0x03
'Chen ', # 0x04
'Cong ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Qing ', # 0x08
'Shuang ', # 0x09
'Fan ', # 0x0a
'Tong ', # 0x0b
'Guan ', # 0x0c
'Ji ', # 0x0d
'Suo ', # 0x0e
'Lei ', # 0x0f
'Lu ', # 0x10
'Liang ', # 0x11
'Mi ', # 0x12
'Lou ', # 0x13
'Chao ', # 0x14
'Su ', # 0x15
'Ke ', # 0x16
'Shu ', # 0x17
'Tang ', # 0x18
'Biao ', # 0x19
'Lu ', # 0x1a
'Jiu ', # 0x1b
'Shu ', # 0x1c
'Zha ', # 0x1d
'Shu ', # 0x1e
'Zhang ', # 0x1f
'Men ', # 0x20
'Mo ', # 0x21
'Niao ', # 0x22
'Yang ', # 0x23
'Tiao ', # 0x24
'Peng ', # 0x25
'Zhu ', # 0x26
'Sha ', # 0x27
'Xi ', # 0x28
'Quan ', # 0x29
'Heng ', # 0x2a
'Jian ', # 0x2b
'Cong ', # 0x2c
'[?] ', # 0x2d
'Hokuso ', # 0x2e
'Qiang ', # 0x2f
'Tara ', # 0x30
'Ying ', # 0x31
'Er ', # 0x32
'Xin ', # 0x33
'Zhi ', # 0x34
'Qiao ', # 0x35
'Zui ', # 0x36
'Cong ', # 0x37
'Pu ', # 0x38
'Shu ', # 0x39
'Hua ', # 0x3a
'Kui ', # 0x3b
'Zhen ', # 0x3c
'Zun ', # 0x3d
'Yue ', # 0x3e
'Zhan ', # 0x3f
'Xi ', # 0x40
'Xun ', # 0x41
'Dian ', # 0x42
'Fa ', # 0x43
'Gan ', # 0x44
'Mo ', # 0x45
'Wu ', # 0x46
'Qiao ', # 0x47
'Nao ', # 0x48
'Lin ', # 0x49
'Liu ', # 0x4a
'Qiao ', # 0x4b
'Xian ', # 0x4c
'Run ', # 0x4d
'Fan ', # 0x4e
'Zhan ', # 0x4f
'Tuo ', # 0x50
'Lao ', # 0x51
'Yun ', # 0x52
'Shun ', # 0x53
'Tui ', # 0x54
'Cheng ', # 0x55
'Tang ', # 0x56
'Meng ', # 0x57
'Ju ', # 0x58
'Cheng ', # 0x59
'Su ', # 0x5a
'Jue ', # 0x5b
'Jue ', # 0x5c
'Tan ', # 0x5d
'Hui ', # 0x5e
'Ji ', # 0x5f
'Nuo ', # 0x60
'Xiang ', # 0x61
'Tuo ', # 0x62
'Ning ', # 0x63
'Rui ', # 0x64
'Zhu ', # 0x65
'Chuang ', # 0x66
'Zeng ', # 0x67
'Fen ', # 0x68
'Qiong ', # 0x69
'Ran ', # 0x6a
'Heng ', # 0x6b
'Cen ', # 0x6c
'Gu ', # 0x6d
'Liu ', # 0x6e
'Lao ', # 0x6f
'Gao ', # 0x70
'Chu ', # 0x71
'Zusa ', # 0x72
'Nude ', # 0x73
'Ca ', # 0x74
'San ', # 0x75
'Ji ', # 0x76
'Dou ', # 0x77
'Shou ', # 0x78
'Lu ', # 0x79
'[?] ', # 0x7a
'[?] ', # 0x7b
'Yuan ', # 0x7c
'Ta ', # 0x7d
'Shu ', # 0x7e
'Jiang ', # 0x7f
'Tan ', # 0x80
'Lin ', # 0x81
'Nong ', # 0x82
'Yin ', # 0x83
'Xi ', # 0x84
'Sui ', # 0x85
'Shan ', # 0x86
'Zui ', # 0x87
'Xuan ', # 0x88
'Cheng ', # 0x89
'Gan ', # 0x8a
'Ju ', # 0x8b
'Zui ', # 0x8c
'Yi ', # 0x8d
'Qin ', # 0x8e
'Pu ', # 0x8f
'Yan ', # 0x90
'Lei ', # 0x91
'Feng ', # 0x92
'Hui ', # 0x93
'Dang ', # 0x94
'Ji ', # 0x95
'Sui ', # 0x96
'Bo ', # 0x97
'Bi ', # 0x98
'Ding ', # 0x99
'Chu ', # 0x9a
'Zhua ', # 0x9b
'Kuai ', # 0x9c
'Ji ', # 0x9d
'Jie ', # 0x9e
'Jia ', # 0x9f
'Qing ', # 0xa0
'Zhe ', # 0xa1
'Jian ', # 0xa2
'Qiang ', # 0xa3
'Dao ', # 0xa4
'Yi ', # 0xa5
'Biao ', # 0xa6
'Song ', # 0xa7
'She ', # 0xa8
'Lin ', # 0xa9
'Kunugi ', # 0xaa
'Cha ', # 0xab
'Meng ', # 0xac
'Yin ', # 0xad
'Tao ', # 0xae
'Tai ', # 0xaf
'Mian ', # 0xb0
'Qi ', # 0xb1
'Toan ', # 0xb2
'Bin ', # 0xb3
'Huo ', # 0xb4
'Ji ', # 0xb5
'Qian ', # 0xb6
'Mi ', # 0xb7
'Ning ', # 0xb8
'Yi ', # 0xb9
'Gao ', # 0xba
'Jian ', # 0xbb
'Yin ', # 0xbc
'Er ', # 0xbd
'Qing ', # 0xbe
'Yan ', # 0xbf
'Qi ', # 0xc0
'Mi ', # 0xc1
'Zhao ', # 0xc2
'Gui ', # 0xc3
'Chun ', # 0xc4
'Ji ', # 0xc5
'Kui ', # 0xc6
'Po ', # 0xc7
'Deng ', # 0xc8
'Chu ', # 0xc9
'[?] ', # 0xca
'Mian ', # 0xcb
'You ', # 0xcc
'Zhi ', # 0xcd
'Guang ', # 0xce
'Qian ', # 0xcf
'Lei ', # 0xd0
'Lei ', # 0xd1
'Sa ', # 0xd2
'Lu ', # 0xd3
'Li ', # 0xd4
'Cuan ', # 0xd5
'Lu ', # 0xd6
'Mie ', # 0xd7
'Hui ', # 0xd8
'Ou ', # 0xd9
'Lu ', # 0xda
'Jie ', # 0xdb
'Gao ', # 0xdc
'Du ', # 0xdd
'Yuan ', # 0xde
'Li ', # 0xdf
'Fei ', # 0xe0
'Zhuo ', # 0xe1
'Sou ', # 0xe2
'Lian ', # 0xe3
'Tamo ', # 0xe4
'Chu ', # 0xe5
'[?] ', # 0xe6
'Zhu ', # 0xe7
'Lu ', # 0xe8
'Yan ', # 0xe9
'Li ', # 0xea
'Zhu ', # 0xeb
'Chen ', # 0xec
'Jie ', # 0xed
'E ', # 0xee
'Su ', # 0xef
'Huai ', # 0xf0
'Nie ', # 0xf1
'Yu ', # 0xf2
'Long ', # 0xf3
'Lai ', # 0xf4
'[?] ', # 0xf5
'Xian ', # 0xf6
'Kwi ', # 0xf7
'Ju ', # 0xf8
'Xiao ', # 0xf9
'Ling ', # 0xfa
'Ying ', # 0xfb
'Jian ', # 0xfc
'Yin ', # 0xfd
'You ', # 0xfe
'Ying ', # 0xff
)
| gpl-3.0 |
hj3938/zulip | zerver/management/commands/email-mirror.py | 114 | 6746 | #!/usr/bin/python
"""
Forward messages sent to the configured email gateway to Zulip.
For zulip.com, messages to that address go to the Inbox of [email protected].
Zulip voyager configurations will differ.
Messages meant for Zulip have a special recipient form of
<stream name>+<regenerable stream token>@streams.zulip.com
This pattern is configurable via the EMAIL_GATEWAY_PATTERN settings.py
variable.
This script can be used via two mechanisms:
1) Run this in a cronjob every N minutes if you have configured Zulip to poll
an external IMAP mailbox for messages. The script will then connect to
your IMAP server and batch-process all messages.
We extract and validate the target stream from information in the
recipient address and retrieve, forward, and archive the message.
2) Alternatively, configure your MTA to execute this script on message
receipt with the contents of the message piped to standard input. The
script will queue the message for processing. In this mode of invocation,
you should pass the destination email address in the ORIGINAL_RECIPIENT
environment variable.
In Postfix, you can express that via an /etc/aliases entry like this:
|/usr/bin/python /home/zulip/deployments/current/manage.py email-mirror
"""
from __future__ import absolute_import
import email
import os
from email.header import decode_header
import logging
import re
import sys
import posix
from django.conf import settings
from django.core.management.base import BaseCommand
from zerver.lib.actions import decode_email_address
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.upload import upload_message_image
from zerver.lib.queue import queue_json_publish
from zerver.models import Stream, get_user_profile_by_email, UserProfile
from zerver.lib.email_mirror import logger, process_message, \
extract_and_validate, ZulipEmailForwardError, \
mark_missed_message_address_as_used, is_missed_message_address
from twisted.internet import protocol, reactor, ssl
from twisted.mail import imap4
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../api"))
import zulip
## Setup ##
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.EMAIL_MIRROR_LOG_PATH)
file_handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
## IMAP callbacks ##
def logout(result, proto):
# Log out.
return proto.logout()
def delete(result, proto):
# Close the connection, which also processes any flags that were
# set on messages.
return proto.close().addCallback(logout, proto)
def fetch(result, proto, mailboxes):
if not result:
return proto.logout()
message_uids = result.keys()
# Make sure we forward the messages in time-order.
message_uids.sort()
for uid in message_uids:
message = email.message_from_string(result[uid]["RFC822"])
process_message(message)
# Delete the processed messages from the Inbox.
message_set = ",".join([result[key]["UID"] for key in message_uids])
d = proto.addFlags(message_set, ["\\Deleted"], uid=True, silent=False)
d.addCallback(delete, proto)
return d
def examine_mailbox(result, proto, mailbox):
# Fetch messages from a particular mailbox.
return proto.fetchMessage("1:*", uid=True).addCallback(fetch, proto, mailbox)
def select_mailbox(result, proto):
# Select which mailbox we care about.
mbox = filter(lambda x: settings.EMAIL_GATEWAY_IMAP_FOLDER in x[2], result)[0][2]
return proto.select(mbox).addCallback(examine_mailbox, proto, result)
def list_mailboxes(res, proto):
# List all of the mailboxes for this account.
return proto.list("","*").addCallback(select_mailbox, proto)
def connected(proto):
d = proto.login(settings.EMAIL_GATEWAY_LOGIN, settings.EMAIL_GATEWAY_PASSWORD)
d.addCallback(list_mailboxes, proto)
d.addErrback(login_failed)
return d
def login_failed(failure):
return failure
def done(_):
reactor.callLater(0, reactor.stop)
def main():
imap_client = protocol.ClientCreator(reactor, imap4.IMAP4Client)
d = imap_client.connectSSL(settings.EMAIL_GATEWAY_IMAP_SERVER, settings.EMAIL_GATEWAY_IMAP_PORT, ssl.ClientContextFactory())
d.addCallbacks(connected, login_failed)
d.addBoth(done)
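# Editorial sketch of the IMAP polling flow (mechanism 1 in the module
# docstring): connectSSL -> connected (login) -> list_mailboxes ->
# select_mailbox -> examine_mailbox -> fetch -> delete -> logout, each step
# chained as a Twisted Deferred callback on the previous one.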
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument('recipient', metavar='<recipient>', type=str, nargs='?', default=None,
help="original recipient")
def handle(self, *args, **options):
rcpt_to = os.environ.get("ORIGINAL_RECIPIENT", options['recipient'])
if rcpt_to is not None:
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Bad or expired missed message address."
exit(posix.EX_NOUSER)
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Please use the address specified in your Streams page."
exit(posix.EX_NOUSER)
# Read in the message, at most 25MiB. This is the limit enforced by
# Gmail, which we use here as a decent metric.
message = sys.stdin.read(25*1024*1024)
if len(sys.stdin.read(1)) != 0:
# We're not at EOF, reject large mail.
print "5.3.4 Message too big for system: Max size is 25MiB"
exit(posix.EX_DATAERR)
queue_json_publish(
"email_mirror",
{
"message": message,
"rcpt_to": rcpt_to
},
lambda x: None
)
else:
# We're probably running from cron, try to batch-process mail
if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
print "Please configure the Email Mirror Gateway in your local_settings.py, or specify $ORIGINAL_RECIPIENT if piping a single mail."
exit(1)
reactor.callLater(0, main)
reactor.run()
| apache-2.0 |
openhumanoids/oh-distro | ipab-distro/tests/systemtests/src/exoticaLWR/test.py | 1 | 1072 | from director import transformUtils
import numpy
import os
if ikPlanner.pushToMatlab==True:
print "FAILURE - pushing requests to matlab"
exit()
qT=numpy.array([0, 0, 0, 0, 0, 0, -6.310489698080346e-05, 0.34103086590766907, 3.8130277971504256e-05, 1.4273228645324707, 5.833456089021638e-05, -0.4845042824745178, -3.8867587136337534e-05])
q0=numpy.array([ 0., 0., 0., 0., 0., 0., 0., 0.78539816, 0., 1.57079633, 0., -0.78539816, 0.])
goalFrame = transformUtils.frameFromPositionAndRPY([0.36932988056397303, -0.009998017176602909, 0.8891143571732633], [-1.3262913021702864e-12, 89.99999979432002, -89.99963750134272])
constraintSet = ikPlanner.planEndEffectorGoal(q0, 'left', goalFrame, lockBase=True, lockBack=True)
q=numpy.array(constraintSet.runIk()[0])
ret=constraintSet.runIkTraj()
if (numpy.abs(q - qT) > 1e-3).any():
print "FAILURE - IK pose incorrect."
exit()
if ret.plan_info[0]!=0:
print "FAILURE - Planner failed."
exit()
# When everything goes all right, report success
with open(os.environ.get('SYSTEMTEST_RESULT_FILE'),'w+') as f:
f.write('1\n')
exit()
| bsd-3-clause |
atsushieno/cerbero | cerbero/tools/libtool.py | 2 | 5912 | #!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.enums import Platform
from cerbero.utils import shell
from cerbero.errors import FatalError
def get_libtool_versions(version, soversion=0):
parts = version.split('.')
if not parts or len(parts) > 3:
raise FatalError('Version must contain three or fewer parts: {!r}'
''.format(version))
try:
major = int(parts[0])
minor = 0
micro = 0
if len(parts) > 1:
minor = int(parts[1])
if len(parts) > 2:
micro = int(parts[2])
except ValueError:
raise FatalError('Invalid version: {!r}'.format(version))
interface_age = 0
if (minor % 2) == 0:
interface_age = micro
binary_age = (100 * minor) + micro
return (soversion, binary_age - interface_age, interface_age)
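# Illustrative example (not in the original source): for version '1.2.3' with
# the default soversion of 0, minor is even so interface_age = micro = 3,
# binary_age = 100 * 2 + 3 = 203, and the function returns (0, 200, 3).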
class LibtoolLibrary(object):
'''
Helper class to create libtool libraries files (.la)
'''
LIBTOOL_TPL = '''\
# %(libname)s - a libtool library file
# Generated by libtool (GNU libtool) 2.4.2 Debian-2.4.2-1ubuntu1
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='%(dlname)s'
# Names of this library.
library_names='%(library_names)s'
# The name of the static archive.
old_library='%(old_library)s'
# Linker flags that can not go in dependency_libs.
inherited_linker_flags=''
# Libraries that this one depends upon.
dependency_libs='%(dependency_libs)s'
# Names of additional weak libraries provided by this library
weak_library_names=''
# Version information for %(libname)s.
current=%(current)s
age=%(age)s
revision=%(revision)s
# Is this an already installed library?
installed=yes
# Should we warn about portability when linking against -modules?
shouldnotlink=no
# Files to dlopen/dlpreopen
dlopen=''
dlpreopen=''
# Directory that this library needs to be installed in:
libdir='%(libdir)s'
'''
def __init__(self, libname, major, minor, micro, libdir, platform,
deps=None, static_only=False):
self.libtool_vars = {
'libname': '',
'dlname': '',
'library_names': '',
'old_library': '',
'dependency_libs': '',
'current': '',
'age': '',
'revision': '',
'libdir': ''}
if platform == Platform.WINDOWS:
shared_ext = 'dll.a'
elif platform in [Platform.DARWIN, Platform.IOS]:
shared_ext = 'dylib'
else:
shared_ext = 'so'
if not libname.startswith('lib'):
libname = 'lib%s' % libname
if deps is None:
deps = ''
self.libname = libname
self.libdir = libdir
self.laname = '%s.la' % libname
dlname_base = '%s.%s' % (libname, shared_ext)
dlname = dlname_base
dlname_all = dlname_base
major_str = ''
minor_str = ''
micro_str = ''
if major is not None:
dlname = '%s.%s' % (dlname_base, major)
major_str = major
if minor is not None:
dlname_all = '%s.%s' % (dlname, minor)
minor_str = minor
if micro is not None:
dlname_all = '%s.%s' % (dlname_all, micro)
micro_str = micro
old_library = '%s.a' % libname
self.change_value('libname', self.laname)
if not static_only:
self.change_value('dlname', dlname)
self.change_value('library_names', '%s %s %s' % (dlname_all, dlname,
dlname_base))
self.change_value('old_library', old_library)
self.change_value('current', minor_str)
self.change_value('age', minor_str)
self.change_value('revision', micro_str)
self.change_value('libdir', libdir)
self.change_value('dependency_libs', self._parse_deps(deps))
def save(self):
path = os.path.join(self.libdir, self.laname)
if shell.DRY_RUN:
print('Creating {}'.format(path))
return
with open(path, 'w') as f:
f.write(self.LIBTOOL_TPL % self.libtool_vars)
def change_value(self, key, val):
self.libtool_vars[key] = val
def _parse_deps(self, deps):
# FIXME: these deps need to be resolved recursively since the list of
# dependency_libs in .la files are exhaustive. For now, recipes are
# handling that.
deps_str = ''
libtool_deps = [x for x in deps if not x.startswith('-l')]
lib_deps = [x for x in deps if x.startswith('-l')]
for d in libtool_deps:
dep_dir, dep_name = os.path.split(d)
if dep_dir:
# we already have a prepended path
deps_str += ' ' + d + '.la '
else:
if not d.startswith('lib'):
d = 'lib' + d
deps_str += ' %s/%s.la ' % (self.libdir, d)
deps_str += ' '.join(lib_deps)
return deps_str
| lgpl-2.1 |
raspberrypi360/python_games | hacker_rank/join_circlesUT.py | 1 | 1942 | import unittest
import join_circles
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testStock(self):
self.assertEqual(18, join_circles.numCombos([1,2,3,4], 3))
self.assertEqual(180, join_circles.numCombos([1,2,3,4,5], 3))
s, num = join_circles.getProduct([1,2,3,4,5], 1)
self.assertEqual(720, s)
s, num = join_circles.getProduct([1,2,3,4,5], 2)
self.assertEqual(5850, s)
s, num = join_circles.getProduct([1,2,3,4,5], 3)
self.assertEqual(25200, s)
k=6
self.assertEqual(150, join_circles.numCombos([i+1 for i in range(k)], 2))
self.assertEqual(900, join_circles.numCombos([i+1 for i in range(k)], 3))
self.assertEqual(2700, join_circles.numCombos([i+1 for i in range(k)], 4))
s, num = join_circles.getProduct([i+1 for i in range(k)], 2)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 2), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 3)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 3), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 4)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 4), s)
k=7
s, num = join_circles.getProduct([i+1 for i in range(k)], 2)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 2), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 3)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 3), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 4)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 4), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 5)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 5), s)
if __name__ == "__main__":
unittest.main() | gpl-2.0 |
GauravSahu/odoo | addons/product/tests/test_pricelist.py | 280 | 6748 | from openerp.tests.common import TransactionCase
class TestPricelist(TransactionCase):
"""Tests for unit of measure conversion"""
def setUp(self):
super(TestPricelist, self).setUp()
cr, uid, context = self.cr, self.uid, {}
self.ir_model_data = self.registry('ir.model.data')
self.product_product = self.registry('product.product')
self.product_pricelist = self.registry('product.pricelist')
self.uom = self.registry('product.uom')
self.usb_adapter_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_48')[1]
self.datacard_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_46')[1]
self.unit_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
self.dozen_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_uom_dozen')[1]
self.tonne_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.product_uom_ton')
self.kg_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.product_uom_kgm')
self.public_pricelist_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'list0')[1]
self.sale_pricelist_id = self.product_pricelist.create(cr, uid, {
'name': 'Sale pricelist',
'type': 'sale',
'version_id': [(0, 0, {
'name': 'v1.0',
'items_id': [(0, 0, {
'name': 'Discount 10%',
'base': 1, # based on public price
'price_discount': -0.1,
'product_id': self.usb_adapter_id
}), (0, 0, {
'name': 'Discount -0.5',
'base': 1, # based on public price
'price_surcharge': -0.5,
'product_id': self.datacard_id
})]
})]
}, context=context)
def test_10_discount(self):
# Make sure the price using a pricelist is the same than without after
# applying the computation manually
cr, uid, context = self.cr, self.uid, {}
public_context = dict(context, pricelist=self.public_pricelist_id)
pricelist_context = dict(context, pricelist=self.sale_pricelist_id)
usb_adapter_without_pricelist = self.product_product.browse(cr, uid, self.usb_adapter_id, context=public_context)
usb_adapter_with_pricelist = self.product_product.browse(cr, uid, self.usb_adapter_id, context=pricelist_context)
self.assertEqual(usb_adapter_with_pricelist.price, usb_adapter_without_pricelist.price*0.9)
datacard_without_pricelist = self.product_product.browse(cr, uid, self.datacard_id, context=public_context)
datacard_with_pricelist = self.product_product.browse(cr, uid, self.datacard_id, context=pricelist_context)
self.assertEqual(datacard_with_pricelist.price, datacard_without_pricelist.price-0.5)
# Make sure that changing the unit of measure does not break the unit
# price (after converting)
unit_context = dict(context,
pricelist=self.sale_pricelist_id,
uom=self.unit_id)
dozen_context = dict(context,
pricelist=self.sale_pricelist_id,
uom=self.dozen_id)
usb_adapter_unit = self.product_product.browse(cr, uid, self.usb_adapter_id, context=unit_context)
usb_adapter_dozen = self.product_product.browse(cr, uid, self.usb_adapter_id, context=dozen_context)
self.assertAlmostEqual(usb_adapter_unit.price*12, usb_adapter_dozen.price)
datacard_unit = self.product_product.browse(cr, uid, self.datacard_id, context=unit_context)
datacard_dozen = self.product_product.browse(cr, uid, self.datacard_id, context=dozen_context)
# price_surcharge applies to product default UoM, here "Units", so surcharge will be multiplied
self.assertAlmostEqual(datacard_unit.price*12, datacard_dozen.price)
def test_20_pricelist_uom(self):
# Verify that the pricelist rules are correctly using the product's default UoM
# as reference, and return a result according to the target UoM (as specific in the context)
cr, uid = self.cr, self.uid
kg, tonne = self.kg_id, self.tonne_id
tonne_price = 100
# make sure 'tonne' resolves down to 1 'kg'.
self.uom.write(cr, uid, tonne, {'rounding': 0.001})
# setup product stored in 'tonnes', with a discounted pricelist for qty > 3 tonnes
spam_id = self.product_product.copy(cr, uid, self.usb_adapter_id,
{ 'name': '1 tonne of spam',
'uom_id': self.tonne_id,
'uos_id': self.tonne_id,
'uom_po_id': self.tonne_id,
'list_price': tonne_price,
})
pricelist_version_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.ver0')
self.registry('product.pricelist.item').create(cr, uid,
{ 'price_version_id': pricelist_version_id,
'sequence': 10,
'name': '3+ tonnes: -10 EUR discount/t',
'base': 1, # based on public price
'min_quantity': 3, # min = 3 tonnes
'price_surcharge': -10, # -10 EUR / tonne
'product_id': spam_id,
})
pricelist_id = self.public_pricelist_id
def test_unit_price(qty, uom, expected_unit_price):
unit_price = self.registry('product.pricelist').price_get(cr, uid, [pricelist_id],
spam_id, qty,
context={'uom': uom})[pricelist_id]
self.assertAlmostEqual(unit_price, expected_unit_price, msg='Computed unit price is wrong')
# Test prices - they are *per unit*, the quantity is only here to match the pricelist rules!
test_unit_price(2, kg, tonne_price / 1000.0)
test_unit_price(2000, kg, tonne_price / 1000.0)
test_unit_price(3500, kg, (tonne_price - 10) / 1000.0)
test_unit_price(2, tonne, tonne_price)
test_unit_price(3, tonne, tonne_price - 10)
| agpl-3.0 |
vrieni/orange | Orange/OrangeWidgets/Unsupervised/OWPCA.py | 6 | 21788 | """
<name>PCA</name>
<description>Perform Principal Component Analysis</description>
<contact>ales.erjavec(@ at @)fri.uni-lj.si</contact>
<icon>icons/PCA.svg</icon>
<tags>pca,principal,component,projection</tags>
<priority>3050</priority>
"""
import sys
import numpy as np
from PyQt4.Qwt5 import QwtPlot, QwtPlotCurve, QwtSymbol
from PyQt4.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import Orange
import Orange.projection.linear as plinear
from OWWidget import *
from OWGraph import OWGraph
import OWGUI
def plot_curve(title=None, pen=None, brush=None, style=QwtPlotCurve.Lines,
symbol=QwtSymbol.Ellipse, legend=True, antialias=True,
auto_scale=True, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
curve = QwtPlotCurve(title or "")
return configure_curve(curve, pen=pen, brush=brush, style=style,
symbol=symbol, legend=legend, antialias=antialias,
auto_scale=auto_scale, xaxis=xaxis, yaxis=yaxis)
def configure_curve(curve, title=None, pen=None, brush=None,
style=QwtPlotCurve.Lines, symbol=QwtSymbol.Ellipse,
legend=True, antialias=True, auto_scale=True,
xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
if title is not None:
curve.setTitle(title)
if pen is not None:
curve.setPen(pen)
if brush is not None:
curve.setBrush(brush)
if not isinstance(symbol, QwtSymbol):
symbol_ = QwtSymbol()
symbol_.setStyle(symbol)
symbol = symbol_
curve.setStyle(style)
curve.setSymbol(QwtSymbol(symbol))
curve.setRenderHint(QwtPlotCurve.RenderAntialiased, antialias)
curve.setItemAttribute(QwtPlotCurve.Legend, legend)
curve.setItemAttribute(QwtPlotCurve.AutoScale, auto_scale)
curve.setAxis(xaxis, yaxis)
return curve
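# Illustrative use (mirrors how OWPCA builds its scree-plot curves below):
#
#   curve = plot_curve("Variance", pen=QPen(Qt.red, 2),
#                      symbol=QwtSymbol.NoSymbol)
#   curve.attach(plot)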
class PlotTool(QObject):
"""
    A base class for plot tools that operate on a QwtPlot's canvas
    widget by installing themselves as its event filter.
"""
cursor = Qt.ArrowCursor
def __init__(self, parent=None, graph=None):
QObject.__init__(self, parent)
self.__graph = None
self.__oldCursor = None
self.setGraph(graph)
def setGraph(self, graph):
"""
Install this tool to operate on ``graph``.
"""
if self.__graph is graph:
return
if self.__graph is not None:
self.uninstall(self.__graph)
self.__graph = graph
if graph is not None:
self.install(graph)
def graph(self):
return self.__graph
def install(self, graph):
canvas = graph.canvas()
canvas.setMouseTracking(True)
canvas.installEventFilter(self)
canvas.destroyed.connect(self.__on_destroyed)
self.__oldCursor = canvas.cursor()
canvas.setCursor(self.cursor)
def uninstall(self, graph):
canvas = graph.canvas()
canvas.removeEventFilter(self)
canvas.setCursor(self.__oldCursor)
canvas.destroyed.disconnect(self.__on_destroyed)
self.__oldCursor = None
def eventFilter(self, obj, event):
if obj is self.__graph.canvas():
return self.canvasEvent(event)
return False
def canvasEvent(self, event):
"""
Main handler for a canvas events.
"""
if event.type() == QEvent.MouseButtonPress:
return self.mousePressEvent(event)
elif event.type() == QEvent.MouseButtonRelease:
return self.mouseReleaseEvent(event)
elif event.type() == QEvent.MouseButtonDblClick:
return self.mouseDoubleClickEvent(event)
elif event.type() == QEvent.MouseMove:
return self.mouseMoveEvent(event)
elif event.type() == QEvent.Leave:
return self.leaveEvent(event)
elif event.type() == QEvent.Enter:
return self.enterEvent(event)
return False
# These are actually event filters (note the return values)
def mousePressEvent(self, event):
return False
def mouseMoveEvent(self, event):
return False
def mouseReleaseEvent(self, event):
return False
def mouseDoubleClickEvent(self, event):
return False
def enterEvent(self, event):
return False
def leaveEvent(self, event):
return False
def keyPressEvent(self, event):
return False
def transform(self, point, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
"""
Transform a QPointF from plot coordinates to canvas local coordinates.
"""
x = self.__graph.transform(xaxis, point.x())
y = self.__graph.transform(yaxis, point.y())
return QPoint(x, y)
def invTransform(self, point, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
"""
Transform a QPoint from canvas local coordinates to plot coordinates.
"""
x = self.__graph.invTransform(xaxis, point.x())
y = self.__graph.invTransform(yaxis, point.y())
return QPointF(x, y)
@Slot()
def __on_destroyed(self, obj):
obj.removeEventFilter(self)
class CutoffControler(PlotTool):
class CutoffCurve(QwtPlotCurve):
pass
cutoffChanged = Signal(float)
cutoffMoved = Signal(float)
cutoffPressed = Signal()
cutoffReleased = Signal()
NoState, Drag = 0, 1
def __init__(self, parent=None, graph=None):
self.__curve = None
self.__range = (0, 1)
self.__cutoff = 0
super(CutoffControler, self).__init__(parent, graph)
self._state = self.NoState
def install(self, graph):
super(CutoffControler, self).install(graph)
assert self.__curve is None
self.__curve = CutoffControler.CutoffCurve("")
configure_curve(self.__curve, symbol=QwtSymbol.NoSymbol, legend=False)
self.__curve.setData([self.__cutoff, self.__cutoff], [0.0, 1.0])
self.__curve.attach(graph)
def uninstall(self, graph):
super(CutoffControler, self).uninstall(graph)
self.__curve.detach()
self.__curve = None
def _toRange(self, value):
minval, maxval = self.__range
return max(min(value, maxval), minval)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
cut = self.invTransform(event.pos()).x()
self.setCutoff(cut)
self.cutoffPressed.emit()
self._state = self.Drag
return True
def mouseMoveEvent(self, event):
if self._state == self.Drag:
cut = self._toRange(self.invTransform(event.pos()).x())
self.setCutoff(cut)
self.cutoffMoved.emit(cut)
else:
cx = self.transform(QPointF(self.cutoff(), 0)).x()
if abs(cx - event.pos().x()) < 2:
self.graph().canvas().setCursor(Qt.SizeHorCursor)
else:
self.graph().canvas().setCursor(self.cursor)
return True
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton and self._state == self.Drag:
cut = self._toRange(self.invTransform(event.pos()).x())
self.setCutoff(cut)
self.cutoffReleased.emit()
self._state = self.NoState
return True
def setCutoff(self, cutoff):
minval, maxval = self.__range
cutoff = max(min(cutoff, maxval), minval)
if self.__cutoff != cutoff:
self.__cutoff = cutoff
if self.__curve is not None:
self.__curve.setData([cutoff, cutoff], [0.0, 1.0])
self.cutoffChanged.emit(cutoff)
if self.graph() is not None:
self.graph().replot()
def cutoff(self):
return self.__cutoff
def setRange(self, minval, maxval):
maxval = max(minval, maxval)
if self.__range != (minval, maxval):
self.__range = (minval, maxval)
self.setCutoff(max(min(self.cutoff(), maxval), minval))
class Graph(OWGraph):
def __init__(self, *args, **kwargs):
super(Graph, self).__init__(*args, **kwargs)
self.gridCurve.attach(self)
# bypass the OWGraph event handlers
def mousePressEvent(self, event):
QwtPlot.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
QwtPlot.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
QwtPlot.mouseReleaseEvent(self, event)
class OWPCA(OWWidget):
settingsList = ["standardize", "max_components", "variance_covered",
"use_generalized_eigenvectors", "auto_commit"]
def __init__(self, parent=None, signalManager=None, title="PCA"):
OWWidget.__init__(self, parent, signalManager, title, wantGraph=True)
self.inputs = [("Input Data", Orange.data.Table, self.set_data)]
self.outputs = [("Transformed Data", Orange.data.Table, Default),
("Eigen Vectors", Orange.data.Table)]
self.standardize = True
self.max_components = 0
self.variance_covered = 100.0
self.use_generalized_eigenvectors = False
self.auto_commit = False
self.loadSettings()
self.data = None
self.changed_flag = False
#####
# GUI
#####
grid = QGridLayout()
box = OWGUI.widgetBox(self.controlArea, "Components Selection",
orientation=grid)
label1 = QLabel("Max components", box)
grid.addWidget(label1, 1, 0)
sb1 = OWGUI.spin(box, self, "max_components", 0, 1000,
tooltip="Maximum number of components",
callback=self.on_update,
addToLayout=False,
keyboardTracking=False
)
self.max_components_spin = sb1.control
self.max_components_spin.setSpecialValueText("All")
grid.addWidget(sb1.control, 1, 1)
label2 = QLabel("Variance covered", box)
grid.addWidget(label2, 2, 0)
sb2 = OWGUI.doubleSpin(box, self, "variance_covered", 1.0, 100.0, 1.0,
tooltip="Percent of variance covered.",
callback=self.on_update,
decimals=1,
addToLayout=False,
keyboardTracking=False
)
sb2.control.setSuffix("%")
grid.addWidget(sb2.control, 2, 1)
OWGUI.rubber(self.controlArea)
box = OWGUI.widgetBox(self.controlArea, "Commit")
cb = OWGUI.checkBox(box, self, "auto_commit", "Commit on any change")
b = OWGUI.button(box, self, "Commit",
callback=self.update_components)
OWGUI.setStopper(self, b, cb, "changed_flag", self.update_components)
self.plot = Graph()
canvas = self.plot.canvas()
canvas.setFrameStyle(QFrame.StyledPanel)
self.mainArea.layout().addWidget(self.plot)
self.plot.setAxisTitle(QwtPlot.yLeft, "Proportion of Variance")
self.plot.setAxisTitle(QwtPlot.xBottom, "Principal Components")
self.plot.setAxisScale(QwtPlot.yLeft, 0.0, 1.0)
self.plot.enableGridXB(True)
self.plot.enableGridYL(True)
self.plot.setGridColor(Qt.lightGray)
self.variance_curve = plot_curve(
"Variance",
pen=QPen(Qt.red, 2),
symbol=QwtSymbol.NoSymbol,
xaxis=QwtPlot.xBottom,
yaxis=QwtPlot.yLeft
)
self.cumulative_variance_curve = plot_curve(
"Cumulative Variance",
pen=QPen(Qt.darkYellow, 2),
symbol=QwtSymbol.NoSymbol,
xaxis=QwtPlot.xBottom,
yaxis=QwtPlot.yLeft
)
self.variance_curve.attach(self.plot)
self.cumulative_variance_curve.attach(self.plot)
self.selection_tool = CutoffControler(parent=self.plot.canvas())
self.selection_tool.cutoffMoved.connect(self.on_cutoff_moved)
self.graphButton.clicked.connect(self.saveToFile)
self.components = None
self.variances = None
self.variances_sum = None
self.projector_full = None
self.currently_selected = 0
self.resize(800, 400)
def clear(self):
"""
Clear (reset) the widget state.
"""
self.data = None
self.selection_tool.setGraph(None)
self.clear_cached()
self.variance_curve.setVisible(False)
self.cumulative_variance_curve.setVisible(False)
def clear_cached(self):
"""Clear cached components
"""
self.components = None
self.variances = None
self.variances_cumsum = None
self.projector_full = None
self.currently_selected = 0
def set_data(self, data=None):
"""Set the widget input data.
"""
self.clear()
if data is not None:
self.data = data
self.on_change()
else:
self.send("Transformed Data", None)
self.send("Eigen Vectors", None)
def on_change(self):
"""Data has changed and we need to recompute the projection.
"""
if self.data is None:
return
self.clear_cached()
self.apply()
def on_update(self):
"""Component selection was changed by the user.
"""
if self.data is None:
return
self.update_cutoff_curve()
if self.currently_selected != self.number_of_selected_components():
self.update_components_if()
def construct_pca_all_comp(self):
pca = plinear.PCA(standardize=self.standardize,
max_components=0,
variance_covered=1,
use_generalized_eigenvectors=self.use_generalized_eigenvectors
)
return pca
def construct_pca(self):
max_components = self.max_components
variance_covered = self.variance_covered
pca = plinear.PCA(standardize=self.standardize,
max_components=max_components,
variance_covered=variance_covered / 100.0,
use_generalized_eigenvectors=self.use_generalized_eigenvectors
)
return pca
def apply(self):
"""
Apply PCA on input data, caching the full projection and
updating the selected components.
"""
pca = self.construct_pca_all_comp()
self.projector_full = pca(self.data)
self.variances = self.projector_full.variances
self.variances /= np.sum(self.variances)
self.variances_cumsum = np.cumsum(self.variances)
self.max_components_spin.setRange(0, len(self.variances))
self.max_components = min(self.max_components,
len(self.variances) - 1)
self.update_scree_plot()
self.update_cutoff_curve()
self.update_components_if()
def update_components_if(self):
if self.auto_commit:
self.update_components()
else:
self.changed_flag = True
def update_components(self):
"""Update the output components.
"""
if self.data is None:
return
scale = self.projector_full.scale
center = self.projector_full.center
components = self.projector_full.projection
input_domain = self.projector_full.input_domain
variances = self.projector_full.variances
# Get selected components (based on max_components and
# variance_coverd)
pca = self.construct_pca()
variances, components, variance_sum = pca._select_components(variances, components)
projector = plinear.PcaProjector(input_domain=input_domain,
standardize=self.standardize,
scale=scale,
center=center,
projection=components,
variances=variances,
variance_sum=variance_sum)
projected_data = projector(self.data)
append_metas(projected_data, self.data)
eigenvectors = self.eigenvectors_as_table(components)
self.currently_selected = self.number_of_selected_components()
self.send("Transformed Data", projected_data)
self.send("Eigen Vectors", eigenvectors)
self.changed_flag = False
def eigenvectors_as_table(self, U):
features = [Orange.feature.Continuous("C%i" % i) \
for i in range(1, U.shape[1] + 1)]
domain = Orange.data.Domain(features, False)
return Orange.data.Table(domain, [list(v) for v in U])
def update_scree_plot(self):
x_space = np.arange(0, len(self.variances))
self.plot.enableAxis(QwtPlot.xBottom, True)
self.plot.enableAxis(QwtPlot.yLeft, True)
if len(x_space) <= 5:
self.plot.setXlabels(["PC" + str(i + 1) for i in x_space])
else:
# Restore continuous plot scale
# TODO: disable minor ticks
self.plot.setXlabels(None)
self.variance_curve.setData(x_space, self.variances)
self.cumulative_variance_curve.setData(x_space, self.variances_cumsum)
self.variance_curve.setVisible(True)
self.cumulative_variance_curve.setVisible(True)
self.selection_tool.setRange(0, len(self.variances) - 1)
self.selection_tool.setGraph(self.plot)
self.plot.replot()
def on_cutoff_moved(self, value):
"""Cutoff curve was moved by the user.
"""
components = int(np.floor(value)) + 1
# Did the number of components actually change
self.max_components = components
self.variance_covered = self.variances_cumsum[components - 1] * 100
if self.currently_selected != self.number_of_selected_components():
self.update_components_if()
def update_cutoff_curve(self):
"""Update cutoff curve from 'Components Selection' control box.
"""
if self.max_components == 0:
# Special "All" value
max_components = len(self.variances_cumsum)
else:
max_components = self.max_components
variance = self.variances_cumsum[max_components - 1] * 100.0
if variance < self.variance_covered:
cutoff = max_components - 1
else:
cutoff = np.searchsorted(self.variances_cumsum,
self.variance_covered / 100.0)
self.selection_tool.setCutoff(float(cutoff + 0.5))
def number_of_selected_components(self):
"""How many components are selected.
"""
if self.data is None:
return 0
variance_components = np.searchsorted(self.variances_cumsum,
self.variance_covered / 100.0)
if self.max_components == 0:
# Special "All" value
max_components = len(self.variances_cumsum)
else:
max_components = self.max_components
return min(variance_components + 1, max_components)
def sendReport(self):
self.reportSettings("PCA Settings",
[("Max. components", self.max_components),
("Variance covered", "%i%%" % self.variance_covered),
])
if self.data is not None and self.projector_full:
output_domain = self.projector_full.output_domain
st_dev = np.sqrt(self.projector_full.variances)
summary = [[""] + [a.name for a in output_domain.attributes],
["Std. deviation"] + ["%.3f" % sd for sd in st_dev],
["Proportion Var"] + ["%.3f" % v for v in self.variances * 100.0],
["Cumulative Var"] + ["%.3f" % v for v in self.variances_cumsum * 100.0]
]
th = "<th>%s</th>".__mod__
header = "".join(map(th, summary[0]))
td = "<td>%s</td>".__mod__
summary = ["".join(map(td, row)) for row in summary[1:]]
tr = "<tr>%s</tr>".__mod__
summary = "\n".join(map(tr, [header] + summary))
summary = "<table>\n%s\n</table>" % summary
self.reportSection("Summary")
self.reportRaw(summary)
self.reportSection("Scree Plot")
self.reportImage(self.plot.saveToFileDirect)
def saveToFile(self):
self.plot.saveToFile()
def append_metas(dest, source):
"""
Append all meta attributes from the `source` table to `dest` table.
The tables must be of the same length.
:param dest:
An data table into which the meta values will be copied.
:type dest: :class:`Orange.data.Table`
:param source:
A data table with the meta attributes/values to be copied into `dest`.
:type source: :class:`Orange.data.Table`
"""
if len(dest) != len(source):
raise ValueError("'dest' and 'source' must have the same length.")
dest.domain.add_metas(source.domain.get_metas())
for dest_inst, source_inst in zip(dest, source):
for meta_id, val in source_inst.get_metas().items():
dest_inst[meta_id] = val
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWPCA()
data = Orange.data.Table("iris")
w.set_data(data)
w.show()
w.set_data(Orange.data.Table("brown-selected"))
app.exec_()
| gpl-3.0 |
redhat-openstack/horizon | openstack_dashboard/test/test_data/utils.py | 14 | 4574 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def load_test_data(load_onto=None):
from openstack_dashboard.test.test_data import ceilometer_data
from openstack_dashboard.test.test_data import cinder_data
from openstack_dashboard.test.test_data import exceptions
from openstack_dashboard.test.test_data import glance_data
from openstack_dashboard.test.test_data import heat_data
from openstack_dashboard.test.test_data import keystone_data
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data import nova_data
from openstack_dashboard.test.test_data import swift_data
# The order of these loaders matters, some depend on others.
loaders = (
exceptions.data,
keystone_data.data,
glance_data.data,
nova_data.data,
cinder_data.data,
neutron_data.data,
swift_data.data,
heat_data.data,
ceilometer_data.data,
)
if load_onto:
for data_func in loaders:
data_func(load_onto)
return load_onto
else:
return TestData(*loaders)
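# Minimal usage sketch (mirrors the TestData doctest below): build a fresh
# fixture bundle, or pass an existing object (e.g. a TestCase in setUp())
# to load the same fixtures onto it:
#
#   TEST = load_test_data()
#   image = TEST.images.first()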
class TestData(object):
"""Holder object for test data. Any functions passed to the init method
will be called with the ``TestData`` object as their only argument. They
can then load data onto the object as desired.
The idea is to use the instantiated object like this::
>>> import glance_data
>>> TEST = TestData(glance_data.data)
>>> TEST.images.list()
[<Image: visible_image>, <Image: invisible_image>]
>>> TEST.images.first()
<Image: visible_image>
You can load as little or as much data as you like as long as the loaders
don't conflict with each other.
See the
:class:`~openstack_dashboard.test.test_data.utils.TestDataContainer`
class for a list of available methods.
"""
def __init__(self, *args):
for data_func in args:
data_func(self)
class TestDataContainer(object):
"""A container for test data objects.
The behavior of this class is meant to mimic a "manager" class, which
has convenient shortcuts for common actions like "list", "filter", "get",
and "add".
"""
def __init__(self):
self._objects = []
def add(self, *args):
"""Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.
"""
for obj in args:
if obj not in self._objects:
self._objects.append(obj)
def list(self):
"""Returns a list of all objects in this container."""
return self._objects
def filter(self, filtered=None, **kwargs):
"""Returns objects in this container whose attributes match the given
keyword arguments.
"""
if filtered is None:
filtered = self._objects
try:
key, value = kwargs.popitem()
except KeyError:
# We're out of filters, return
return filtered
def get_match(obj):
return hasattr(obj, key) and getattr(obj, key) == value
filtered = [obj for obj in filtered if get_match(obj)]
return self.filter(filtered=filtered, **kwargs)
def get(self, **kwargs):
"""Returns the single object in this container whose attributes match
the given keyword arguments. An error will be raised if the arguments
provided don't return exactly one match.
"""
matches = self.filter(**kwargs)
if not matches:
raise Exception("No matches found.")
elif len(matches) > 1:
raise Exception("Multiple matches found.")
else:
return matches.pop()
def first(self):
"""Returns the first object from this container."""
return self._objects[0]
def count(self):
return len(self._objects)
| apache-2.0 |
dbentley/pants | contrib/android/tests/python/pants_test/contrib/android/tasks/test_aapt_gen_integration.py | 14 | 4947 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import unittest
from pants_test.contrib.android.android_integration_test import AndroidIntegrationTest
class AaptGenIntegrationTest(AndroidIntegrationTest):
"""Integration test for AaptGen
The Android SDK is modular, finding an SDK on the PATH is no guarantee that there is
a particular aapt binary on disk. The TOOLS are the ones required by the target in 'test_aapt_gen'
method. If you add a target, you may need to expand the TOOLS list and perhaps define new
BUILD_TOOLS or TARGET_SDK class variables.
"""
TOOLS = [
os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'aapt'),
os.path.join('platforms', 'android-' + AndroidIntegrationTest.TARGET_SDK, 'android.jar')
]
tools = AndroidIntegrationTest.requirements(TOOLS)
def aapt_gen_test(self, target):
pants_run = self.run_pants(['gen', target])
self.assert_success(pants_run)
@unittest.skipUnless(tools, reason='Android integration test requires tools {0!r} '
'and ANDROID_HOME set in path.'.format(TOOLS))
def test_aapt_gen(self):
self.aapt_gen_test(AndroidIntegrationTest.TEST_TARGET)
@unittest.skipUnless(tools, reason='Android integration test requires tools {0!r} '
'and ANDROID_HOME set in path.'.format(TOOLS))
# TODO(mateor) Write a testproject instead of using hello_with_library which may change.
def test_android_library_dep(self):
# Doing the work under a tempdir gives us a handle for the workdir and guarantees a clean build.
with self.temporary_workdir() as workdir:
spec = 'contrib/android/examples/src/android/hello_with_library:'
pants_run = self.run_pants_with_workdir(['gen', '-ldebug', spec], workdir)
self.assert_success(pants_run)
# Make sure that the R.java was produced for the binary and its library dependency.
lib_file = 'gen/aapt/21/org/pantsbuild/examples/example_library/R.java'
apk_file = 'gen/aapt/21/org/pantsbuild/examples/hello_with_library/R.java'
self.assertTrue(os.path.isfile(os.path.join(workdir, lib_file)))
self.assertTrue(os.path.isfile(os.path.join(workdir, apk_file)))
# Scrape debug statements.
def find_aapt_blocks(lines):
for line in lines:
if re.search(r'Executing: .*?\baapt', line):
yield line
aapt_blocks = list(find_aapt_blocks(pants_run.stderr_data.split('\n')))
# Pulling in google-play-services-v21 from the SDK brings in 20 .aar libraries of which only 6
# have resources. Add 2 for android_binary and android_library targets = 8 total invocations.
      self.assertEquals(len(aapt_blocks), 8, 'Expected eight invocations of the aapt tool! '
                        '(was: {})\n{}'.format(len(aapt_blocks),
pants_run.stderr_data))
# Check to make sure the resources are being passed in correct order (apk->libs).
for line in aapt_blocks:
apk = re.search(r'hello_with_library.*?\b', line)
library = re.search(r'contrib/android/examples/src/android/example_library/AndroidManifest.*?\b', line)
resource_dirs = re.findall(r'-S ([^\s]+)', line)
if apk:
# The order of resource directories should mirror the dependencies. The dependency order
# is hello_with_library -> example_library -> gms-library.
self.assertEqual(resource_dirs[0], 'contrib/android/examples/src/android/hello_with_library/main/res')
self.assertEqual(resource_dirs[1], 'contrib/android/examples/src/android/example_library/res')
self.assertEqual(len(resource_dirs), 8, 'Expected eight resource dirs to be included '
'when calling aapt on hello_with_library apk. '
'(was: {})\n'.format(len(resource_dirs)))
elif library:
# The seven invocations are the example_library and the 6 gms dependencies.
self.assertEqual(len(resource_dirs), 7, 'Expected seven resource dir to be included '
'when calling aapt on example_library dep. '
'(was: {})\n'.format(len(resource_dirs)))
else:
self.assertEqual(len(resource_dirs), 1, 'Expected one resource dir to be included when '
'calling aapt on each gms-library dep. '
'(was: {})\n'.format(len(resource_dirs)))
| apache-2.0 |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/sqlalchemy/event/registry.py | 77 | 7468 | # event/registry.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides managed registration services on behalf of :func:`.listen`
arguments.
By "managed registration", we mean that event listening functions and
other objects can be added to various collections in such a way that their
membership in all those collections can be revoked at once, based on
an equivalent :class:`._EventKey`.
"""
from __future__ import absolute_import
import weakref
import collections
import types
from .. import exc, util
_key_to_collection = collections.defaultdict(dict)
"""
Given an original listen() argument, can locate all
listener collections and the listener fn contained
(target, identifier, fn) -> {
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
}
"""
_collection_to_key = collections.defaultdict(dict)
"""
Given a _ListenerCollection or _DispatchDescriptor, can locate
all the original listen() arguments and the listener fn contained
ref(listenercollection) -> {
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
}
"""
def _collection_gced(ref):
# defaultdict, so can't get a KeyError
if not _collection_to_key or ref not in _collection_to_key:
return
listener_to_key = _collection_to_key.pop(ref)
for key in listener_to_key.values():
if key in _key_to_collection:
# defaultdict, so can't get a KeyError
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(ref)
if not dispatch_reg:
_key_to_collection.pop(key)
def _stored_in_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
owner_ref = owner.ref
listen_ref = weakref.ref(event_key._listen_fn)
if owner_ref in dispatch_reg:
assert dispatch_reg[owner_ref] == listen_ref
else:
dispatch_reg[owner_ref] = listen_ref
listener_to_key = _collection_to_key[owner_ref]
listener_to_key[listen_ref] = key
def _removed_from_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
listen_ref = weakref.ref(event_key._listen_fn)
owner_ref = owner.ref
dispatch_reg.pop(owner_ref, None)
if not dispatch_reg:
del _key_to_collection[key]
if owner_ref in _collection_to_key:
listener_to_key = _collection_to_key[owner_ref]
listener_to_key.pop(listen_ref)
def _stored_in_collection_multi(newowner, oldowner, elements):
if not elements:
return
oldowner = oldowner.ref
newowner = newowner.ref
old_listener_to_key = _collection_to_key[oldowner]
new_listener_to_key = _collection_to_key[newowner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = old_listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
if newowner in dispatch_reg:
assert dispatch_reg[newowner] == listen_ref
else:
dispatch_reg[newowner] = listen_ref
new_listener_to_key[listen_ref] = key
def _clear(owner, elements):
if not elements:
return
owner = owner.ref
listener_to_key = _collection_to_key[owner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(owner, None)
if not dispatch_reg:
del _key_to_collection[key]
class _EventKey(object):
"""Represent :func:`.listen` arguments.
"""
def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self):
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap):
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap
)
def with_dispatch_target(self, dispatch_target):
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap
)
def listen(self, *args, **kw):
once = kw.pop("once", False)
if once:
self.with_wrapper(util.only_once(self._listen_fn)).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self):
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s " %
(self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self):
"""Return True if this event key is registered to listen.
"""
return self._key in _key_to_collection
def base_listen(self, propagate=False, insert=False,
named=False):
target, identifier, fn = \
self.dispatch_target, self.identifier, self._listen_fn
dispatch_descriptor = getattr(target.dispatch, identifier)
fn = dispatch_descriptor._adjust_fn_spec(fn, named)
self = self.with_wrapper(fn)
if insert:
dispatch_descriptor.\
for_modify(target.dispatch).insert(self, propagate)
else:
dispatch_descriptor.\
for_modify(target.dispatch).append(self, propagate)
@property
def _listen_fn(self):
return self.fn_wrap or self.fn
def append_value_to_list(self, owner, list_, value):
_stored_in_collection(self, owner)
list_.append(value)
def append_to_list(self, owner, list_):
_stored_in_collection(self, owner)
list_.append(self._listen_fn)
def remove_from_list(self, owner, list_):
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(self, owner, list_):
_stored_in_collection(self, owner)
list_.insert(0, self._listen_fn)
| mit |
ShineFan/odoo | addons/report_intrastat/__init__.py | 377 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vipulkanade/EventbriteDjango | lib/python2.7/site-packages/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
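def _demo():
    # Illustrative sketch only (not part of the original chardet module).
    # Real models (e.g. UTF8SMModel in chardet.mbcssm) define the same keys
    # with full 256-entry class tables; the toy model below is a
    # hypothetical two-class, two-state machine for demonstration.
    toy_model = {
        'classTable': [0] * 128 + [1] * 128,  # class 0: ASCII, class 1: high bytes
        'classFactor': 2,                     # number of byte classes
        'stateTable': [eStart, eStart,        # next state for (state 0, class 0/1)
                       eStart, eStart],       # next state for (state 1, class 0/1)
        'charLenTable': [1, 2],               # guessed char length per byte class
        'name': 'toy-model',
    }
    sm = CodingStateMachine(toy_model)
    state = None
    for byte in b'ab':
        state = sm.next_state(byte)  # feed the machine one byte at a time
    return sm.get_coding_state_machine(), state, sm.get_current_charlen()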
| mit |
karthikvadla16/spark-tk | python/sparktk/graph/ops/betweenness_centrality.py | 7 | 2366 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def betweenness_centrality(self, edge_weight=None, normalize=True):
"""
**Betweenness Centrality**
Calculates the betweenness centrality exactly, with an optional weights parameter
for the distance between the vertices.
Parameters
----------
    :param edge_weight: (Optional(str)) The name of the column containing the edge weights.
        If None, every edge is assigned a weight of 1.
:param normalize: (Optional(bool)) If true, normalize the betweenness centrality values
by the number of pairwise paths possible
:return: (Frame) Frame containing the vertex IDs and their corresponding betweenness centrality value
Examples
--------
>>> vertex_schema = [('id', int)]
>>> edge_schema = [('src', int), ('dst', int)]
>>> vertex_rows = [ [1], [2], [3], [4], [5] ]
>>> edge_rows = [ [1, 2], [1, 3], [2, 3], [1, 4], [4, 5] ]
>>> vertex_frame = tc.frame.create(vertex_rows, vertex_schema)
>>> edge_frame = tc.frame.create(edge_rows, edge_schema)
>>> graph = tc.graph.create(vertex_frame, edge_frame)
>>> result = graph.betweenness_centrality()
>>> result.inspect()
[#] id betweenness_centrality
===============================
[0] 1 0.666666666667
[1] 2 0.0
[2] 3 0.0
[3] 4 0.5
[4] 5 0.0
"""
from sparktk.frame.frame import Frame
return Frame(self._tc, self._scala.betweennessCentrality(self._tc.jutils.convert.to_scala_option(edge_weight), normalize))
| apache-2.0 |
citrix-openstack-build/python-novaclient | novaclient/v1_1/client.py | 7 | 7348 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import client
from novaclient.v1_1 import agents
from novaclient.v1_1 import certs
from novaclient.v1_1 import cloudpipe
from novaclient.v1_1 import aggregates
from novaclient.v1_1 import availability_zones
from novaclient.v1_1 import coverage_ext
from novaclient.v1_1 import flavors
from novaclient.v1_1 import flavor_access
from novaclient.v1_1 import floating_ip_dns
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import floating_ip_pools
from novaclient.v1_1 import fping
from novaclient.v1_1 import hosts
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import images
from novaclient.v1_1 import keypairs
from novaclient.v1_1 import limits
from novaclient.v1_1 import networks
from novaclient.v1_1 import quota_classes
from novaclient.v1_1 import quotas
from novaclient.v1_1 import security_group_rules
from novaclient.v1_1 import security_groups
from novaclient.v1_1 import servers
from novaclient.v1_1 import usage
from novaclient.v1_1 import virtual_interfaces
from novaclient.v1_1 import volumes
from novaclient.v1_1 import volume_snapshots
from novaclient.v1_1 import volume_types
from novaclient.v1_1 import services
from novaclient.v1_1 import fixed_ips
from novaclient.v1_1 import floating_ips_bulk
class Client(object):
"""
Top-level object to access the OpenStack Compute API.
Create an instance with your creds::
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
Then call methods on its managers::
>>> client.servers.list()
...
>>> client.flavors.list()
...
"""
# FIXME(jesse): project_id isn't required to authenticate
def __init__(self, username, api_key, project_id, auth_url=None,
insecure=False, timeout=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='compute', service_name=None,
volume_service_name=None, timings=False,
bypass_url=None, os_cache=False, no_cache=True,
http_log_debug=False, auth_system='keystone',
auth_plugin=None,
cacert=None, tenant_id=None):
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
password = api_key
self.projectid = project_id
self.tenant_id = tenant_id
self.flavors = flavors.FlavorManager(self)
self.flavor_access = flavor_access.FlavorAccessManager(self)
self.images = images.ImageManager(self)
self.limits = limits.LimitsManager(self)
self.servers = servers.ServerManager(self)
# extensions
self.agents = agents.AgentsManager(self)
self.dns_domains = floating_ip_dns.FloatingIPDNSDomainManager(self)
self.dns_entries = floating_ip_dns.FloatingIPDNSEntryManager(self)
self.cloudpipe = cloudpipe.CloudpipeManager(self)
self.certs = certs.CertificateManager(self)
self.floating_ips = floating_ips.FloatingIPManager(self)
self.floating_ip_pools = floating_ip_pools.FloatingIPPoolManager(self)
self.fping = fping.FpingManager(self)
self.volumes = volumes.VolumeManager(self)
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
self.volume_types = volume_types.VolumeTypeManager(self)
self.keypairs = keypairs.KeypairManager(self)
self.networks = networks.NetworkManager(self)
self.quota_classes = quota_classes.QuotaClassSetManager(self)
self.quotas = quotas.QuotaSetManager(self)
self.security_groups = security_groups.SecurityGroupManager(self)
self.security_group_rules = \
security_group_rules.SecurityGroupRuleManager(self)
self.usage = usage.UsageManager(self)
self.virtual_interfaces = \
virtual_interfaces.VirtualInterfaceManager(self)
self.aggregates = aggregates.AggregateManager(self)
self.hosts = hosts.HostManager(self)
self.hypervisors = hypervisors.HypervisorManager(self)
self.services = services.ServiceManager(self)
self.fixed_ips = fixed_ips.FixedIPsManager(self)
self.floating_ips_bulk = floating_ips_bulk.FloatingIPBulkManager(self)
self.os_cache = os_cache or not no_cache
self.coverage = coverage_ext.CoverageManager(self)
self.availability_zones = \
availability_zones.AvailabilityZoneManager(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
self.client = client.HTTPClient(username,
password,
projectid=project_id,
tenant_id=tenant_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
auth_system=auth_system,
auth_plugin=auth_plugin,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
timings=timings,
bypass_url=bypass_url,
os_cache=self.os_cache,
http_log_debug=http_log_debug,
cacert=cacert)
def set_management_url(self, url):
self.client.set_management_url(url)
def get_timings(self):
return self.client.get_timings()
def reset_timings(self):
self.client.reset_timings()
def authenticate(self):
"""
Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()
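# --- Illustrative sketch (not part of the original module) ---
# How the ``extensions`` hook in __init__ is typically used: an extension
# object only needs ``name`` and ``manager_class`` attributes, and the
# manager is bound onto the client under that name. ``FooManager`` and
# ``FooExtension`` below are hypothetical stand-ins, not real novaclient
# classes:
#
#   class FooManager(object):
#       def __init__(self, api):
#           self.api = api
#
#   class FooExtension(object):
#       name = 'foo'
#       manager_class = FooManager
#
#   client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL,
#                   extensions=[FooExtension()])
#   client.authenticate()        # optional; normally done lazily on first use
#   assert isinstance(client.foo, FooManager)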
| apache-2.0 |
waterponey/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
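# only two samples carry labels: index 0 (a point on the outer circle) and
# index -1 (a point on the inner circle); -1 marks all other samples unlabeled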
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
guiquanz/googletest | test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
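# For example (illustrative): given program output such as
#
#   ----
#   A.x
#   A.y
#   ----
#   A.y
#   A.x
#
# the function returns [['A.x', 'A.y'], ['A.y', 'A.x']] -- one inner list
# per '----'-delimited test iteration.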
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
    A list of test cases from 'tests', in their original order.
    Duplicates are removed: only the first occurrence of each test
    case is kept.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
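# For example (illustrative):
#
#   GetTestCases(['A.x', 'A.y', 'B.z', 'A.w']) == ['A', 'B']
#
# 'A' is listed once even though it reappears after 'B', because only the
# first occurrence of each test case name is recorded.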
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
      if not test_cases or test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
lulandco/SickRage | lib/github/GistHistoryState.py | 74 | 10159 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
import github.CommitStats
import github.Gist
class GistHistoryState(github.GithubObject.CompletableGithubObject):
"""
This class represents GistHistoryStates as returned for example by http://developer.github.com/v3/todo
"""
@property
def change_status(self):
"""
:type: :class:`github.CommitStats.CommitStats`
"""
self._completeIfNotSet(self._change_status)
return self._change_status.value
@property
def comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def committed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._committed_at)
return self._committed_at.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def files(self):
"""
:type: dict of string to :class:`github.GistFile.GistFile`
"""
self._completeIfNotSet(self._files)
return self._files.value
@property
def forks(self):
"""
:type: list of :class:`github.Gist.Gist`
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def git_pull_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_pull_url)
return self._git_pull_url.value
@property
def git_push_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_push_url)
return self._git_push_url.value
@property
def history(self):
"""
:type: list of :class:`GistHistoryState`
"""
self._completeIfNotSet(self._history)
return self._history.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: string
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def public(self):
"""
:type: bool
"""
self._completeIfNotSet(self._public)
return self._public.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
@property
def version(self):
"""
:type: string
"""
self._completeIfNotSet(self._version)
return self._version.value
def _initAttributes(self):
self._change_status = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._committed_at = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._git_pull_url = github.GithubObject.NotSet
self._git_push_url = github.GithubObject.NotSet
self._history = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._public = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
self._version = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "change_status" in attributes: # pragma no branch
self._change_status = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["change_status"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "committed_at" in attributes: # pragma no branch
self._committed_at = self._makeDatetimeAttribute(attributes["committed_at"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "files" in attributes: # pragma no branch
self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes["files"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeListOfClassesAttribute(github.Gist.Gist, attributes["forks"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "git_pull_url" in attributes: # pragma no branch
self._git_pull_url = self._makeStringAttribute(attributes["git_pull_url"])
if "git_push_url" in attributes: # pragma no branch
self._git_push_url = self._makeStringAttribute(attributes["git_push_url"])
if "history" in attributes: # pragma no branch
self._history = self._makeListOfClassesAttribute(GistHistoryState, attributes["history"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
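# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how callers typically reach GistHistoryState objects
# through PyGithub; the access token and gist id below are hypothetical:
#
#   from github import Github
#   gh = Github('my-access-token')
#   gist = gh.get_gist('aa5a315d61ae9438b18d')
#   for state in gist.history:
#       print(state.version, state.committed_at, state.change_status.total)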
| gpl-3.0 |
1st1/uvloop | tests/test_base.py | 1 | 25924 | import asyncio
import fcntl
import logging
import os
import sys
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
def test_close(self):
self.assertFalse(self.loop._closed)
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop._closed)
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = self.loop.call_soon(lambda: None)
wd['h'] = h # Would fail without __weakref__ slot.
def test_call_soon_1(self):
calls = []
def cb(inc):
calls.append(inc)
self.loop.stop()
self.loop.call_soon(cb, 10)
h = self.loop.call_soon(cb, 100)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_soon(cb, 1)
self.loop.run_forever()
self.assertEqual(calls, [10, 1])
def test_call_soon_2(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_3(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_base_exc(self):
def cb():
raise KeyboardInterrupt()
self.loop.call_soon(cb)
with self.assertRaises(KeyboardInterrupt):
self.loop.run_forever()
self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
def run_test(debug, meth, stack_adj):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def cb():
1 / 0
meth(cb)
self.assertIsNone(context)
self.loop.run_until_complete(asyncio.sleep(0.05, loop=self.loop))
self.assertIs(type(context['exception']), ZeroDivisionError)
self.assertTrue(context['message'].startswith(
'Exception in callback'))
if debug:
tb = context['source_traceback']
self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
else:
self.assertFalse('source_traceback' in context)
del context
for debug in (True, False):
for meth_name, meth, stack_adj in (
('call_soon',
self.loop.call_soon, 0),
('call_later', # `-1` accounts for lambda
lambda *args: self.loop.call_later(0.01, *args), -1)
):
with self.subTest(debug=debug, meth_name=meth_name):
run_test(debug, meth, stack_adj)
def test_now_update(self):
async def run():
st = self.loop.time()
time.sleep(0.05)
return self.loop.time() - st
delta = self.loop.run_until_complete(run())
self.assertTrue(delta > 0.049 and delta < 0.6)
def test_call_later_1(self):
calls = []
def cb(inc=10, stop=False):
calls.append(inc)
self.assertTrue(self.loop.is_running())
if stop:
self.loop.call_soon(self.loop.stop)
self.loop.call_later(0.05, cb)
# canceled right away
h = self.loop.call_later(0.05, cb, 100, True)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_later(0.05, cb, 1, True)
self.loop.call_later(1000, cb, 1000) # shouldn't be called
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(calls, [10, 1])
self.assertFalse(self.loop.is_running())
self.assertLess(finished - started, 0.1)
self.assertGreater(finished - started, 0.04)
def test_call_later_2(self):
# Test that loop.call_later triggers an update of
# libuv cached time.
async def main():
await asyncio.sleep(0.001, loop=self.loop)
time.sleep(0.01)
await asyncio.sleep(0.01, loop=self.loop)
started = time.monotonic()
self.loop.run_until_complete(main())
delta = time.monotonic() - started
self.assertGreater(delta, 0.019)
def test_call_later_3(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_4(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_negative(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop.stop()
self.loop.call_later(-1, cb, 'a')
self.loop.run_forever()
self.assertEqual(calls, ['a'])
def test_call_later_rounding(self):
# Refs #233, call_later() and call_at() shouldn't call cb early
def cb():
self.loop.stop()
for i in range(8):
self.loop.call_later(0.06 + 0.01, cb) # 0.06999999999999999
started = int(round(self.loop.time() * 1000))
self.loop.run_forever()
finished = int(round(self.loop.time() * 1000))
self.assertGreaterEqual(finished - started, 69)
def test_call_at(self):
if os.environ.get('TRAVIS_OS_NAME'):
# Time seems to be really unpredictable on Travis.
raise unittest.SkipTest('time is not monotonic on Travis')
i = 0
def cb(inc):
nonlocal i
i += inc
self.loop.stop()
at = self.loop.time() + 0.05
self.loop.call_at(at, cb, 100).cancel()
self.loop.call_at(at, cb, 10)
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(i, 10)
self.assertLess(finished - started, 0.07)
self.assertGreater(finished - started, 0.045)
def test_check_thread(self):
def check_thread(loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an "
"event loop other than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = self.new_loop()
try:
asyncio.set_event_loop(loop2)
check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
called = []
def cb(arg):
called.append(arg)
async def runner():
await self.loop.run_in_executor(None, cb, 'a')
self.loop.run_until_complete(runner())
self.assertEqual(called, ['a'])
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test_run_until_complete_type_error(self):
self.assertRaises(
TypeError, self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_loop()
self.addCleanup(other_loop.close)
self.assertRaises(
ValueError, other_loop.run_until_complete, task)
def test_run_until_complete_error(self):
async def foo():
raise ValueError('aaa')
with self.assertRaisesRegex(ValueError, 'aaa'):
self.loop.run_until_complete(foo())
def test_run_until_complete_loop_orphan_future_close_loop(self):
if self.implementation == 'asyncio' and sys.version_info < (3, 6, 2):
raise unittest.SkipTest('unfixed asyncio')
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_debug_slow_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_soon(lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Handle', msg)
self.assertIn('test_debug_slow_callbacks', msg)
def test_debug_slow_timer_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_later(0.01, lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0.02, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <TimerHandle', msg)
self.assertIn('test_debug_slow_timer_callbacks', msg)
def test_debug_slow_task_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
async def foo():
time.sleep(0.3)
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(foo())
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Task finished', msg)
self.assertIn('test_debug_slow_task_callbacks', msg)
def test_default_exc_handler_callback(self):
self.loop.set_exception_handler(None)
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1 / 0
logger = logging.getLogger('asyncio')
# Test call_soon (events.Handle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
# Test call_later (events.TimerHandle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
def test_set_exc_handler_custom(self):
self.loop.set_exception_handler(None)
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
errors = []
def handler(loop, exc):
errors.append(exc)
self.loop.set_debug(True)
if hasattr(self.loop, 'get_exception_handler'):
# Available since Python 3.5.2
self.assertIsNone(self.loop.get_exception_handler())
self.loop.set_exception_handler(handler)
if hasattr(self.loop, 'get_exception_handler'):
self.assertIs(self.loop.get_exception_handler(), handler)
run_loop()
self.assertEqual(len(errors), 1)
self.assertRegex(errors[-1]['message'],
'Exception in callback.*zero_error')
self.loop.set_exception_handler(None)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
self.assertEqual(len(errors), 1)
def test_set_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Unhandled error in exception handler'),
exc_info=mock.ANY)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError,
'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def _compile_agen(self, src):
try:
g = {}
exec(src, globals(), g)
except SyntaxError:
# Python < 3.6
raise unittest.SkipTest()
else:
return g['waiter']
def test_shutdown_asyncgens_01(self):
finalized = list()
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen(
'''async def waiter(timeout, finalized, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
await asyncio.sleep(0, loop=loop)
finalized.append(1)
''')
async def wait():
async for _ in waiter(1, finalized, self.loop):
pass
t1 = self.loop.create_task(wait())
t2 = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(finalized, [1, 1])
# Silence warnings
t1.cancel()
t2.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_02(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
logged = 0
def logger(loop, context):
nonlocal logged
self.assertIn('asyncgen', context)
expected = 'an error occurred during closing of asynchronous'
if expected in context['message']:
logged += 1
waiter = self._compile_agen('''async def waiter(timeout, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
1 / 0
''')
async def wait():
async for _ in waiter(1, self.loop):
pass
t = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.set_exception_handler(logger)
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(logged, 1)
# Silence warnings
t.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_03(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen('''async def waiter():
yield 1
yield 2
''')
async def foo():
# We specifically want to hit _asyncgen_finalizer_hook
# method.
await waiter().asend(None)
self.loop.run_until_complete(foo())
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
def test_inf_wait_for(self):
async def foo():
await asyncio.sleep(0.1, loop=self.loop)
return 123
res = self.loop.run_until_complete(
asyncio.wait_for(foo(), timeout=float('inf'), loop=self.loop))
self.assertEqual(res, 123)
class TestBaseUV(_TestBase, UVTestCase):
def test_loop_create_future(self):
fut = self.loop.create_future()
self.assertTrue(isinstance(fut, asyncio.Future))
self.assertIs(fut._loop, self.loop)
fut.cancel()
def test_loop_call_soon_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly()
self.assertFalse(handle.cancelled())
def test_loop_call_later_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly(delay=0.05)
self.assertFalse(handle.cancelled())
def test_loop_std_files_cloexec(self):
# See https://github.com/MagicStack/uvloop/issues/40 for details.
for fd in {0, 1, 2}:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
self.assertFalse(flags & fcntl.FD_CLOEXEC)
def test_default_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
_context = None
class Loop(uvloop.Loop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
self.addCleanup(lambda: asyncio.set_event_loop(None))
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
class TestBaseAIO(_TestBase, AIOTestCase):
pass
class TestPolicy(unittest.TestCase):
def test_uvloop_policy(self):
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
try:
self.assertIsInstance(loop, uvloop.Loop)
finally:
loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = uvloop.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
| mit |
django-notifications/django-notifications | notifications/migrations/0001_initial.py | 1 | 2201 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import swapper
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default='info', max_length=20, choices=[('success', 'success'), ('info', 'info'), ('warning', 'warning'), ('error', 'error')])),
('unread', models.BooleanField(default=True)),
('actor_object_id', models.CharField(max_length=255)),
('verb', models.CharField(max_length=255)),
('description', models.TextField(null=True, blank=True)),
('target_object_id', models.CharField(max_length=255, null=True, blank=True)),
('action_object_object_id', models.CharField(max_length=255, null=True, blank=True)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('public', models.BooleanField(default=True)),
('action_object_content_type', models.ForeignKey(related_name='notify_action_object', blank=True, to='contenttypes.ContentType', null=True, on_delete=models.CASCADE)),
('actor_content_type', models.ForeignKey(related_name='notify_actor', to='contenttypes.ContentType', on_delete=models.CASCADE)),
('recipient', models.ForeignKey(related_name='notifications', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('target_content_type', models.ForeignKey(related_name='notify_target', blank=True, to='contenttypes.ContentType', null=True, on_delete=models.CASCADE)),
],
options={
'swappable': swapper.swappable_setting('notifications', 'Notification'),
'ordering': ('-timestamp',),
},
bases=(models.Model,),
),
]
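# Illustrative note (not part of the original migration): declaring the
# model swappable lets projects substitute their own notification model via
# the setting named by swapper.swappable_setting('notifications',
# 'Notification') -- by swapper's convention NOTIFICATIONS_NOTIFICATION_MODEL:
#
#   # settings.py (hypothetical project)
#   NOTIFICATIONS_NOTIFICATION_MODEL = 'myapp.Notification'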
| bsd-3-clause |
catapult-project/catapult | third_party/gsutil/gslib/vendored/boto/boto/ec2/tag.py | 181 | 3076 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class TagSet(dict):
"""
A TagSet is used to collect the tags associated with a particular
EC2 resource. Not all resources can be tagged but for those that
can, this dict object will be used to collect those values. See
:class:`boto.ec2.ec2object.TaggedEC2Object` for more details.
"""
def __init__(self, connection=None):
self.connection = connection
self._current_key = None
self._current_value = None
def startElement(self, name, attrs, connection):
if name == 'item':
self._current_key = None
self._current_value = None
return None
def endElement(self, name, value, connection):
if name == 'key':
self._current_key = value
elif name == 'value':
self._current_value = value
elif name == 'item':
self[self._current_key] = self._current_value
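def _tagset_demo():
    # Illustrative sketch only (not part of the original boto module).
    # boto's SAX response parser drives TagSet by calling startElement()
    # and endElement() for each XML node; here we replay a hypothetical
    # element stream for a <tagSet> containing a single <item>.
    ts = TagSet()
    ts.startElement('item', {}, None)
    ts.endElement('key', 'Name', None)
    ts.endElement('value', 'web-server-1', None)
    ts.endElement('item', '\n', None)
    assert ts == {'Name': 'web-server-1'}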
class Tag(object):
"""
A Tag is used when creating or listing all tags related to
an AWS account. It records not only the key and value but
also the ID of the resource to which the tag is attached
as well as the type of the resource.
"""
def __init__(self, connection=None, res_id=None, res_type=None,
name=None, value=None):
self.connection = connection
self.res_id = res_id
self.res_type = res_type
self.name = name
self.value = value
def __repr__(self):
return 'Tag:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'resourceId':
self.res_id = value
elif name == 'resourceType':
self.res_type = value
elif name == 'key':
self.name = value
elif name == 'value':
self.value = value
else:
setattr(self, name, value)
| bsd-3-clause |
gsalgado/pyethapp | pyethapp/accounts.py | 1 | 20516 | import json
import os
from random import SystemRandom
import shutil
from uuid import UUID
from devp2p.service import BaseService
from ethereum.tools import keys
from ethereum.slogging import get_logger
from ethereum.utils import privtopub # this is different than the one used in devp2p.crypto
from ethereum.utils import sha3, is_string, decode_hex, remove_0x_head
log = get_logger('accounts')
DEFAULT_COINBASE = 'de0b295669a9fd93d5f28d9ec85e40f4cb697bae'.decode('hex')
random = SystemRandom()
def mk_privkey(seed):
return sha3(seed)
def mk_random_privkey():
k = hex(random.getrandbits(256))[2:-1].zfill(64)
assert len(k) == 64
return k.decode('hex')
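# Illustrative note (not part of the original module): mk_privkey() derives
# a deterministic 32-byte key, e.g. mk_privkey('seed') == sha3('seed'), so
# the same seed always yields the same key, while mk_random_privkey() draws
# 256 fresh bits from the OS CSPRNG on every call.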
class Account(object):
"""Represents an account.
:ivar keystore: the key store as a dictionary (as decoded from json)
:ivar locked: `True` if the account is locked and neither private nor public keys can be
accessed, otherwise `False`
:ivar path: absolute path to the associated keystore file (`None` for in-memory accounts)
"""
def __init__(self, keystore, password=None, path=None):
self.keystore = keystore
try:
self._address = self.keystore['address'].decode('hex')
except KeyError:
self._address = None
self.locked = True
if password is not None:
self.unlock(password)
if path is not None:
self.path = os.path.abspath(path)
else:
self.path = None
@classmethod
def new(cls, password, key=None, uuid=None, path=None):
"""Create a new account.
Note that this creates the account in memory and does not store it on disk.
:param password: the password used to encrypt the private key
:param key: the private key, or `None` to generate a random one
:param uuid: an optional id
"""
if key is None:
key = mk_random_privkey()
keystore = keys.make_keystore_json(key, password)
keystore['id'] = uuid
return Account(keystore, password, path)
@classmethod
def load(cls, path, password=None):
"""Load an account from a keystore file.
:param path: full path to the keyfile
:param password: the password to decrypt the key file or `None` to leave it encrypted
"""
with open(path) as f:
keystore = json.load(f)
if not keys.check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path)
def dump(self, include_address=True, include_id=True):
"""Dump the keystore for later disk storage.
        The result inherits the entries `'crypto'` and `'version'` from `account.keystore`, and
        adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and
        `'include_id'`.
If address or id are not known, they are not added, even if requested.
:param include_address: flag denoting if the address should be included or not
:param include_id: flag denoting if the id should be included or not
"""
d = {}
d['crypto'] = self.keystore['crypto']
d['version'] = self.keystore['version']
if include_address and self.address is not None:
d['address'] = self.address.encode('hex')
if include_id and self.uuid is not None:
d['id'] = self.uuid
return json.dumps(d)
def unlock(self, password):
"""Unlock the account with a password.
If the account is already unlocked, nothing happens, even if the password is wrong.
:raises: :exc:`ValueError` (originating in ethereum.keys) if the password is wrong (and the
account is locked)
"""
if self.locked:
self._privkey = keys.decode_keystore_json(self.keystore, password)
self.locked = False
self.address # get address such that it stays accessible after a subsequent lock
def lock(self):
"""Relock an unlocked account.
This method sets `account.privkey` to `None` (unlike `account.address` which is preserved).
After calling this method, both `account.privkey` and `account.pubkey` are `None.
`account.address` stays unchanged, even if it has been derived from the private key.
"""
self._privkey = None
self.locked = True
@property
def privkey(self):
"""The account's private key or `None` if the account is locked"""
if not self.locked:
return self._privkey
else:
return None
@property
def pubkey(self):
"""The account's public key or `None` if the account is locked"""
if not self.locked:
return privtopub(self.privkey)
else:
return None
@property
def address(self):
"""The account's address or `None` if the address is not stored in the key file and cannot
be reconstructed (because the account is locked)
"""
if self._address:
pass
elif 'address' in self.keystore:
self._address = self.keystore['address'].decode('hex')
elif not self.locked:
self._address = keys.privtoaddr(self.privkey)
else:
return None
return self._address
@property
def uuid(self):
"""An optional unique identifier, formatted according to UUID version 4, or `None` if the
account does not have an id
"""
try:
return self.keystore['id']
except KeyError:
return None
@uuid.setter
def uuid(self, value):
"""Set the UUID. Set it to `None` in order to remove it."""
if value is not None:
self.keystore['id'] = value
elif 'id' in self.keystore:
self.keystore.pop('id')
def sign_tx(self, tx):
"""Sign a Transaction with the private key of this account.
If the account is unlocked, this is equivalent to ``tx.sign(account.privkey)``.
:param tx: the :class:`ethereum.transactions.Transaction` to sign
:raises: :exc:`ValueError` if the account is locked
"""
if self.privkey:
log.info('signing tx', tx=tx, account=self)
tx.sign(self.privkey)
else:
raise ValueError('Locked account cannot sign tx')
def __repr__(self):
if self.address is not None:
address = self.address.encode('hex')
else:
address = '?'
return '<Account(address={address}, id={id})>'.format(address=address, id=self.uuid)
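# Hedged usage sketch (illustrative only; password and path are invented).
# It shows the typical Account life cycle built from the class above:
#
#   account = Account.new('secret password', path='/tmp/keystore/demo')
#   print account.address.encode('hex')  # address derived from the new key
#   account.lock()                       # privkey and pubkey become None
#   account.unlock('secret password')    # keys accessible again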
class AccountsService(BaseService):
"""Service that manages accounts.
At initialization, this service collects the accounts stored as key files in the keystore
directory (config option `accounts.keystore_dir`) and below.
To add more accounts, use :method:`add_account`.
:ivar accounts: the :class:`Account`s managed by this service, sorted by the paths to their
keystore files
:ivar keystore_dir: absolute path to the keystore directory
"""
name = 'accounts'
default_config = dict(accounts=dict(keystore_dir='keystore', must_include_coinbase=True))
def __init__(self, app):
super(AccountsService, self).__init__(app)
self.keystore_dir = app.config['accounts']['keystore_dir']
if not os.path.isabs(self.keystore_dir):
self.keystore_dir = os.path.abspath(os.path.join(app.config['data_dir'],
self.keystore_dir))
assert os.path.isabs(self.keystore_dir)
self.accounts = []
if not os.path.exists(self.keystore_dir):
log.warning('keystore directory does not exist', directory=self.keystore_dir)
elif not os.path.isdir(self.keystore_dir):
log.error('configured keystore directory is a file, not a directory',
directory=self.keystore_dir)
else:
# traverse file tree rooted at keystore_dir
log.info('searching for key files', directory=self.keystore_dir)
for dirpath, _, filenames in os.walk(self.keystore_dir):
for filename in [os.path.join(dirpath, filename) for filename in filenames]:
try:
self.accounts.append(Account.load(filename))
except ValueError:
log.warning('invalid file skipped in keystore directory',
path=filename)
self.accounts.sort(key=lambda account: account.path) # sort accounts by path
if not self.accounts:
log.warn('no accounts found')
else:
log.info('found account(s)', accounts=self.accounts)
@property
def coinbase(self):
"""Return the address that should be used as coinbase for new blocks.
The coinbase address is given by the config field pow.coinbase_hex. If this does not exist
or is `None`, the address of the first account is used instead. If there are no accounts,
the coinbase is `DEFAULT_COINBASE`.
:raises: :exc:`ValueError` if the coinbase is invalid (no string, wrong length) or there is
no account for it and the config flag `accounts.check_coinbase` is set (does not
apply to the default coinbase)
"""
cb_hex = self.app.config.get('pow', {}).get('coinbase_hex')
if cb_hex is None:
if not self.accounts_with_address:
return DEFAULT_COINBASE
cb = self.accounts_with_address[0].address
else:
if not is_string(cb_hex):
raise ValueError('coinbase must be string')
try:
cb = decode_hex(remove_0x_head(cb_hex))
except (ValueError, TypeError):
raise ValueError('invalid coinbase')
if len(cb) != 20:
raise ValueError('wrong coinbase length')
if self.config['accounts']['must_include_coinbase']:
if cb not in (acct.address for acct in self.accounts):
raise ValueError('no account for coinbase')
return cb
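    # Illustrative resolution (assumed config values, not from the source):
    # with app.config['pow']['coinbase_hex'] = '0x' + 'ab' * 20 the property
    # returns the decoded 20-byte address; with the key absent it falls back
    # to the first known account's address, or to DEFAULT_COINBASE.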
def add_account(self, account, store=True, include_address=True, include_id=True):
"""Add an account.
If `store` is true the account will be stored as a key file at the location given by
`account.path`. If this is `None` a :exc:`ValueError` is raised. `include_address` and
`include_id` determine if address and id should be removed for storage or not.
This method will raise a :exc:`ValueError` if the new account has the same UUID as an
account already known to the service. Note that address collisions do not result in an
exception as those may slip through anyway for locked accounts with hidden addresses.
"""
log.info('adding account', account=account)
if account.uuid is not None:
if len([acct for acct in self.accounts if acct.uuid == account.uuid]) > 0:
log.error('could not add account (UUID collision)', uuid=account.uuid)
raise ValueError('Could not add account (UUID collision)')
if store:
if account.path is None:
raise ValueError('Cannot store account without path')
assert os.path.isabs(account.path), account.path
if os.path.exists(account.path):
log.error('File does already exist', path=account.path)
raise IOError('File does already exist')
assert account.path not in [acct.path for acct in self.accounts]
try:
directory = os.path.dirname(account.path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(account.path, 'w') as f:
f.write(account.dump(include_address, include_id))
except IOError as e:
log.error('Could not write to file', path=account.path, message=e.strerror,
errno=e.errno)
raise
self.accounts.append(account)
self.accounts.sort(key=lambda account: account.path)
def update_account(self, account, new_password, include_address=True, include_id=True):
"""Replace the password of an account.
The update is carried out in three steps:
1) the old keystore file is renamed
2) the new keystore file is created at the previous location of the old keystore file
3) the old keystore file is removed
In this way, at least one of the keystore files exists on disk at any time and can be
recovered if the process is interrupted.
:param account: the :class:`Account` which must be unlocked, stored on disk and included in
:attr:`AccountsService.accounts`.
:param include_address: forwarded to :meth:`add_account` during step 2
:param include_id: forwarded to :meth:`add_account` during step 2
:raises: :exc:`ValueError` if the account is locked, if it is not added to the account
manager, or if it is not stored
"""
if account not in self.accounts:
raise ValueError('Account not managed by account service')
if account.locked:
raise ValueError('Cannot update locked account')
if account.path is None:
raise ValueError('Account not stored on disk')
assert os.path.isabs(account.path)
# create new account
log.debug('creating new account')
new_account = Account.new(new_password, key=account.privkey, uuid=account.uuid)
new_account.path = account.path
# generate unique path and move old keystore file there
backup_path = account.path + '~'
i = 1
while os.path.exists(backup_path):
backup_path = backup_path[:backup_path.rfind('~') + 1] + str(i)
i += 1
assert not os.path.exists(backup_path)
log.info('moving old keystore file to backup location', **{'from': account.path,
'to': backup_path})
try:
shutil.move(account.path, backup_path)
except:
log.error('could not backup keystore, stopping account update',
**{'from': account.path, 'to': backup_path})
raise
assert os.path.exists(backup_path)
assert not os.path.exists(new_account.path)
account.path = backup_path
# remove old account from manager (not from disk yet) and add new account
self.accounts.remove(account)
assert account not in self.accounts
try:
            # pass the flags by keyword so they cannot be mistaken for 'store'
            self.add_account(new_account, include_address=include_address,
                             include_id=include_id)
except:
log.error('adding new account failed, recovering from backup')
shutil.move(backup_path, new_account.path)
self.accounts.append(account)
self.accounts.sort(key=lambda account: account.path)
raise
assert os.path.exists(new_account.path)
assert new_account in self.accounts
# everything was successful (we are still here), so delete old keystore file
log.info('deleting backup of old keystore', path=backup_path)
try:
os.remove(backup_path)
except:
log.error('failed to delete no longer needed backup of old keystore',
path=account.path)
raise
# set members of account to values of new_account
account.keystore = new_account.keystore
account.path = new_account.path
assert account.__dict__ == new_account.__dict__
# replace new_account by old account in account list
self.accounts.append(account)
self.accounts.remove(new_account)
self.accounts.sort(key=lambda account: account.path)
log.debug('account update successful')
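    # Illustrative call (assumed names, not from the original source):
    #
    #   services.accounts.update_account(account, 'new password')
    #
    # After this returns, the keystore file at account.path is re-encrypted
    # with the new password and the temporary "~" backup has been deleted.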
@property
def accounts_with_address(self):
"""Return a list of accounts whose address is known."""
return [account for account in self if account.address]
@property
def unlocked_accounts(self):
"""Return a list of all unlocked accounts."""
return [account for account in self if not account.locked]
def find(self, identifier):
"""Find an account by either its address, its id or its index as string.
Example identifiers:
- '9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address)
- '0x9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address with 0x prefix)
- '01dd527b-f4a5-4b3c-9abb-6a8e7cd6722f' (UUID)
- '3' (index)
        :param identifier: the account's hex-encoded, case-insensitive address (with optional 0x
prefix), its UUID or its index (as string, >= 1) in
`account_service.accounts`
:raises: :exc:`ValueError` if the identifier could not be interpreted
:raises: :exc:`KeyError` if the identified account is not known to the account_service
"""
try:
uuid = UUID(identifier)
except ValueError:
pass
else:
return self.get_by_id(str(uuid))
try:
index = int(identifier, 10)
except ValueError:
pass
else:
if index <= 0:
raise ValueError('Index must be 1 or greater')
try:
return self.accounts[index - 1]
except IndexError as e:
raise KeyError(e.message)
if identifier[:2] == '0x':
identifier = identifier[2:]
try:
address = identifier.decode('hex')
except TypeError:
success = False
else:
if len(address) != 20:
success = False
else:
return self[address]
assert not success
raise ValueError('Could not interpret account identifier')
def get_by_id(self, id):
"""Return the account with a given id.
Note that accounts are not required to have an id.
:raises: `KeyError` if no matching account can be found
"""
        accts = [acct for acct in self.accounts
                 if acct.uuid is not None and UUID(acct.uuid) == UUID(id)]
if len(accts) == 0:
raise KeyError('account with id {} unknown'.format(id))
elif len(accts) > 1:
log.warning('multiple accounts with same UUID found', uuid=id)
return accts[0]
def get_by_address(self, address):
"""Get an account by its address.
Note that even if an account with the given address exists, it might not be found if it is
locked. Also, multiple accounts with the same address may exist, in which case the first
one is returned (and a warning is logged).
:raises: `KeyError` if no matching account can be found
"""
assert len(address) == 20
accounts = [account for account in self.accounts if account.address == address]
if len(accounts) == 0:
raise KeyError('account not found by address', address=address.encode('hex'))
elif len(accounts) > 1:
log.warning('multiple accounts with same address found', address=address.encode('hex'))
return accounts[0]
def sign_tx(self, address, tx):
self.get_by_address(address).sign_tx(tx)
def propose_path(self, address):
return os.path.join(self.keystore_dir, address.encode('hex'))
def __contains__(self, address):
assert len(address) == 20
return address in [a.address for a in self.accounts]
def __getitem__(self, address_or_idx):
if isinstance(address_or_idx, bytes):
address = address_or_idx
assert len(address) == 20
for a in self.accounts:
if a.address == address:
return a
raise KeyError
else:
assert isinstance(address_or_idx, int)
return self.accounts[address_or_idx]
def __iter__(self):
return iter(self.accounts)
def __len__(self):
return len(self.accounts)
"""
--import-key = key.json
--unlock <password dialog>
--password passwordfile
--newkey <password dialog>
"""
| mit |
imsparsh/python-for-android | python3-alpha/extra_modules/pyxmpp2/ext/component.py | 46 | 7090 | #
# (C) Copyright 2003-2010 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Component (jabber:component:accept) stream handling.
Normative reference:
- `JEP 114 <http://www.jabber.org/jeps/jep-0114.html>`__
"""
raise ImportError("{0} is not yet rewritten for PyXMPP2".format(__name__))
__docformat__="restructuredtext en"
import hashlib
import logging
from ..stream import Stream
from ..streambase import stanza_factory,HostMismatch
from ..xmlextra import common_doc,common_root
from ..utils import to_utf8
from ..exceptions import StreamError,FatalStreamError,ComponentStreamError,FatalComponentStreamError
class ComponentStream(Stream):
"""Handles jabberd component (jabber:component:accept) connection stream.
:Ivariables:
- `server`: server to use.
- `port`: port number to use.
- `secret`: authentication secret.
:Types:
- `server`: `str`
- `port`: `int`
- `secret`: `str`"""
def __init__(self, jid, secret, server, port, keepalive = 0, owner = None):
"""Initialize a `ComponentStream` object.
:Parameters:
- `jid`: JID of the component.
- `secret`: authentication secret.
- `server`: server address.
- `port`: TCP port number on the server.
- `keepalive`: keepalive interval. 0 to disable.
- `owner`: `Client`, `Component` or similar object "owning" this stream.
"""
Stream.__init__(self, "jabber:component:accept",
sasl_mechanisms = [],
tls_settings = None,
keepalive = keepalive,
owner = owner)
self.server=server
self.port=port
self.me=jid
self.secret=secret
self.process_all_stanzas=1
self.__logger=logging.getLogger("pyxmpp2.jabberd.ComponentStream")
def _reset(self):
"""Reset `ComponentStream` object state, making the object ready to
handle new connections."""
Stream._reset(self)
def connect(self,server=None,port=None):
"""Establish a client connection to a server.
[component only]
:Parameters:
- `server`: name or address of the server to use. If not given
then use the one specified when creating the object.
- `port`: port number of the server to use. If not given then use
the one specified when creating the object.
:Types:
- `server`: `str`
- `port`: `int`"""
self.lock.acquire()
try:
self._connect(server,port)
finally:
self.lock.release()
def _connect(self,server=None,port=None):
"""Same as `ComponentStream.connect` but assume `self.lock` is acquired."""
if self.me.node or self.me.resource:
            raise ValueError("Component JID may have only domain defined")
if not server:
server=self.server
if not port:
port=self.port
if not server or not port:
raise ValueError("Server or port not given")
Stream._connect(self,server,port,None,self.me)
def accept(self,sock):
"""Accept an incoming component connection.
[server only]
:Parameters:
- `sock`: a listening socket."""
Stream.accept(self,sock,None)
def stream_start(self,doc):
"""Process <stream:stream> (stream start) tag received from peer.
Call `Stream.stream_start`, but ignore any `HostMismatch` error.
:Parameters:
- `doc`: document created by the parser"""
try:
Stream.stream_start(self,doc)
except HostMismatch:
pass
def _post_connect(self):
"""Initialize authentication when the connection is established
and we are the initiator."""
if self.initiator:
self._auth()
def _compute_handshake(self):
"""Compute the authentication handshake value.
:return: the computed hash value.
:returntype: `str`"""
return hashlib.sha1(to_utf8(self.stream_id)+to_utf8(self.secret)).hexdigest()
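    # Illustrative restatement of the JEP-114 handshake value computed above
    # (stream id and secret are invented example values):
    #
    #   import hashlib
    #   handshake = hashlib.sha1('stream-id-123' + 'component-secret').hexdigest()
    #
    # The component sends this digest in a <handshake/> element and the
    # server recomputes it to authenticate the connection.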
def _auth(self):
"""Authenticate on the server.
[component only]"""
if self.authenticated:
self.__logger.debug("_auth: already authenticated")
return
self.__logger.debug("doing handshake...")
hash_value=self._compute_handshake()
n=common_root.newTextChild(None,"handshake",hash_value)
self._write_node(n)
n.unlinkNode()
n.freeNode()
self.__logger.debug("handshake hash sent.")
def _process_node(self,node):
"""Process first level element of the stream.
Handle component handshake (authentication) element, and
treat elements in "jabber:component:accept", "jabber:client"
and "jabber:server" equally (pass to `self.process_stanza`).
All other elements are passed to `Stream._process_node`.
:Parameters:
- `node`: XML node describing the element
"""
        ns=node.ns()
        if ns:
            ns_uri=ns.getContent()
        else:
            ns_uri=None
if (not ns or ns_uri=="jabber:component:accept") and node.name=="handshake":
if self.initiator and not self.authenticated:
self.authenticated=1
self.state_change("authenticated",self.me)
self._post_auth()
return
elif not self.authenticated and node.getContent()==self._compute_handshake():
self.peer=self.me
n=common_doc.newChild(None,"handshake",None)
self._write_node(n)
n.unlinkNode()
n.freeNode()
self.peer_authenticated=1
self.state_change("authenticated",self.peer)
self._post_auth()
return
else:
self._send_stream_error("not-authorized")
                raise FatalComponentStreamError("Handshake error.")
if ns_uri in ("jabber:component:accept","jabber:client","jabber:server"):
stanza=stanza_factory(node)
self.lock.release()
try:
self.process_stanza(stanza)
finally:
self.lock.acquire()
stanza.free()
return
return Stream._process_node(self,node)
# vi: sts=4 et sw=4
| apache-2.0 |
lgp171188/fjord | vendor/packages/urllib3/dummyserver/handlers.py | 10 | 7113 | from __future__ import print_function
import gzip
import json
import logging
import sys
import time
import zlib
from io import BytesIO
from tornado.wsgi import HTTPRequest
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
log = logging.getLogger(__name__)
class Response(object):
def __init__(self, body='', status='200 OK', headers=None):
if not isinstance(body, bytes):
body = body.encode('utf8')
self.body = body
self.status = status
self.headers = headers or [("Content-type", "text/plain")]
def __call__(self, environ, start_response):
start_response(self.status, self.headers)
return [self.body]
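# Hedged usage sketch (illustrative only): Response instances are callable
# WSGI applications, so one can be invoked directly with a WSGI environ and
# start_response pair:
#
#   resp = Response('hello', status='200 OK')
#   body = resp({}, lambda status, headers: None)  # -> ['hello']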
class WSGIHandler(object):
pass
class TestingApp(WSGIHandler):
"""
Simple app that performs various operations, useful for testing an HTTP
library.
Given any path, it will attempt to convert it will load a corresponding
local method if it exists. Status code 200 indicates success, 400 indicates
failure. Each method has its own conditions for success/failure.
"""
def __call__(self, environ, start_response):
req = HTTPRequest(environ)
req.params = {}
for k, v in req.arguments.items():
req.params[k] = next(iter(v))
path = req.path[:]
if not path.startswith('/'):
path = urlsplit(path).path
target = path[1:].replace('/', '_')
method = getattr(self, target, self.index)
resp = method(req)
if dict(resp.headers).get('Connection') == 'close':
# FIXME: Can we kill the connection somehow?
pass
return resp(environ, start_response)
def index(self, _request):
"Render simple message"
return Response("Dummy server!")
def set_up(self, request):
test_type = request.params.get('test_type')
test_id = request.params.get('test_id')
if test_id:
print('\nNew test %s: %s' % (test_type, test_id))
else:
print('\nNew test %s' % test_type)
return Response("Dummy server is ready!")
def specific_method(self, request):
"Confirm that the request matches the desired method type"
method = request.params.get('method')
if method and not isinstance(method, str):
method = method.decode('utf8')
if request.method != method:
return Response("Wrong method: %s != %s" %
(method, request.method), status='400 Bad Request')
return Response()
def upload(self, request):
"Confirm that the uploaded file conforms to specification"
# FIXME: This is a huge broken mess
param = request.params.get('upload_param', 'myfile').decode('ascii')
filename = request.params.get('upload_filename', '').decode('utf-8')
size = int(request.params.get('upload_size', '0'))
files_ = request.files.get(param)
if len(files_) != 1:
return Response("Expected 1 file for '%s', not %d" %(param, len(files_)),
status='400 Bad Request')
file_ = files_[0]
data = file_['body']
if int(size) != len(data):
return Response("Wrong size: %d != %d" %
(size, len(data)), status='400 Bad Request')
if filename != file_['filename']:
return Response("Wrong filename: %s != %s" %
                            (filename, file_['filename']),
status='400 Bad Request')
return Response()
def redirect(self, request):
"Perform a redirect to ``target``"
target = request.params.get('target', '/')
headers = [('Location', target)]
return Response(status='303 See Other', headers=headers)
def keepalive(self, request):
if request.params.get('close', b'0') == b'1':
headers = [('Connection', 'close')]
return Response('Closing', headers=headers)
headers = [('Connection', 'keep-alive')]
return Response('Keeping alive', headers=headers)
def sleep(self, request):
"Sleep for a specified amount of ``seconds``"
seconds = float(request.params.get('seconds', '1'))
time.sleep(seconds)
return Response()
def echo(self, request):
"Echo back the params"
if request.method == 'GET':
return Response(request.query)
return Response(request.body)
def encodingrequest(self, request):
"Check for UA accepting gzip/deflate encoding"
data = b"hello, world!"
encoding = request.headers.get('Accept-Encoding', '')
headers = None
if encoding == 'gzip':
headers = [('Content-Encoding', 'gzip')]
file_ = BytesIO()
zipfile = gzip.GzipFile('', mode='w', fileobj=file_)
zipfile.write(data)
zipfile.close()
data = file_.getvalue()
elif encoding == 'deflate':
headers = [('Content-Encoding', 'deflate')]
data = zlib.compress(data)
elif encoding == 'garbage-gzip':
headers = [('Content-Encoding', 'gzip')]
data = 'garbage'
elif encoding == 'garbage-deflate':
headers = [('Content-Encoding', 'deflate')]
data = 'garbage'
return Response(data, headers=headers)
def headers(self, request):
return Response(json.dumps(request.headers))
def shutdown(self, request):
sys.exit()
# RFC2231-aware replacement of internal tornado function
def _parse_header(line):
r"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
>>> d = _parse_header("CD: fd; foo=\"bar\"; file*=utf-8''T%C3%A4st")[1]
>>> d['file'] == 'T\u00e4st'
True
>>> d['foo']
'bar'
"""
import tornado.httputil
import email.utils
from urllib3.packages import six
if not six.PY3:
line = line.encode('utf-8')
parts = tornado.httputil._parseparam(';' + line)
key = next(parts)
# decode_params treats first argument special, but we already stripped key
params = [('Dummy', 'value')]
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
params.append((name, value))
params = email.utils.decode_params(params)
params.pop(0) # get rid of the dummy again
pdict = {}
for name, value in params:
print(repr(value))
value = email.utils.collapse_rfc2231_value(value)
if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
pdict[name] = value
return key, pdict
# TODO: make the following conditional as soon as we know a version
# which does not require this fix.
# See https://github.com/facebook/tornado/issues/868
if True:
import tornado.httputil
tornado.httputil._parse_header = _parse_header
| bsd-3-clause |
tsufiev/horizon | openstack_dashboard/dashboards/admin/volumes/snapshots/views.py | 3 | 2439 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import forms as vol_snapshot_forms
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import tabs as vol_snapshot_tabs
from openstack_dashboard.dashboards.project.volumes.snapshots \
import views
class UpdateStatusView(forms.ModalFormView):
form_class = vol_snapshot_forms.UpdateStatus
template_name = 'admin/volumes/snapshots/update_status.html'
success_url = reverse_lazy("horizon:admin:volumes:snapshots_tab")
page_title = _("Update Volume Snapshot Status")
@memoized.memoized_method
def get_object(self):
snap_id = self.kwargs['snapshot_id']
try:
self._object = cinder.volume_snapshot_get(self.request,
snap_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume snapshot.'),
redirect=self.success_url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context['snapshot_id'] = self.kwargs["snapshot_id"]
return context
def get_initial(self):
snapshot = self.get_object()
return {'snapshot_id': self.kwargs["snapshot_id"],
'status': snapshot.status}
class DetailView(views.DetailView):
tab_group_class = vol_snapshot_tabs.SnapshotDetailsTabs
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:volumes:index')
| apache-2.0 |
SchrodingersGat/InvenTree | InvenTree/report/serializers.py | 2 | 2030 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from InvenTree.serializers import InvenTreeModelSerializer
from InvenTree.serializers import InvenTreeAttachmentSerializerField
from .models import TestReport
from .models import BuildReport
from .models import BillOfMaterialsReport
from .models import PurchaseOrderReport, SalesOrderReport
class TestReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = TestReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class BuildReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = BuildReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class BOMReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = BillOfMaterialsReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class POReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = PurchaseOrderReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class SOReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = SalesOrderReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
| mit |
louietsai/python-for-android | python-modules/twisted/twisted/spread/flavors.py | 56 | 21733 | # -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module represents flavors of remotely accessible objects.
Currently this is only objects accessible through Perspective Broker, but will
hopefully encompass all forms of remote access which can emulate subsets of PB
(such as XMLRPC or SOAP).
Future Plans: Optimization. Exploitation of new-style object model.
Optimizations to this module should not affect external-use semantics at all,
but may have a small impact on users who subclass and override methods.
@author: Glyph Lefkowitz
"""
# NOTE: this module should NOT import pb; it is supposed to be a module which
# abstractly defines remotely accessible types. Many of these types expect to
# be serialized by Jelly, but they ought to be accessible through other
# mechanisms (like XMLRPC)
# system imports
import sys
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, reflect
# sibling imports
from jelly import setUnjellyableForClass, setUnjellyableForClassTree, setUnjellyableFactoryForClass, unjellyableRegistry
from jelly import Jellyable, Unjellyable, _Dummy, _DummyNewStyle
from jelly import setInstanceState, getInstanceState
# compatibility
setCopierForClass = setUnjellyableForClass
setCopierForClassTree = setUnjellyableForClassTree
setFactoryForClass = setUnjellyableFactoryForClass
copyTags = unjellyableRegistry
copy_atom = "copy"
cache_atom = "cache"
cached_atom = "cached"
remote_atom = "remote"
class NoSuchMethod(AttributeError):
"""Raised if there is no such remote method"""
class IPBRoot(Interface):
"""Factory for root Referenceable objects for PB servers."""
def rootObject(broker):
"""Return root Referenceable for broker."""
class Serializable(Jellyable):
"""An object that can be passed remotely.
I am a style of object which can be serialized by Perspective
Broker. Objects which wish to be referenceable or copied remotely
have to subclass Serializable. However, clients of Perspective
Broker will probably not want to directly subclass Serializable; the
Flavors of transferable objects are listed below.
What it means to be \"Serializable\" is that an object can be
passed to or returned from a remote method. Certain basic types
(dictionaries, lists, tuples, numbers, strings) are serializable by
default; however, classes need to choose a specific serialization
style: L{Referenceable}, L{Viewable}, L{Copyable} or L{Cacheable}.
You may also pass C{[lists, dictionaries, tuples]} of L{Serializable}
instances to or return them from remote methods, as many levels deep
as you like.
"""
def processUniqueID(self):
"""Return an ID which uniquely represents this object for this process.
By default, this uses the 'id' builtin, but can be overridden to
indicate that two values are identity-equivalent (such as proxies
for the same object).
"""
return id(self)
class Referenceable(Serializable):
perspective = None
"""I am an object sent remotely as a direct reference.
When one of my subclasses is sent as an argument to or returned
from a remote method call, I will be serialized by default as a
direct reference.
This means that the peer will be able to call methods on me;
a method call xxx() from my peer will be resolved to methods
of the name remote_xxx.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'remote_messagename' and call it with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "remote_%s" % message, None)
if method is None:
raise NoSuchMethod("No such method: remote_%s" % (message,))
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self.perspective)
def jellyFor(self, jellier):
"""(internal)
Return a tuple which will be used as the s-expression to
serialize this to a peer.
"""
return "remote", jellier.invoker.registerReference(self)
class Root(Referenceable):
"""I provide a root object to L{pb.Broker}s for a L{pb.BrokerFactory}.
When a L{pb.BrokerFactory} produces a L{pb.Broker}, it supplies that
L{pb.Broker} with an object named \"root\". That object is obtained
by calling my rootObject method.
"""
implements(IPBRoot)
def rootObject(self, broker):
"""A L{pb.BrokerFactory} is requesting to publish me as a root object.
When a L{pb.BrokerFactory} is sending me as the root object, this
method will be invoked to allow per-broker versions of an
object. By default I return myself.
"""
return self
class ViewPoint(Referenceable):
"""
I act as an indirect reference to an object accessed through a
L{pb.Perspective}.
Simply put, I combine an object with a perspective so that when a
peer calls methods on the object I refer to, the method will be
invoked with that perspective as a first argument, so that it can
know who is calling it.
While L{Viewable} objects will be converted to ViewPoints by default
when they are returned from or sent as arguments to a remote
method, any object may be manually proxied as well. (XXX: Now that
    this class is no longer named C{Proxy}, this is the only occurrence
of the term 'proxied' in this docstring, and may be unclear.)
This can be useful when dealing with L{pb.Perspective}s, L{Copyable}s,
and L{Cacheable}s. It is legal to implement a method as such on
a perspective::
| def perspective_getViewPointForOther(self, name):
| defr = self.service.getPerspectiveRequest(name)
| defr.addCallbacks(lambda x, self=self: ViewPoint(self, x), log.msg)
| return defr
This will allow you to have references to Perspective objects in two
different ways. One is through the initial 'attach' call -- each
peer will have a L{pb.RemoteReference} to their perspective directly. The
other is through this method; each peer can get a L{pb.RemoteReference} to
all other perspectives in the service; but that L{pb.RemoteReference} will
be to a L{ViewPoint}, not directly to the object.
The practical offshoot of this is that you can implement 2 varieties
of remotely callable methods on this Perspective; view_xxx and
C{perspective_xxx}. C{view_xxx} methods will follow the rules for
ViewPoint methods (see ViewPoint.L{remoteMessageReceived}), and
C{perspective_xxx} methods will follow the rules for Perspective
methods.
"""
def __init__(self, perspective, object):
"""Initialize me with a Perspective and an Object.
"""
self.perspective = perspective
self.object = object
def processUniqueID(self):
"""Return an ID unique to a proxy for this perspective+object combination.
"""
return (id(self.perspective), id(self.object))
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{view_messagename}' to my Object and call it on my object with
the same arguments, modified by inserting my Perspective as
the first argument.
"""
args = broker.unserialize(args, self.perspective)
kw = broker.unserialize(kw, self.perspective)
method = getattr(self.object, "view_%s" % message)
try:
state = apply(method, (self.perspective,)+args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
rv = broker.serialize(state, self.perspective, method, args, kw)
return rv
class Viewable(Serializable):
"""I will be converted to a L{ViewPoint} when passed to or returned from a remote method.
The beginning of a peer's interaction with a PB Service is always
through a perspective. However, if a C{perspective_xxx} method returns
a Viewable, it will be serialized to the peer as a response to that
method.
"""
def jellyFor(self, jellier):
"""Serialize a L{ViewPoint} for me and the perspective of the given broker.
"""
return ViewPoint(jellier.invoker.serializingPerspective, self).jellyFor(jellier)
class Copyable(Serializable):
"""Subclass me to get copied each time you are returned from or passed to a remote method.
When I am returned from or passed to a remote method call, I will be
converted into data via a set of callbacks (see my methods for more
info). That data will then be serialized using Jelly, and sent to
the peer.
The peer will then look up the type to represent this with; see
L{RemoteCopy} for details.
"""
def getStateToCopy(self):
"""Gather state to send when I am serialized for a peer.
I will default to returning self.__dict__. Override this to
customize this behavior.
"""
return self.__dict__
def getStateToCopyFor(self, perspective):
"""
Gather state to send when I am serialized for a particular
perspective.
I will default to calling L{getStateToCopy}. Override this to
customize this behavior.
"""
return self.getStateToCopy()
def getTypeToCopy(self):
"""Determine what type tag to send for me.
By default, send the string representation of my class
(package.module.Class); normally this is adequate, but
you may override this to change it.
"""
return reflect.qual(self.__class__)
def getTypeToCopyFor(self, perspective):
"""Determine what type tag to send for me.
By default, defer to self.L{getTypeToCopy}() normally this is
adequate, but you may override this to change it.
"""
return self.getTypeToCopy()
def jellyFor(self, jellier):
"""Assemble type tag and state to copy for this broker.
This will call L{getTypeToCopyFor} and L{getStateToCopy}, and
return an appropriate s-expression to represent me.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
p = jellier.invoker.serializingPerspective
t = self.getTypeToCopyFor(p)
state = self.getStateToCopyFor(p)
sxp = jellier.prepare(self)
sxp.extend([t, jellier.jelly(state)])
return jellier.preserve(self, sxp)
class Cacheable(Copyable):
"""A cached instance.
This means that it's copied; but there is some logic to make sure
that it's only copied once. Additionally, when state is retrieved,
it is passed a "proto-reference" to the state as it will exist on
the client.
XXX: The documentation for this class needs work, but it's the most
complex part of PB and it is inherently difficult to explain.
"""
def getStateToCacheAndObserveFor(self, perspective, observer):
"""
Get state to cache on the client and client-cache reference
to observe locally.
        This is similar to getStateToCopyFor, but it additionally
passes in a reference to the client-side RemoteCache instance
that will be created when it is unserialized. This allows
Cacheable instances to keep their RemoteCaches up to date when
they change, such that no changes can occur between the point
at which the state is initially copied and the client receives
        it that are not propagated.
"""
return self.getStateToCopyFor(perspective)
def jellyFor(self, jellier):
"""Return an appropriate tuple to serialize me.
Depending on whether this broker has cached me or not, this may
return either a full state or a reference to an existing cache.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
luid = jellier.invoker.cachedRemotelyAs(self, 1)
if luid is None:
luid = jellier.invoker.cacheRemotely(self)
p = jellier.invoker.serializingPerspective
type_ = self.getTypeToCopyFor(p)
observer = RemoteCacheObserver(jellier.invoker, self, p)
state = self.getStateToCacheAndObserveFor(p, observer)
l = jellier.prepare(self)
jstate = jellier.jelly(state)
l.extend([type_, luid, jstate])
return jellier.preserve(self, l)
else:
return cached_atom, luid
def stoppedObserving(self, perspective, observer):
"""This method is called when a client has stopped observing me.
The 'observer' argument is the same as that passed in to
getStateToCacheAndObserveFor.
"""
class RemoteCopy(Unjellyable):
"""I am a remote copy of a Copyable object.
When the state from a L{Copyable} object is received, an instance will
be created based on the copy tags table (see setUnjellyableForClass) and
sent the L{setCopyableState} message. I provide a reasonable default
implementation of that message; subclass me if you wish to serve as
a copier for remote data.
NOTE: copiers are invoked with no arguments. Do not implement a
constructor which requires args in a subclass of L{RemoteCopy}!
"""
def setCopyableState(self, state):
"""I will be invoked with the state to copy locally.
'state' is the data returned from the remote object's
'getStateToCopyFor' method, which will often be the remote
object's dictionary (or a filtered approximation of it depending
on my peer's perspective).
"""
self.__dict__ = state
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.setCopyableState(unjellier.unjelly(jellyList[1]))
return self
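# Hedged sketch of the usual Copyable/RemoteCopy pairing (class names are
# invented for illustration; the registration call is the module's own):
#
#   class FrogPond(Copyable):
#       def __init__(self, frogs):
#           self.frogs = frogs
#
#   class RemoteFrogPond(RemoteCopy):
#       pass
#
#   setUnjellyableForClass(FrogPond, RemoteFrogPond)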
class RemoteCache(RemoteCopy, Serializable):
"""A cache is a local representation of a remote L{Cacheable} object.
This represents the last known state of this object. It may
also have methods invoked on it -- in order to update caches,
the cached class generates a L{pb.RemoteReference} to this object as
it is originally sent.
Much like copy, I will be invoked with no arguments. Do not
implement a constructor that requires arguments in one of my
subclasses.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{observe_messagename}' and call it on my with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "observe_%s" % message)
try:
state = apply(method, args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, None, method, args, kw)
def jellyFor(self, jellier):
"""serialize me (only for the broker I'm for) as the original cached reference
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
assert jellier.invoker is self.broker, "You cannot exchange cached proxies between brokers."
return 'lcache', self.luid
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.broker = unjellier.invoker
self.luid = jellyList[1]
if isinstance(self.__class__, type): #new-style class
cProxy = _DummyNewStyle()
else:
cProxy = _Dummy()
cProxy.__class__ = self.__class__
cProxy.__dict__ = self.__dict__
# XXX questionable whether this was a good design idea...
init = getattr(cProxy, "__init__", None)
if init:
init()
unjellier.invoker.cacheLocally(jellyList[1], self)
cProxy.setCopyableState(unjellier.unjelly(jellyList[2]))
# Might have changed due to setCopyableState method; we'll assume that
# it's bad form to do so afterwards.
self.__dict__ = cProxy.__dict__
# chomp, chomp -- some existing code uses "self.__dict__ =", some uses
# "__dict__.update". This is here in order to handle both cases.
self.broker = unjellier.invoker
self.luid = jellyList[1]
return cProxy
## def __really_del__(self):
## """Final finalization call, made after all remote references have been lost.
## """
def __cmp__(self, other):
"""Compare me [to another RemoteCache.
"""
if isinstance(other, self.__class__):
return cmp(id(self.__dict__), id(other.__dict__))
else:
return cmp(id(self.__dict__), other)
def __hash__(self):
"""Hash me.
"""
return int(id(self.__dict__) % sys.maxint)
broker = None
luid = None
def __del__(self):
"""Do distributed reference counting on finalize.
"""
try:
# log.msg( ' --- decache: %s %s' % (self, self.luid) )
if self.broker:
self.broker.decCacheRef(self.luid)
except:
log.deferr()
def unjellyCached(unjellier, unjellyList):
luid = unjellyList[1]
cNotProxy = unjellier.invoker.cachedLocallyAs(luid)
cProxy = _Dummy()
cProxy.__class__ = cNotProxy.__class__
cProxy.__dict__ = cNotProxy.__dict__
return cProxy
setUnjellyableForClass("cached", unjellyCached)
def unjellyLCache(unjellier, unjellyList):
luid = unjellyList[1]
obj = unjellier.invoker.remotelyCachedForLUID(luid)
return obj
setUnjellyableForClass("lcache", unjellyLCache)
def unjellyLocal(unjellier, unjellyList):
obj = unjellier.invoker.localObjectForID(unjellyList[1])
return obj
setUnjellyableForClass("local", unjellyLocal)
class RemoteCacheMethod:
"""A method on a reference to a L{RemoteCache}.
"""
def __init__(self, name, broker, cached, perspective):
"""(internal) initialize.
"""
self.name = name
self.broker = broker
self.perspective = perspective
self.cached = cached
def __cmp__(self, other):
return cmp((self.name, self.broker, self.perspective, self.cached), other)
def __hash__(self):
return hash((self.name, self.broker, self.perspective, self.cached))
def __call__(self, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID, self.name, args, kw)
class RemoteCacheObserver:
"""I am a reverse-reference to the peer's L{RemoteCache}.
I am generated automatically when a cache is serialized. I
represent a reference to the client's L{RemoteCache} object that
will represent a particular L{Cacheable}; I am the additional
object passed to getStateToCacheAndObserveFor.
"""
def __init__(self, broker, cached, perspective):
"""(internal) Initialize me.
@param broker: a L{pb.Broker} instance.
@param cached: a L{Cacheable} instance that this L{RemoteCacheObserver}
corresponds to.
@param perspective: a reference to the perspective who is observing this.
"""
self.broker = broker
self.cached = cached
self.perspective = perspective
def __repr__(self):
return "<RemoteCacheObserver(%s, %s, %s) at %s>" % (
self.broker, self.cached, self.perspective, id(self))
def __hash__(self):
"""Generate a hash unique to all L{RemoteCacheObserver}s for this broker/perspective/cached triplet
"""
return ( (hash(self.broker) % 2**10)
+ (hash(self.perspective) % 2**10)
+ (hash(self.cached) % 2**10))
def __cmp__(self, other):
"""Compare me to another L{RemoteCacheObserver}.
"""
return cmp((self.broker, self.perspective, self.cached), other)
def callRemote(self, _name, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the "
"object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{pb.RemoteMethod} for this key.
"""
return RemoteCacheMethod(key, self.broker, self.cached, self.perspective)
| apache-2.0 |
NLnetLabs/ldns | contrib/python/examples/ldns-signzone.py | 9 | 1609 | #!/usr/bin/python
# This example shows how to sign a given zone file with private key
import ldns
import sys, os, time
#private key TAG which identifies the private key
#use ldns-keygen.py in order to obtain private key
keytag = 30761
# Read zone file
#-------------------------------------------------------------
zone = ldns.ldns_zone.new_frm_fp(open("zone.txt","r"), None, 0, ldns.LDNS_RR_CLASS_IN)
soa = zone.soa()
origin = soa.owner()
# Prepare keys
#-------------------------------------------------------------
#Read private key from file
keyfile = open("key-%s-%d.private" % (origin, keytag), "r");
key = ldns.ldns_key.new_frm_fp(keyfile)
#Read public key from file
pubfname = "key-%s-%d.key" % (origin, keytag)
pubkey = None
if os.path.isfile(pubfname):
pubkeyfile = open(pubfname, "r");
pubkey,_,_,_ = ldns.ldns_rr.new_frm_fp(pubkeyfile)
if not pubkey:
#Create new public key
pubkey = key.key_to_rr()
#Set key expiration
key.set_expiration(int(time.time()) + 365*60*60*24) #365 days
#Set key owner (important step)
key.set_pubkey_owner(origin)
#Insert DNSKEY RR
zone.push_rr(pubkey)
# Sign zone
#-------------------------------------------------------------
#Create keylist and push private key
keys = ldns.ldns_key_list()
keys.push_key(key)
#Add SOA
signed_zone = ldns.ldns_dnssec_zone()
signed_zone.add_rr(soa)
#Add RRs
for rr in zone.rrs().rrs():
print "RR:",str(rr),
signed_zone.add_rr(rr)
added_rrs = ldns.ldns_rr_list()
status = signed_zone.sign(added_rrs, keys)
if (status == ldns.LDNS_STATUS_OK):
signed_zone.print_to_file(open("zone_signed.txt","w"))
| bsd-3-clause |
juanalfonsopr/odoo | addons/sale/sales_team.py | 171 | 4218 | # -*- coding: utf-8 -*-
import calendar
from datetime import date
from dateutil import relativedelta
import json
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_repr
class crm_case_section(osv.osv):
_inherit = 'crm.case.section'
def _get_sale_orders_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['sale.order']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
res[id] = {}
created_domain = [('section_id', '=', id), ('state', '=', 'draft'), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
validated_domain = [('section_id', '=', id), ('state', 'not in', ['draft', 'sent', 'cancel']), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
res[id]['monthly_quoted'] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, ['amount_total', 'date_order'], 'amount_total', 'date_order', context=context))
res[id]['monthly_confirmed'] = json.dumps(self.__get_bar_values(cr, uid, obj, validated_domain, ['amount_untaxed', 'date_order'], 'amount_untaxed', 'date_order', context=context))
return res
def _get_invoices_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['account.invoice.report']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('type', 'in', ['out_invoice', 'out_refund']), ('section_id', '=', id), ('state', 'not in', ['draft', 'cancel']), ('date', '>=', date_begin), ('date', '<=', date_end)]
values = self.__get_bar_values(cr, uid, obj, created_domain, ['price_total', 'date'], 'price_total', 'date', context=context)
for value in values:
value['value'] = float_repr(value.get('value', 0), precision_digits=self.pool['decimal.precision'].precision_get(cr, uid, 'Account'))
res[id] = json.dumps(values)
return res
_columns = {
'use_quotations': fields.boolean('Quotations', help="Check this box to manage quotations in this sales team."),
'invoiced_forecast': fields.integer(string='Invoice Forecast',
help="Forecast of the invoice revenue for the current month. This is the amount the sales \n"
"team should invoice this month. It is used to compute the progression ratio \n"
" of the current and forecast revenue on the kanban view."),
'invoiced_target': fields.integer(string='Invoice Target',
help="Target of invoice revenue for the current month. This is the amount the sales \n"
"team estimates to be able to invoice this month."),
'monthly_quoted': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of created quotation per duration'),
'monthly_confirmed': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of validate sales orders per duration'),
'monthly_invoiced': fields.function(_get_invoices_data,
type='char', readonly=True,
string='Rate of sent invoices per duration'),
}
_defaults = {
'use_quotations': True,
}
def action_forecast(self, cr, uid, id, value, context=None):
return self.write(cr, uid, [id], {'invoiced_forecast': round(float(value))}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BorgERP/borg-erp-6of3 | addons/event_project/event_project.py | 9 | 2565 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class one2many_mod_task(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if not values:
values = {}
res = {}
for id in ids:
res[id] = []
for id in ids:
query = "select project_id from event_event where id = %s"
cr.execute(query, (id,))
project_ids = [ x[0] for x in cr.fetchall()]
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id, 'in', project_ids), ('state', '<>', 'done')], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[id].append( r['id'] )
return res
class event(osv.osv):
_inherit = 'event.event'
def write(self, cr, uid, ids, vals, *args, **kwargs):
if 'date_begin' in vals and vals['date_begin']:
for eve in self.browse(cr, uid, ids):
if eve.project_id:
self.pool.get('project.project').write(cr, uid, [eve.project_id.id], {'date_end': eve.date_begin[:10]})
return super(event,self).write(cr, uid, ids, vals, *args, **kwargs)
_columns = {
'project_id': fields.many2one('project.project', 'Project', readonly=True),
'task_ids': one2many_mod_task('project.task', 'project_id', "Project tasks", readonly=True, domain="[('state', '<>', 'done')]"),
}
event()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
mikjo/bigitr | unit_test/daemonconfig_test.py | 1 | 4363 | #
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from cStringIO import StringIO
import tempfile
import testutils
from bigitr import daemonconfig
class TestDaemonConfig(testutils.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp(suffix='.bigitr')
os.environ['DDIR'] = self.dir
daemonConfig = self.dir + '/daemon'
file(daemonConfig, 'w').write('''
[GLOBAL]
appconfig = ${DDIR}/app
[foo]
repoconfig = ${DDIR}/foo1.* ${DDIR}/foo2.*
[bar]
appconfig = ${DDIR}/app2
repoconfig = ${DDIR}/bar
email = other@other blah@blah
''')
self.cfg = daemonconfig.DaemonConfig(daemonConfig)
def tearDown(self):
self.removeRecursive(self.dir)
os.unsetenv('DDIR')
def test_parallelConversions(self):
self.assertEqual(1, self.cfg.parallelConversions())
self.cfg.set('GLOBAL', 'parallel', '8')
self.assertEqual(8, self.cfg.parallelConversions())
def test_getPollFrequency(self):
self.assertEqual(300, self.cfg.getPollFrequency())
self.cfg.set('GLOBAL', 'pollfrequency', '1h')
self.assertEqual(3600, self.cfg.getPollFrequency())
def test_getFullSyncFrequency(self):
self.assertEqual(86000, self.cfg.getFullSyncFrequency())
self.cfg.set('GLOBAL', 'syncfrequency', '1h')
self.assertEqual(3600, self.cfg.getFullSyncFrequency())
def test_getEmail(self):
self.assertEqual(None, self.cfg.getEmail())
self.cfg.set('GLOBAL', 'email', 'here@here')
self.assertEqual(['here@here'], self.cfg.getEmail())
def test_getMailFrom(self):
self.assertEqual(None, self.cfg.getMailFrom())
self.cfg.set('GLOBAL', 'mailfrom', 'noreply@here')
self.assertEqual('noreply@here', self.cfg.getMailFrom())
def test_getMailAll(self):
self.assertFalse(self.cfg.getMailAll())
self.cfg.set('GLOBAL', 'mailall', 'true')
self.assertTrue(self.cfg.getMailAll())
def test_getSmartHost(self):
self.assertEqual('localhost', self.cfg.getSmartHost())
self.cfg.set('GLOBAL', 'smarthost', 'foo')
self.assertEqual('foo', self.cfg.getSmartHost())
def test_getApplicationContexts(self):
self.assertEqual(set(('foo', 'bar')), self.cfg.getApplicationContexts())
def test_getAppConfig(self):
self.assertEqual(self.dir + '/app', self.cfg.getAppConfig('foo'))
self.assertEqual(self.dir + '/app2', self.cfg.getAppConfig('bar'))
def test_getRepoConfigs(self):
# files have to exist to be globbed
file(self.dir + '/foo1.1', 'w')
file(self.dir + '/foo1.2', 'w')
file(self.dir + '/foo2.1', 'w')
file(self.dir + '/bar', 'w')
self.assertEqual([self.dir + '/foo1.1',
self.dir + '/foo1.2',
self.dir + '/foo2.1'],
self.cfg.getRepoConfigs('foo'))
self.assertEqual([self.dir + '/bar'], self.cfg.getRepoConfigs('bar'))
def test_parseTimeSpec(self):
self.assertEqual(3600, self.cfg._parseTimeSpec('1h'))
self.assertEqual(3600, self.cfg._parseTimeSpec('1H'))
self.assertEqual(60, self.cfg._parseTimeSpec('1m'))
self.assertEqual(60, self.cfg._parseTimeSpec('1M'))
self.assertEqual(1, self.cfg._parseTimeSpec('1s'))
self.assertEqual(1, self.cfg._parseTimeSpec('1S'))
self.assertEqual(1, self.cfg._parseTimeSpec('1'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1'))
self.assertEqual(3612, self.cfg._parseTimeSpec('1h12'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s '))
self.assertEqual(3661, self.cfg._parseTimeSpec(' 1h 1m 1s '))
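# Hypothetical direct runner; assumes testutils.TestCase ultimately derives
# from unittest.TestCase (not verified here).
if __name__ == '__main__':
    import unittest
    unittest.main()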
| apache-2.0 |
freakboy3742/pyxero | xero/constants.py | 3 | 2057 | XERO_BASE_URL = "https://api.xero.com"
REQUEST_TOKEN_URL = "/oauth/RequestToken"
AUTHORIZE_URL = "/oauth/Authorize"
ACCESS_TOKEN_URL = "/oauth/AccessToken"
XERO_API_URL = "/api.xro/2.0"
XERO_FILES_URL = "/files.xro/1.0"
XERO_PAYROLL_URL = "/payroll.xro/1.0"
XERO_PROJECTS_URL = "/projects.xro/2.0"
XERO_OAUTH2_AUTHORIZE_URL = "https://login.xero.com/identity/connect/authorize"
XERO_OAUTH2_TOKEN_URL = "https://identity.xero.com/connect/token"
XERO_OAUTH2_CONNECTIONS_URL = "/connections"
class XeroScopes:
# Offline Access
OFFLINE_ACCESS = "offline_access"
# OpenID connection
OPENID = "openid"
PROFILE = "profile"
EMAIL = "email"
# Accounting API
ACCOUNTING_TRANSACTIONS = "accounting.transactions"
ACCOUNTING_TRANSACTIONS_READ = "accounting.transactions.read"
ACCOUNTING_REPORTS_READ = "accounting.reports.read"
ACCOUNTING_JOURNALS_READ = "accounting.journals.read"
ACCOUNTING_SETTINGS = "accounting.settings"
ACCOUNTING_SETTINGS_READ = "accounting.settings.read"
ACCOUNTING_CONTACTS = "accounting.contacts"
ACCOUNTING_CONTACTS_READ = "accounting.contacts.read"
ACCOUNTING_ATTACHMENTS = "accounting.attachments"
ACCOUNTING_ATTACHMENTS_READ = "accounting.attachments.read"
# Payroll API
PAYROLL_EMPLOYEES = "payroll.employees"
PAYROLL_EMPLOYEES_READ = "payroll.employees.read"
PAYROLL_PAYRUNS = "payroll.payruns"
PAYROLL_PAYRUNS_READ = "payroll.payruns.read"
PAYROLL_PAYSLIP = "payroll.payslip"
PAYROLL_PAYSLIP_READ = "payroll.payslip.read"
PAYROLL_TIMESHEETS = "payroll.timesheets"
PAYROLL_TIMESHEETS_READ = "payroll.timesheets.read"
PAYROLL_SETTINGS = "payroll.settings"
PAYROLL_SETTINGS_READ = "payroll.settings.read"
# Files API
FILES = "files"
FILES_READ = "files.read"
# Asssets API
ASSETS = "assets"
ASSETS_READ = "assets.read"
# Projects API
PROJECTS = "projects"
PROJECTS_READ = "projects.read"
# Restricted Scopes
PAYMENTSERVICES = "paymentservices"
BANKFEEDS = "bankfeeds"
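# Usage sketch: Xero's OAuth2 flow expects a space-separated scope string; a
# minimal read-only combination might look like this (the selection below is
# illustrative, not a library default):
DEFAULT_READONLY_SCOPE = " ".join([
    XeroScopes.OFFLINE_ACCESS,
    XeroScopes.OPENID,
    XeroScopes.ACCOUNTING_TRANSACTIONS_READ,
    XeroScopes.ACCOUNTING_CONTACTS_READ,
])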
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
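# For the anisotropic and unequal-variance cases a per-cluster covariance
# model is the usual remedy; sketch (GaussianMixture exists in scikit-learn
# >= 0.18, which this example tree may predate):
#   from sklearn.mixture import GaussianMixture
#   y_pred = GaussianMixture(n_components=3,
#                            random_state=random_state).fit_predict(X_aniso)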
| bsd-3-clause |
xuru/pyvisdk | pyvisdk/do/host_network_traffic_shaping_policy.py | 1 | 1089 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostNetworkTrafficShapingPolicy(vim, *args, **kwargs):
'''This data object type describes traffic shaping policy.'''
obj = vim.client.factory.create('ns0:HostNetworkTrafficShapingPolicy')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 0 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ ]
optional = [ 'averageBandwidth', 'burstSize', 'enabled', 'peakBandwidth', 'dynamicProperty',
'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
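# Usage sketch (hypothetical vim connection and values; bandwidths in bits/s,
# burst size in bytes):
#   policy = HostNetworkTrafficShapingPolicy(vim, enabled=True,
#       averageBandwidth=100000, peakBandwidth=200000, burstSize=102400)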
| mit |
rogerhu/django | django/middleware/common.py | 11 | 7283 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s', request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError((""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1]))
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.scheme,
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.META.get('QUERY_STRING', ''):
if six.PY3:
newurl += '?' + request.META['QUERY_STRING']
else:
# `query_string` is a bytestring. Appending it to the unicode
# string `newurl` will fail if it isn't ASCII-only. This isn't
# allowed; only broken software generates such query strings.
# Better drop the invalid query string than crash (#15152).
try:
newurl += '?' + request.META['QUERY_STRING'].decode()
except UnicodeDecodeError:
pass
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
"""
if settings.SEND_BROKEN_LINK_EMAILS:
warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
"Use BrokenLinkEmailsMiddleware instead.",
DeprecationWarning, stacklevel=2)
BrokenLinkEmailsMiddleware().process_response(request, response)
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
elif response.streaming:
etag = None
else:
etag = '"%s"' % hashlib.md5(response.content).hexdigest()
if etag is not None:
if (200 <= response.status_code < 300
and request.META.get('HTTP_IF_NONE_MATCH') == etag):
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
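# Configuration sketch: the standard Django settings consumed above, with
# illustrative values (APPEND_SLASH and PREPEND_WWW default to True and False):
#   MIDDLEWARE_CLASSES = ('django.middleware.common.CommonMiddleware',)
#   APPEND_SLASH = True
#   PREPEND_WWW = False
#   USE_ETAGS = True
#   IGNORABLE_404_URLS = (re.compile(r'\.(php|cgi)$'),)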
| bsd-3-clause |
shail2810/nova | nova/tests/functional/test_servers.py | 31 | 19364 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import context
from nova import exception
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTestBase(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
_force_delete_parameter = 'forceDelete'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
_access_ipv4_parameter = 'accessIPv4'
_access_ipv6_parameter = 'accessIPv6'
_return_resv_id_parameter = 'return_reservation_id'
_min_count_parameter = 'min_count'
def setUp(self):
super(ServersTestBase, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
# if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def _get_access_ips_params(self):
return {self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2"}
def _verify_access_ips(self, server):
self.assertEqual('172.19.0.2',
server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
class ServersTest(ServersTestBase):
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server[self._image_ref_parameter] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server[self._image_ref_parameter] = good_server.get(
self._image_ref_parameter)
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server[self._flavor_ref_parameter] = good_server.get(
self._flavor_ref_parameter)
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertIn("image", server)
self.assertIn("flavor", server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
self.addCleanup(timeutils.clear_time_override)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id,
{self._force_delete_parameter: {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
post['rebuild'].update(self._get_access_ips_params())
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map:
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
_api_version = 'v3'
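# Invocation sketch; the exact runner depends on the tree's tox/testr setup:
#   python -m testtools.run \
#       nova.tests.functional.test_servers.ServersTest.test_get_servers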
| apache-2.0 |
oaubert/advene | lib/advene/plugins/tts.py | 1 | 14814 | #
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <[email protected]>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
logger = logging.getLogger(__name__)
from gettext import gettext as _
import subprocess
import os
import signal
import advene.core.config as config
from advene.rules.elements import RegisteredAction
import advene.util.helper as helper
import advene.model.tal.context
CREATE_NO_WINDOW = 0x8000000
name="Text-To-Speech actions"
ENGINES={}
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Registering decorator
def ttsengine(name):
def inside_register(f):
ENGINES[name] = f
return f
return inside_register
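# Registration sketch: a hypothetical engine only needs can_run() and
# pronounce(); the decorator adds it to ENGINES under the given name, after
# which it can be selected via the 'tts-engine' preference (TTSEngine is
# defined further down in this module):
#   @ttsengine('null')
#   class NullTTSEngine(TTSEngine):
#       """Null TTSEngine (drops every sentence)."""
#       @staticmethod
#       def can_run():
#           return True
#       def pronounce(self, sentence):
#           return True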
def register(controller=None):
engine_name = config.data.preferences.get('tts-engine', 'auto')
selected = None
if engine_name == 'auto':
# Automatic configuration. Order is important.
for name in ('customarg', 'custom', 'espeak', 'macosx', 'festival', 'sapi', 'generic'):
c = ENGINES[name]
if c.can_run():
logger.info("TTS: Automatically using " + c.__doc__.splitlines()[0])
selected = c
break
else:
c = ENGINES.get(engine_name)
if c is None:
logger.warning("TTS: %s was specified but it does not exist. Using generic fallback. Please check your configuration." % c.__doc__.splitlines()[0])
selected = ENGINES['generic']
elif c.can_run():
logger.warning("TTS: Using %s as specified." % c.__doc__.splitlines()[0])
selected = c
else:
logger.warning("TTS: Using %s as specified, but it apparently cannot run. Please check your configuration." % c.__doc__.splitlines()[0])
selected = c
engine = selected(controller)
controller.register_action(RegisteredAction(
name="Pronounce",
method=engine.action_pronounce,
description=_("Pronounce a text"),
parameters={'message': _("String to pronounce.")},
defaults={'message': 'annotation/content/data'},
predefined={'message': (
( 'annotation/content/data', _("The annotation content") ),
)},
category='sound',
))
@ttsengine('generic')
class TTSEngine:
"""Generic TTSEngine.
"""
def __init__(self, controller=None):
self.controller=controller
self.gui=self.controller.gui
self.language=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return True
def parse_parameter(self, context, parameters, name, default_value):
"""Helper method used in actions.
"""
if name in parameters:
try:
result=context.evaluateValue(parameters[name])
except advene.model.tal.context.AdveneTalesException as e:
try:
rulename=context.evaluateValue('rule')
except advene.model.tal.context.AdveneTalesException:
rulename=_("Unknown rule")
logger.error(_("Rule %(rulename)s: Error in the evaluation of the parameter %(parametername)s:") % {'rulename': rulename,
'parametername': name})
logger.error(str(e)[:160])
result=default_value
else:
result=default_value
return result
def set_language(self, language):
self.language=language
def pronounce(self, sentence):
"""Engine-specific method.
"""
logger.debug("TTS: pronounce " + sentence)
return True
def action_pronounce (self, context, parameters):
"""Pronounce action.
"""
message=self.parse_parameter(context, parameters, 'message', _("No message..."))
self.pronounce(message)
return True
@ttsengine('festival')
class FestivalTTSEngine(TTSEngine):
"""Festival TTSEngine.
Note: If it is not the case (depends on the version), festival
must be configured to play audio through the ALSA subsystem, in
order to be able to mix it with the movie sound if necessary.
For this, in older Festival versions (at least until 1.4.3), the
~/.festivalrc file should contain:
(Parameter.set 'Audio_Command "aplay -q -c 1 -t raw -f s16 -r $SR $FILE")
(Parameter.set 'Audio_Method 'Audio_Command)
"""
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.festival_path=helper.find_in_path('festival')
self.aplay_path=helper.find_in_path('aplay')
if self.festival_path is None:
logger.warning(_("TTS disabled. Cannot find the application 'festival' in PATH"))
if self.aplay_path is None:
logger.warning(_("TTS disabled. Cannot find the application 'aplay' in PATH"))
self.festival_process=None
def init(self):
if self.festival_path is not None and self.aplay_path is not None:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
self.festival_process = subprocess.Popen([ self.festival_path, '--pipe' ], stdin=subprocess.PIPE, **kw)
# Configure festival to use aplay
self.festival_process.stdin.write("""(Parameter.set 'Audio_Command "%s -q -c 1 -t raw -f s16 -r $SR $FILE")\n""" % self.aplay_path)
self.festival_process.stdin.write("""(Parameter.set 'Audio_Method 'Audio_Command)\n""")
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path('festival') is not None
def pronounce (self, sentence):
try:
self.init()
if self.festival_process is not None:
self.festival_process.stdin.write('(SayText "%s")\n' % helper.unaccent(sentence))
except OSError as e:
logger.error("TTS Error: " + str(e.message))
return True
@ttsengine('macosx')
class MacOSXTTSEngine(TTSEngine):
"""MacOSX TTSEngine.
"""
@staticmethod
def can_run():
"""Can this engine run ?
"""
return config.data.os == 'darwin'
def pronounce (self, sentence):
subprocess.call( [ '/usr/bin/say', sentence.encode(config.data.preferences['tts-encoding'], 'ignore') ] )
return True
"""
Win32: install pytts + pywin32 (from sf.net) + mfc71.dll + spchapi.exe (from www.microsoft.com/reader/developer/downloads/tts.mspx)
On some flavors of Windows you can use:
import pyTTS
tts = pyTTS.Create()
tts.Speak('This is the sound of my voice.')
On Mac OS X you can use:
import os
http://farm.tucows.com/blog/_archives/2005/1/19/266813.html
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/114216
http://www.daniweb.com/code/snippet326.html
http://www.mindtrove.info/articles/pytts.html
"""
@ttsengine('espeak')
class EspeakTTSEngine(TTSEngine):
"""Espeak TTSEngine.
"""
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.espeak_path=helper.find_in_path('espeak')
if self.espeak_path is None and config.data.os == 'win32':
# Try c:\\Program Files\\eSpeak
if os.path.isdir('c:\\Program Files\\eSpeak'):
self.espeak_path='c:\\Program Files\\eSpeak\\command_line\\espeak.exe'
elif os.path.isdir('C:\\Program Files (x86)\\eSpeak'):
#winXp 64b
self.espeak_path='C:\\Program Files (x86)\\eSpeak\\command_line\\espeak.exe'
self.espeak_process=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return (os.path.isdir('c:\\Program Files\\eSpeak')
or os.path.isdir('C:\\Program Files (x86)\\eSpeak')
or helper.find_in_path('espeak') is not None)
def close(self):
"""Close the espeak process.
"""
if self.espeak_process is not None:
if config.data.os == 'win32':
import win32api
win32api.TerminateProcess(int(self.espeak_process._handle), -1)
else:
os.kill(self.espeak_process.pid, signal.SIGTERM)
self.espeak_process.wait()
self.espeak_process=None
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
# Need to restart espeak to use the new language
self.close()
self.language=lang
try:
if self.espeak_process is None:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
self.espeak_process = subprocess.Popen([ self.espeak_path, '-v', self.language ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw)
self.espeak_process.stdin.write((sentence + "\n").encode(config.data.preferences['tts-encoding'], 'ignore'))
except OSError as e:
logger.error("TTS Error: %s", str(e.message))
return True
@ttsengine('sapi')
class SAPITTSEngine(TTSEngine):
"""SAPI (win32) TTSEngine.
"""
# SAPI constants (from http://msdn.microsoft.com/en-us/library/aa914305.aspx):
SPF_ASYNC = (1 << 0)
SPF_PURGEBEFORESPEAK = (1 << 1)
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.sapi=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
try:
import win32com.client
voice = win32com.client.Dispatch("sapi.SPVoice")
except:
voice = None
return voice
def pronounce (self, sentence):
if self.sapi is None:
import win32com.client
self.sapi=win32com.client.Dispatch("sapi.SPVoice")
self.sapi.Speak( sentence.encode(config.data.preferences['tts-encoding'], 'ignore'), self.SPF_ASYNC | self.SPF_PURGEBEFORESPEAK )
return True
@ttsengine('custom')
class CustomTTSEngine(TTSEngine):
"""Custom TTSEngine.
It tries to run a 'prononce' ('prononce.bat' on win32) script,
which takes strings on its stdin and pronounces them.
"""
if config.data.os == 'win32':
prgname='prononce.bat'
else:
prgname='prononce'
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.prg_path=helper.find_in_path(CustomTTSEngine.prgname)
self.prg_process=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path(CustomTTSEngine.prgname) is not None
def close(self):
"""Close the process.
"""
if self.prg_process is not None:
if config.data.os == 'win32':
import win32api
win32api.TerminateProcess(int(self.prg_process._handle), -1)
else:
os.kill(self.prg_process.pid, signal.SIGTERM)
self.prg_process.wait()
self.prg_process=None
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
self.close()
self.language=lang
try:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
if self.prg_process is None:
self.prg_process = subprocess.Popen([ self.prg_path, '-v', self.language ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw)
self.prg_process.stdin.write((sentence + "\n").encode(config.data.preferences['tts-encoding'], 'ignore'))
except OSError as e:
logger.error("TTS Error: %s", str(e.message))
return True
@ttsengine('customarg')
class CustomArgTTSEngine(TTSEngine):
"""CustomArg TTSEngine.
It tries to run a 'prononcearg' ('prononcearg.bat' on win32) script,
which takes strings as arguments and pronounces them.
"""
if config.data.os == 'win32':
prgname='prononcearg.bat'
else:
prgname='prononcearg'
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.prg_path=helper.find_in_path(CustomArgTTSEngine.prgname)
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path(CustomArgTTSEngine.prgname) is not None
def close(self):
"""Close the process.
"""
pass
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
self.close()
self.language=lang
try:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
subprocess.Popen(str(" ".join([self.prg_path, '-v', self.language, '"%s"' % (sentence.replace('\n',' ').replace('"', '') + "\n")])).encode(config.data.preferences['tts-encoding'], 'ignore'), **kw)
except OSError as e:
try:
logger.error("TTS Error: %s", str(e.message))
except UnicodeDecodeError:
logger.error("TTS: Error decoding error message with standard encoding %s", e.message.encode('ascii', 'replace'))
return True
| gpl-2.0 |
d9pouces/StarterPyth | starterpyth/model.py | 1 | 13326 | # -*- coding=utf-8 -*-
import datetime
import os
import shutil
import subprocess
from jinja2 import ChoiceLoader
import pkg_resources
from six import u
from starterpyth.cliforms import BaseForm
from starterpyth.utils import binary_path, walk
from starterpyth.log import display, GREEN, CYAN, RED
from starterpyth.translation import ugettext as _
__author__ = 'flanker'
class Model(object):
name = None
template_includes = [('starterpyth', 'templates/includes')]
include_suffix = '_inc'
template_suffix = '_tpl'
class ExtraForm(BaseForm):
pass
def __init__(self, base_context):
"""
:param base_context: dictionary with the following keys:
string values
* project_name: explicit name of the project ( [a-zA-Z_\-]\w* )
* module_name: Python base module ( [a-z][\-_a-z0-9]* )
some boolean values:
* use_py2, use_py3: use Python 2 or Python 3
* use_py26, use_py27, use_py30, use_py31, use_py32, use_py33, use_py34, use_py35
* use_six, use_2to3: use six or 2to3 for Python 2&3 compatibility
"""
self.global_context = base_context
self.file_context = None
@property
def template_roots(self):
result = []
return result
def run(self, interactive=True):
project_root = self.global_context['project_root']
if os.path.exists(project_root):
if self.global_context['overwrite']:
if os.path.isdir(project_root):
shutil.rmtree(project_root)
else:
os.remove(project_root)
else:
display(_('Destination path already exists!'), color=RED, bold=True)
return
context = self.get_context()
self.global_context.update(context)
extra_form = self.get_extraform(interactive=interactive)
self.global_context.update(extra_form)
extra_context = self.get_extracontext()
self.global_context.update(extra_context)
filters = self.get_template_filters()
self.set_virtualenvs()
for modname, dirname in self.template_roots:
display('dirname %s' % dirname, color=CYAN)
env = self.get_environment(modname, dirname, filters)
self.write_files(modname, dirname, env)
def set_virtualenvs(self):
virtualenv_path = None
virtualenv_version = None
for k in ('26', '27', '30', '31', '32', '33', '34', '35'):
v = '%s.%s' % (k[0], k[1])
if self.global_context['create_venv%s' % k]:
if self.global_context['virtualenv_present']:
virtualenv_path = ('~/.virtualenvs/%s%s' % (self.global_context['module_name'], k))
python_path = binary_path('python%s' % v)
subprocess.check_call(['virtualenv', os.path.expanduser(virtualenv_path), '-p', python_path])
cmd_list = [os.path.join(os.path.expanduser(virtualenv_path), 'bin', 'python'), '--version']
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
content = p.communicate()
if content[0]: # Python 3 prints version on stdout
# noinspection PyUnresolvedReferences
virtualenv_version = content[0].decode('utf-8').strip()
else: # Python 2 prints version on stderr
# noinspection PyUnresolvedReferences
virtualenv_version = content[1].decode('utf-8').strip()
self.global_context['virtualenv'] = (virtualenv_path, virtualenv_version)
# noinspection PyMethodMayBeStatic
def get_context(self):
values = {'encoding': 'utf-8', 'entry_points': {}, 'cmdclass': {}, 'ext_modules': [],
'install_requires': [], 'setup_requires': [], 'classifiers': []}
if self.global_context['use_six']:
values['install_requires'] += ['six', 'setuptools>=1.0', ]
values['setup_requires'] += ['six', 'setuptools>=1.0', ]
license_fd = pkg_resources.resource_stream('starterpyth',
'data/licenses/%s.txt' % self.global_context['license'])
values['license_content'] = license_fd.read().decode('utf-8')
values['copyright'] = u('%d, %s') % (datetime.date.today().year, self.global_context['author'])
self.global_context['used_python_versions'] = []
values['classifiers'] += ['Development Status :: 3 - Alpha',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
]
lic = {'CeCILL-A': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'CeCILL-B': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'BSD-2-clauses': 'License :: OSI Approved :: BSD License',
'Apache-2': 'License :: OSI Approved :: Apache Software License',
'CeCILL-C': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'GPL-2': 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'GPL-3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'LGPL-2': 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
'LGPL-3': 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'MIT': 'License :: OSI Approved :: MIT License',
'APSL': 'License :: OSI Approved :: Apple Public Source License',
'PSFL': 'License :: OSI Approved :: Python Software Foundation License',
}
values['classifiers'] += [lic[self.global_context['license']]]
for k in ('26', '27', '30', '31', '32', '33', '34', '35'):
v = '%s.%s' % (k[0], k[1])
if self.global_context['use_py%s' % k]:
values['classifiers'] += ['Programming Language :: Python :: %s' % v]
self.global_context['used_python_versions'].append(v)
if not self.global_context['use_py2']:
values['classifiers'] += ['Programming Language :: Python :: 3 :: Only']
elif not self.global_context['use_py3']:
values['classifiers'] += ['Programming Language :: Python :: 2 :: Only']
values['tox_used_python_versions'] = [('py' + x[0] + x[-1]) for x in
self.global_context['used_python_versions']]
return values
# noinspection PyMethodMayBeStatic
def get_extracontext(self):
return {}
def get_extraform(self, interactive=True):
form = self.ExtraForm(extra_env=self.global_context)
values = form.read(interactive=interactive)
return values
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def process_directory_or_file(self, src_path, dst_path, name, is_directory):
"""
:param src_path: source path, relative to python module
:param dst_path: absolute destination path
:param name: basename of the file or directory to be processed
:return:
"""
if name in ['.svn', '.git', '.hg', 'CVS'] or name[-len(self.include_suffix):] == self.include_suffix:
return False
return True
# noinspection PyMethodMayBeStatic
def get_environment(self, modname, dirname, filters):
"""
Return a valid Jinja2 environment (with filters)
:param modname:
:param dirname:
:param filters: dictionary of extra filters for jinja2
:return:
"""
from jinja2 import Environment, PackageLoader
loaders = [PackageLoader(modname, dirname)]
for modname, dirname in self.template_includes:
loaders.append(PackageLoader(modname, dirname))
loader = ChoiceLoader(loaders)
env = Environment(loader=loader)
env.filters.update(filters)
return env
def write_files(self, modname, dirname, env):
"""
Write all templated or raw files to the new project. All template are rendered twice.
This behaviour allows to determine which functions must be imported at the beginning of Python files
:param modname: module containing template files
:param dirname: dirname containing template files in the module `modname`
:param env: Jinja2 environment
:return:
"""
from jinja2 import Template
project_root = self.global_context['project_root']
# creation of the project directory if needed
if not os.path.isdir(project_root):
os.makedirs(project_root)
display(_('Directory %(f)s created.') % {'f': project_root}, color=GREEN)
# noinspection PyTypeChecker
prefix_len = len(dirname) + 1
def get_path(root_, name):
"""return relative source path (to template dir) and absolute destination path"""
src_path_ = (root_ + '/' + name)[prefix_len:]
dst_path_ = src_path_
if os.sep != '/':
dst_path_ = dst_path_.replace('/', os.sep)
if dst_path_.find('{') > -1: # the name of the file is templated
dst_path_ = Template(dst_path_).render(**self.global_context)
if dst_path_[-len(self.template_suffix):] == self.template_suffix:
dst_path_ = dst_path_[:-len(self.template_suffix)]
return src_path_, os.path.join(project_root, dst_path_)
# walk through all files (raw and templates) in modname/dirname and write them to destination
for root, dirnames, filenames in walk(modname, dirname):
for dirname in dirnames:
src_path, dst_path = get_path(root, dirname)
if not self.process_directory_or_file(src_path, dst_path, dirname, True):
continue
if not os.path.isdir(dst_path):
os.makedirs(dst_path)
display(_('Directory %(f)s created.') % {'f': dst_path}, color=GREEN)
for filename in filenames:
src_path, dst_path = get_path(root, filename)
if not self.process_directory_or_file(src_path, dst_path, filename, False):
continue
if not os.path.isdir(os.path.dirname(dst_path)):
continue
if filename[-len(self.template_suffix):] == self.template_suffix:
self.file_context = {'render_pass': 1}
template = env.get_template(src_path)
f_out = open(dst_path, 'wb')
self.file_context.update(self.global_context)
template.render(**self.file_context)  # first pass: only populates the counters used by the filters
self.file_context['render_pass'] = 2
template_content = template.render(**self.file_context).encode('utf-8')
f_out.write(template_content)
f_out.close()
display(_('Template %(f)s written.') % {'f': dst_path}, color=GREEN)
else:
f_out = open(dst_path, 'wb')
f_in = pkg_resources.resource_stream(modname, root + '/' + filename)
data = f_in.read(10240)
while data:
f_out.write(data)
data = f_in.read(10240)
f_in.close()
f_out.close()
display(_('File %(f)s written.') % {'f': dst_path}, color=GREEN)
def increment(self, key):
self.file_context[key] = self.file_context.get(key, 0) + 1
def text(self, value):
return self.raw_text(value)
def raw_text(self, value):
if '\n' in value:
prefix = '"""'
elif "'" not in value:
prefix = "'"
elif '"' not in value:
prefix = '"'
else:
value = value.replace("'", "\\'")
prefix = "'"
self.increment('counter_unicode_literals')
return '%s%s%s' % (prefix, value, prefix)
def docstring(self, value):
self.increment('counter_unicode_literals')
return '"""%s"""' % value
def translate(self, value):
if not self.global_context['use_i18n']:
return self.text(value)
self.increment('counter_i18n')
return "_(%s)" % self.text(value)
def binary(self, value):
return 'b' + self.raw_text(value)
def get_template_filters(self):
return {'text': self.text, 'binary': self.binary, 'repr': lambda x: repr(x), 'translate': self.translate,
'docstring': self.docstring, 'raw_text': self.raw_text}
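# Filter usage sketch inside a *_tpl template (illustrative values):
#   {{ project_name|text }}  -> 'my project' (quoted as Python source)
#   {{ 'Hello'|translate }}  -> _('Hello') when use_i18n is enabled,
#                               'Hello' otherwise
#   {{ 'magic'|binary }}     -> b'magic'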
if __name__ == '__main__':
import doctest
doctest.testmod() | gpl-2.0 |
dh4nav/lammps | tools/python/pizza/dump.py | 21 | 40376 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# dump tool
oneline = "Read, write, manipulate dump files and particle attributes"
docstr = """
d = dump("dump.one") read in one or more dump files
d = dump("dump.1 dump.2.gz") can be gzipped
d = dump("dump.*") wildcard expands to multiple files
d = dump("dump.*",0) two args = store filenames, but don't read
incomplete and duplicate snapshots are deleted
if atoms have 5 or 8 columns, assign id,type,x,y,z (ix,iy,iz)
atoms will be unscaled if stored in files as scaled
time = d.next() read next snapshot from dump files
used with 2-argument constructor to allow reading snapshots one-at-a-time
snapshot will be skipped only if another snapshot has same time stamp
return time stamp of snapshot read
return -1 if no snapshots left or last snapshot is incomplete
no column name assignment or unscaling is performed
d.map(1,"id",3,"x") assign names to atom columns (1-N)
not needed if dump file is self-describing
d.tselect.all() select all timesteps
d.tselect.one(N) select only timestep N
d.tselect.none() deselect all timesteps
d.tselect.skip(M) select every Mth step
d.tselect.test("$t >= 100 and $t < 10000") select matching timesteps
d.delete() delete non-selected timesteps
selecting a timestep also selects all atoms in the timestep
skip() and test() only select from currently selected timesteps
test() uses a Python Boolean expression with $t for timestep value
Python comparison syntax: == != < > <= >= and or
d.aselect.all() select all atoms in all steps
d.aselect.all(N) select all atoms in one step
d.aselect.test("$id > 100 and $type == 2") select match atoms in all steps
d.aselect.test("$id > 100 and $type == 2",N) select matching atoms in one step
all() with no args selects atoms from currently selected timesteps
test() with one arg selects atoms from currently selected timesteps
test() sub-selects from currently selected atoms
test() uses a Python Boolean expression with $ for atom attributes
Python comparison syntax: == != < > <= >= and or
$name must end with a space
d.write("file") write selected steps/atoms to dump file
d.write("file",head,app) write selected steps/atoms to dump file
d.scatter("tmp") write selected steps/atoms to multiple files
write() can be specified with 2 additional flags
head = 0/1 for no/yes snapshot header, app = 0/1 for write vs append
scatter() files are given timestep suffix: e.g. tmp.0, tmp.100, etc
d.scale() scale x,y,z to 0-1 for all timesteps
d.scale(100) scale atom coords for timestep N
d.unscale() unscale x,y,z to box size to all timesteps
d.unscale(1000) unscale atom coords for timestep N
d.wrap() wrap x,y,z into periodic box via ix,iy,iz
d.unwrap() unwrap x,y,z out of box via ix,iy,iz
d.owrap("other") wrap x,y,z to same image as another atom
d.sort() sort atoms by atom ID in all selected steps
d.sort("x") sort atoms by column value in all steps
d.sort(1000) sort atoms in timestep N
scale(), unscale(), wrap(), unwrap(), owrap() operate on all steps and atoms
wrap(), unwrap(), owrap() require ix,iy,iz be defined
owrap() requires a column be defined which contains an atom ID
name of that column is the argument to owrap()
x,y,z for each atom is wrapped to same image as the associated atom ID
useful for wrapping all molecule's atoms the same so it is contiguous
m1,m2 = d.minmax("type") find min/max values for a column
d.set("$ke = $vx * $vx + $vy * $vy") set a column to a computed value
d.setv("type",vector) set a column to a vector of values
d.spread("ke",N,"color") 2nd col = N ints spread over 1st col
d.clone(1000,"color") clone timestep N values to other steps
minmax() operates on selected timesteps and atoms
set() operates on selected timesteps and atoms
left hand side column is created if necessary
left-hand side column is unset or unchanged for non-selected atoms
equation is in Python syntax
use $ for column names, $name must end with a space
setv() operates on selected timesteps and atoms
if column label does not exist, column is created
values in vector are assigned sequentially to atoms, so may want to sort()
length of vector must match # of selected atoms
spread() operates on selected timesteps and atoms
min and max are found for 1st specified column across all selected atoms
atom's value is linear mapping (1-N) between min and max
that is stored in 2nd column (created if needed)
useful for creating a color map
clone() operates on selected timesteps and atoms
values at every timestep are set to value at timestep N for that atom ID
useful for propagating a color map
t = d.time() return vector of selected timestep values
fx,fy,... = d.atom(100,"fx","fy",...) return vector(s) for atom ID N
fx,fy,... = d.vecs(1000,"fx","fy",...) return vector(s) for timestep N
atom() returns vectors with one value for each selected timestep
vecs() returns vectors with one value for each selected atom in the timestep
index,time,flag = d.iterator(0/1) loop over dump snapshots
time,box,atoms,bonds,tris = d.viz(index) return list of viz objects
d.atype = "color" set column returned as "type" by viz
d.extra("dump.bond") read bond list from dump file
d.extra(data) extract bond/tri/line list from data
iterator() loops over selected timesteps
iterator() called with arg = 0 first time, with arg = 1 on subsequent calls
index = index within dump object (0 to # of snapshots - 1)
time = timestep value
flag = -1 when iteration is done, 1 otherwise
viz() returns info for selected atoms for specified timestep index
time = timestep value
box = [xlo,ylo,zlo,xhi,yhi,zhi]
atoms = id,type,x,y,z for each atom as 2d array
bonds = id,type,x1,y1,z1,x2,y2,z2,t1,t2 for each bond as 2d array
if extra() was used to define bonds, else empty list
tris = id,type,x1,y1,z1,x2,y2,z2,x3,y3,z3,nx,ny,nz for each tri as 2d array
if extra() was used to define tris, else empty list
lines = id,type,x1,y1,z1,x2,y2,z2 for each line as 2d array
if extra() was used to define lines, else empty list
atype is column name viz() will return as atom type (def = "type")
extra() stores list of bonds/tris/lines to return each time viz() is called
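Example workflow (an illustrative sketch; the dump file name is hypothetical):
  d = dump("dump.one") read in one dump file
  d.tselect.all() select every snapshot
  d.aselect.test("$type == 2") sub-select only type-2 atoms
  t = d.time() vector of selected timestep values
  d.write("dump.type2") write the selection to a new dump file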
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): allow use of NumPy or Numeric
# ToDo list
# try to optimize this line in read_snap: words += f.readline().split()
# allow $name in aselect.test() and set() to end with non-space
# should next() snapshot be auto-unscaled ?
# Variables
# flist = list of dump file names
# increment = 1 if reading snapshots one-at-a-time
# nextfile = which file to read from via next()
# eof = ptr into current file for where to read via next()
# nsnaps = # of snapshots
# nselect = # of selected snapshots
# snaps = list of snapshots
# names = dictionary of column names:
# key = "id", value = column # (0 to M-1)
# tselect = class for time selection
# aselect = class for atom selection
# atype = name of vector used as atom type by viz extract
# bondflag = 0 if no bonds, 1 if they are defined statically
# bondlist = static list of bonds to return via viz() for all snapshots
# only a list of atom pairs, coords have to be created for each snapshot
# triflag = 0 if no tris, 1 if they are defined statically, 2 if dynamic
# trilist = static list of tris to return via viz() for all snapshots
# lineflag = 0 if no lines, 1 if they are defined statically
# linelist = static list of lines to return via viz() for all snapshots
# Snap = one snapshot
# time = time stamp
# tselect = 0/1 if this snapshot selected
# natoms = # of atoms
# nselect = # of selected atoms in this snapshot
# aselect[i] = 0/1 for each atom
# xlo,xhi,ylo,yhi,zlo,zhi = box bounds (float)
# atoms[i][j] = 2d array of floats, i = 0 to natoms-1, j = 0 to ncols-1
# Imports and external programs
import sys, commands, re, glob, types
from os import popen
from math import * # any function could be used by set()
try:
import numpy as np
oldnumeric = False
except:
import Numeric as np
oldnumeric = True
try: from DEFAULTS import PIZZA_GUNZIP
except: PIZZA_GUNZIP = "gunzip"
# Class definition
class dump:
# --------------------------------------------------------------------
def __init__(self,*list):
self.snaps = []
self.nsnaps = self.nselect = 0
self.names = {}
self.tselect = tselect(self)
self.aselect = aselect(self)
self.atype = "type"
self.bondflag = 0
self.bondlist = []
self.triflag = 0
self.trilist = []
self.triobj = 0
self.lineflag = 0
self.linelist = []
# flist = list of all dump file names
words = list[0].split()
self.flist = []
for word in words: self.flist += glob.glob(word)
if len(self.flist) == 0 and len(list) == 1:
raise StandardError,"no dump file specified"
if len(list) == 1:
self.increment = 0
self.read_all()
else:
self.increment = 1
self.nextfile = 0
self.eof = 0
# --------------------------------------------------------------------
def read_all(self):
# read all snapshots from each file
# test for gzipped files
for file in self.flist:
if file[-3:] == ".gz":
f = popen("%s -c %s" % (PIZZA_GUNZIP,file),'r')
else: f = open(file)
snap = self.read_snapshot(f)
while snap:
self.snaps.append(snap)
print snap.time,
sys.stdout.flush()
snap = self.read_snapshot(f)
f.close()
print
# sort entries by timestep, cull duplicates
self.snaps.sort(self.compare_time)
self.cull()
self.nsnaps = len(self.snaps)
print "read %d snapshots" % self.nsnaps
# select all timesteps and atoms
self.tselect.all()
# set default names for atom columns if file wasn't self-describing
if len(self.snaps) == 0:
print "no column assignments made"
elif len(self.names):
print "assigned columns:",self.names2str()
    elif self.snaps[0].atoms is None:
print "no column assignments made"
elif len(self.snaps[0].atoms[0]) == 5:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z")
print "assigned columns:",self.names2str()
elif len(self.snaps[0].atoms[0]) == 8:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z",6,"ix",7,"iy",8,"iz")
print "assigned columns:",self.names2str()
else:
print "no column assignments made"
# if snapshots are scaled, unscale them
if (not self.names.has_key("x")) or \
(not self.names.has_key("y")) or \
(not self.names.has_key("z")):
print "no unscaling could be performed"
elif self.nsnaps > 0:
if self.scaled(self.nsnaps-1): self.unscale()
else: print "dump is already unscaled"
# --------------------------------------------------------------------
# read next snapshot from list of files
def next(self):
if not self.increment: raise StandardError,"cannot read incrementally"
# read next snapshot in current file using eof as pointer
# if fail, try next file
# if new snapshot time stamp already exists, read next snapshot
while 1:
f = open(self.flist[self.nextfile],'rb')
f.seek(self.eof)
snap = self.read_snapshot(f)
if not snap:
self.nextfile += 1
if self.nextfile == len(self.flist): return -1
f.close()
self.eof = 0
continue
self.eof = f.tell()
f.close()
try:
self.findtime(snap.time)
continue
except: break
# select the new snapshot with all its atoms
self.snaps.append(snap)
snap = self.snaps[self.nsnaps]
snap.tselect = 1
snap.nselect = snap.natoms
for i in xrange(snap.natoms): snap.aselect[i] = 1
self.nsnaps += 1
self.nselect += 1
return snap.time
# --------------------------------------------------------------------
# read a single snapshot from file f
# return snapshot or 0 if failed
# assign column names if not already done and file is self-describing
# convert xs,xu to x
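  # the reader expects the standard LAMMPS text dump layout sketched
  # below (illustrative values; the column list after "ITEM: ATOMS"
  # varies per file and is absent in older, non-self-describing dumps):
  #   ITEM: TIMESTEP
  #   100
  #   ITEM: NUMBER OF ATOMS
  #   2
  #   ITEM: BOX BOUNDS
  #   0.0 10.0 (xlo xhi, then a line each for ylo yhi and zlo zhi)
  #   0.0 10.0
  #   0.0 10.0
  #   ITEM: ATOMS id type xs ys zs
  #   1 1 0.25 0.25 0.25
  #   2 2 0.75 0.75 0.75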
def read_snapshot(self,f):
try:
snap = Snap()
item = f.readline()
snap.time = int(f.readline().split()[0]) # just grab 1st field
item = f.readline()
snap.natoms = int(f.readline())
snap.aselect = np.zeros(snap.natoms)
item = f.readline()
words = f.readline().split()
snap.xlo,snap.xhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.ylo,snap.yhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.zlo,snap.zhi = float(words[0]),float(words[1])
item = f.readline()
if len(self.names) == 0:
words = item.split()[2:]
if len(words):
for i in range(len(words)):
if words[i] == "xs" or words[i] == "xu":
self.names["x"] = i
elif words[i] == "ys" or words[i] == "yu":
self.names["y"] = i
elif words[i] == "zs" or words[i] == "zu":
self.names["z"] = i
else: self.names[words[i]] = i
if snap.natoms:
words = f.readline().split()
ncol = len(words)
for i in xrange(1,snap.natoms):
words += f.readline().split()
floats = map(float,words)
if oldnumeric: atoms = np.zeros((snap.natoms,ncol),np.Float)
else: atoms = np.zeros((snap.natoms,ncol),np.float)
start = 0
stop = ncol
for i in xrange(snap.natoms):
atoms[i] = floats[start:stop]
start = stop
stop += ncol
else: atoms = None
snap.atoms = atoms
return snap
except:
return 0
# --------------------------------------------------------------------
# decide if snapshot i is scaled/unscaled from coords of first and last atom
def scaled(self,i):
ix = self.names["x"]
iy = self.names["y"]
iz = self.names["z"]
natoms = self.snaps[i].natoms
if natoms == 0: return 0
x1 = self.snaps[i].atoms[0][ix]
y1 = self.snaps[i].atoms[0][iy]
z1 = self.snaps[i].atoms[0][iz]
x2 = self.snaps[i].atoms[natoms-1][ix]
y2 = self.snaps[i].atoms[natoms-1][iy]
z2 = self.snaps[i].atoms[natoms-1][iz]
if x1 >= -0.1 and x1 <= 1.1 and y1 >= -0.1 and y1 <= 1.1 and \
z1 >= -0.1 and z1 <= 1.1 and x2 >= -0.1 and x2 <= 1.1 and \
y2 >= -0.1 and y2 <= 1.1 and z2 >= -0.1 and z2 <= 1.1:
return 1
else: return 0
# --------------------------------------------------------------------
# map atom column names
def map(self,*pairs):
if len(pairs) % 2 != 0:
raise StandardError, "dump map() requires pairs of mappings"
for i in range(0,len(pairs),2):
j = i + 1
self.names[pairs[j]] = pairs[i]-1
  # --------------------------------------------------------------------
  # delete unselected snapshots
def delete(self):
ndel = i = 0
while i < self.nsnaps:
if not self.snaps[i].tselect:
del self.snaps[i]
self.nsnaps -= 1
ndel += 1
else: i += 1
print "%d snapshots deleted" % ndel
print "%d snapshots remaining" % self.nsnaps
# --------------------------------------------------------------------
# scale coords to 0-1 for all snapshots or just one
def scale(self,*list):
if len(list) == 0:
print "Scaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.scale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.scale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def scale_one(self,snap,x,y,z):
    if snap.atoms is None: return
xprdinv = 1.0 / (snap.xhi - snap.xlo)
yprdinv = 1.0 / (snap.yhi - snap.ylo)
zprdinv = 1.0 / (snap.zhi - snap.zlo)
atoms = snap.atoms
atoms[:,x] = (atoms[:,x] - snap.xlo) * xprdinv
atoms[:,y] = (atoms[:,y] - snap.ylo) * yprdinv
atoms[:,z] = (atoms[:,z] - snap.zlo) * zprdinv
# --------------------------------------------------------------------
# unscale coords from 0-1 to box size for all snapshots or just one
def unscale(self,*list):
if len(list) == 0:
print "Unscaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.unscale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.unscale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def unscale_one(self,snap,x,y,z):
    if snap.atoms is None: return
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] = snap.xlo + atoms[:,x]*xprd
atoms[:,y] = snap.ylo + atoms[:,y]*yprd
atoms[:,z] = snap.zlo + atoms[:,z]*zprd
# --------------------------------------------------------------------
# wrap coords from outside box to inside
def wrap(self):
print "Wrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] -= atoms[:,ix]*xprd
atoms[:,y] -= atoms[:,iy]*yprd
atoms[:,z] -= atoms[:,iz]*zprd
# --------------------------------------------------------------------
# unwrap coords from inside box to outside
def unwrap(self):
print "Unwrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] += atoms[:,ix]*xprd
atoms[:,y] += atoms[:,iy]*yprd
atoms[:,z] += atoms[:,iz]*zprd
# --------------------------------------------------------------------
# wrap coords to same image as atom ID stored in "other" column
def owrap(self,other):
print "Wrapping to other ..."
id = self.names["id"]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
iother = self.names[other]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
ids = {}
for i in xrange(snap.natoms):
ids[atoms[i][id]] = i
for i in xrange(snap.natoms):
j = ids[atoms[i][iother]]
atoms[i][x] += (atoms[i][ix]-atoms[j][ix])*xprd
atoms[i][y] += (atoms[i][iy]-atoms[j][iy])*yprd
atoms[i][z] += (atoms[i][iz]-atoms[j][iz])*zprd
# --------------------------------------------------------------------
# convert column names assignment to a string, in column order
def names2str(self):
ncol = len(self.snaps[0].atoms[0])
pairs = self.names.items()
values = self.names.values()
str = ""
for i in xrange(ncol):
if i in values: str += pairs[values.index(i)][0] + ' '
return str
# --------------------------------------------------------------------
# sort atoms by atom ID in all selected timesteps by default
# if arg = string, sort all steps by that column
# if arg = numeric, sort atoms in single step
def sort(self,*list):
if len(list) == 0:
print "Sorting selected snapshots ..."
id = self.names["id"]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
elif type(list[0]) is types.StringType:
print "Sorting selected snapshots by %s ..." % list[0]
id = self.names[list[0]]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
else:
i = self.findtime(list[0])
id = self.names["id"]
self.sort_one(self.snaps[i],id)
# --------------------------------------------------------------------
# sort a single snapshot by ID column
def sort_one(self,snap,id):
atoms = snap.atoms
ids = atoms[:,id]
ordering = np.argsort(ids)
for i in xrange(len(atoms[0])):
atoms[:,i] = np.take(atoms[:,i],ordering)
# --------------------------------------------------------------------
# write a single dump file from current selection
def write(self,file,header=1,append=0):
if len(self.snaps): namestr = self.names2str()
if not append: f = open(file,"w")
else: f = open(file,"a")
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
if header:
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# write one dump file per snapshot from current selection
def scatter(self,root):
if len(self.snaps): namestr = self.names2str()
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
file = root + "." + str(snap.time)
f = open(file,"w")
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# find min/max across all selected snapshots/atoms for a particular column
def minmax(self,colname):
icol = self.names[colname]
min = 1.0e20
max = -min
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] < min: min = atoms[i][icol]
if atoms[i][icol] > max: max = atoms[i][icol]
return (min,max)
# --------------------------------------------------------------------
# set a column value via an equation for all selected snapshots
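  # e.g. d.set("$ke = $vx * $vx + $vy * $vy") evaluates the right-hand
  # side per atom and stores it in column "ke" (created if absent);
  # the column names here are the illustrative ones from the docstring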
def set(self,eq):
print "Setting ..."
pattern = "\$\w*"
list = re.findall(pattern,eq)
lhs = list[0][1:]
if not self.names.has_key(lhs):
self.newcolumn(lhs)
for item in list:
name = item[1:]
column = self.names[name]
insert = "snap.atoms[i][%d]" % (column)
eq = eq.replace(item,insert)
ceq = compile(eq,'','single')
for snap in self.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if snap.aselect[i]: exec ceq
# --------------------------------------------------------------------
# set a column value via an input vec for all selected snapshots/atoms
def setv(self,colname,vec):
print "Setting ..."
if not self.names.has_key(colname):
self.newcolumn(colname)
icol = self.names[colname]
for snap in self.snaps:
if not snap.tselect: continue
if snap.nselect != len(vec):
raise StandardError,"vec length does not match # of selected atoms"
atoms = snap.atoms
m = 0
for i in xrange(snap.natoms):
if snap.aselect[i]:
atoms[i][icol] = vec[m]
m += 1
# --------------------------------------------------------------------
# clone value in col across selected timesteps for atoms with same ID
def clone(self,nstep,col):
istep = self.findtime(nstep)
icol = self.names[col]
id = self.names["id"]
ids = {}
for i in xrange(self.snaps[istep].natoms):
ids[self.snaps[istep].atoms[i][id]] = i
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
j = ids[atoms[i][id]]
atoms[i][icol] = self.snaps[istep].atoms[j][icol]
# --------------------------------------------------------------------
# values in old column are spread as ints from 1-N and assigned to new column
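  # e.g. d.spread("ke",10,"color") maps each atom's ke value to an int
  # 1-10 and stores it in column "color" (the call shown in the docstring)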
def spread(self,old,n,new):
iold = self.names[old]
if not self.names.has_key(new): self.newcolumn(new)
inew = self.names[new]
min,max = self.minmax(old)
print "min/max = ",min,max
gap = max - min
invdelta = n/gap
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
ivalue = int((atoms[i][iold] - min) * invdelta) + 1
if ivalue > n: ivalue = n
if ivalue < 1: ivalue = 1
atoms[i][inew] = ivalue
# --------------------------------------------------------------------
# return vector of selected snapshot time stamps
def time(self):
vec = self.nselect * [0]
i = 0
for snap in self.snaps:
if not snap.tselect: continue
vec[i] = snap.time
i += 1
return vec
# --------------------------------------------------------------------
# extract vector(s) of values for atom ID n at each selected timestep
def atom(self,n,*list):
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(self.nselect * [0])
ncol = len(columns)
id = self.names["id"]
m = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if atoms[i][id] == n: break
if atoms[i][id] != n:
raise StandardError, "could not find atom ID in snapshot"
for j in xrange(ncol):
values[j][m] = atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# extract vector(s) of values for selected atoms at chosen timestep
def vecs(self,n,*list):
snap = self.snaps[self.findtime(n)]
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(snap.nselect * [0])
ncol = len(columns)
m = 0
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
for j in xrange(ncol):
values[j][m] = snap.atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# add a new column to every snapshot and set value to 0
# set the name of the column to str
def newcolumn(self,str):
ncol = len(self.snaps[0].atoms[0])
self.map(ncol+1,str)
for snap in self.snaps:
atoms = snap.atoms
if oldnumeric: newatoms = np.zeros((snap.natoms,ncol+1),np.Float)
else: newatoms = np.zeros((snap.natoms,ncol+1),np.float)
newatoms[:,0:ncol] = snap.atoms
snap.atoms = newatoms
# --------------------------------------------------------------------
# sort snapshots on time stamp
def compare_time(self,a,b):
if a.time < b.time:
return -1
elif a.time > b.time:
return 1
else:
return 0
# --------------------------------------------------------------------
# delete successive snapshots with duplicate time stamp
def cull(self):
i = 1
while i < len(self.snaps):
if self.snaps[i].time == self.snaps[i-1].time:
del self.snaps[i]
else:
i += 1
# --------------------------------------------------------------------
# iterate over selected snapshots
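  # typical caller pattern (a sketch):
  #   index,time,flag = d.iterator(0)
  #   while flag != -1:
  #     ... process snapshot index ...
  #     index,time,flag = d.iterator(1)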
def iterator(self,flag):
start = 0
if flag: start = self.iterate + 1
for i in xrange(start,self.nsnaps):
if self.snaps[i].tselect:
self.iterate = i
return i,self.snaps[i].time,1
return 0,0,-1
# --------------------------------------------------------------------
# return list of atoms to viz for snapshot isnap
# augment with bonds, tris, lines if extra() was invoked
def viz(self,isnap):
snap = self.snaps[isnap]
time = snap.time
box = [snap.xlo,snap.ylo,snap.zlo,snap.xhi,snap.yhi,snap.zhi]
id = self.names["id"]
type = self.names[self.atype]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
# create atom list needed by viz from id,type,x,y,z
# need Numeric/Numpy mode here
atoms = []
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
atom = snap.atoms[i]
atoms.append([atom[id],atom[type],atom[x],atom[y],atom[z]])
# create list of current bond coords from static bondlist
# alist = dictionary of atom IDs for atoms list
# lookup bond atom IDs in alist and grab their coords
# try is used since some atoms may be unselected
# any bond with unselected atom is not returned to viz caller
# need Numeric/Numpy mode here
bonds = []
if self.bondflag:
alist = {}
for i in xrange(len(atoms)): alist[int(atoms[i][0])] = i
for bond in self.bondlist:
try:
i = alist[bond[2]]
j = alist[bond[3]]
atom1 = atoms[i]
atom2 = atoms[j]
bonds.append([bond[0],bond[1],atom1[2],atom1[3],atom1[4],
atom2[2],atom2[3],atom2[4],atom1[1],atom2[1]])
except: continue
tris = []
if self.triflag:
if self.triflag == 1: tris = self.trilist
elif self.triflag == 2:
timetmp,boxtmp,atomstmp,bondstmp, \
tris,linestmp = self.triobj.viz(time,1)
lines = []
if self.lineflag: lines = self.linelist
return time,box,atoms,bonds,tris,lines
# --------------------------------------------------------------------
def findtime(self,n):
for i in xrange(self.nsnaps):
if self.snaps[i].time == n: return i
raise StandardError, "no step %d exists" % n
# --------------------------------------------------------------------
# return maximum box size across all selected snapshots
def maxbox(self):
xlo = ylo = zlo = None
xhi = yhi = zhi = None
for snap in self.snaps:
if not snap.tselect: continue
      if xlo is None or snap.xlo < xlo: xlo = snap.xlo
      if xhi is None or snap.xhi > xhi: xhi = snap.xhi
      if ylo is None or snap.ylo < ylo: ylo = snap.ylo
      if yhi is None or snap.yhi > yhi: yhi = snap.yhi
      if zlo is None or snap.zlo < zlo: zlo = snap.zlo
      if zhi is None or snap.zhi > zhi: zhi = snap.zhi
return [xlo,ylo,zlo,xhi,yhi,zhi]
# --------------------------------------------------------------------
# return maximum atom type across all selected snapshots and atoms
def maxtype(self):
icol = self.names["type"]
max = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] > max: max = atoms[i][icol]
return int(max)
# --------------------------------------------------------------------
# grab bonds/tris/lines from another object
def extra(self,arg):
# read bonds from bond dump file
if type(arg) is types.StringType:
try:
f = open(arg,'r')
item = f.readline()
time = int(f.readline())
item = f.readline()
nbonds = int(f.readline())
item = f.readline()
if not re.search("BONDS",item):
raise StandardError, "could not read bonds from dump file"
words = f.readline().split()
ncol = len(words)
for i in xrange(1,nbonds):
words += f.readline().split()
f.close()
# convert values to int and absolute value since can be negative types
if oldnumeric: bondlist = np.zeros((nbonds,4),np.Int)
else: bondlist = np.zeros((nbonds,4),np.int)
ints = [abs(int(value)) for value in words]
start = 0
stop = 4
for i in xrange(nbonds):
bondlist[i] = ints[start:stop]
start += ncol
stop += ncol
        if len(bondlist):  # bondlist is an array; test its length, not its truth value
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not read from bond dump file"
# request bonds from data object
elif type(arg) is types.InstanceType and ".data" in str(arg.__class__):
try:
bondlist = []
bondlines = arg.sections["Bonds"]
for line in bondlines:
words = line.split()
bondlist.append([int(words[0]),int(words[1]),
int(words[2]),int(words[3])])
if bondlist:
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not extract bonds from data object"
# request tris/lines from cdata object
elif type(arg) is types.InstanceType and ".cdata" in str(arg.__class__):
try:
tmp,tmp,tmp,tmp,tris,lines = arg.viz(0)
if tris:
self.triflag = 1
self.trilist = tris
if lines:
self.lineflag = 1
self.linelist = lines
except:
raise StandardError,"could not extract tris/lines from cdata object"
# request tris from mdump object
elif type(arg) is types.InstanceType and ".mdump" in str(arg.__class__):
try:
self.triflag = 2
self.triobj = arg
except:
raise StandardError,"could not extract tris from mdump object"
else:
raise StandardError,"unrecognized argument to dump.extra()"
# --------------------------------------------------------------------
def compare_atom(self,a,b):
if a[0] < b[0]:
return -1
elif a[0] > b[0]:
return 1
else:
return 0
# --------------------------------------------------------------------
# one snapshot
class Snap:
pass
# --------------------------------------------------------------------
# time selection class
class tselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self):
data = self.data
for snap in data.snaps:
snap.tselect = 1
data.nselect = len(data.snaps)
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def one(self,n):
data = self.data
for snap in data.snaps:
snap.tselect = 0
i = data.findtime(n)
data.snaps[i].tselect = 1
data.nselect = 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def none(self):
data = self.data
for snap in data.snaps:
snap.tselect = 0
data.nselect = 0
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def skip(self,n):
data = self.data
count = n-1
for snap in data.snaps:
if not snap.tselect: continue
count += 1
if count == n:
count = 0
continue
snap.tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def test(self,teststr):
data = self.data
snaps = data.snaps
cmd = "flag = " + teststr.replace("$t","snaps[i].time")
ccmd = compile(cmd,'','single')
for i in xrange(data.nsnaps):
if not snaps[i].tselect: continue
exec ccmd
if not flag:
snaps[i].tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
# atom selection class
class aselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self,*args):
data = self.data
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
# --------------------------------------------------------------------
def test(self,teststr,*args):
data = self.data
# replace all $var with snap.atoms references and compile test string
pattern = "\$\w*"
list = re.findall(pattern,teststr)
for item in list:
name = item[1:]
column = data.names[name]
insert = "snap.atoms[i][%d]" % column
teststr = teststr.replace(item,insert)
cmd = "flag = " + teststr
ccmd = compile(cmd,'','single')
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
for i in xrange(data.nsnaps):
if data.snaps[i].tselect:
print "%d atoms of %d selected in first step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
for i in xrange(data.nsnaps-1,-1,-1):
if data.snaps[i].tselect:
print "%d atoms of %d selected in last step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
| gpl-2.0 |
algorhythms/LeetCode | 652 Find Duplicate Subtrees.py | 1 | 2915 | #!/usr/bin/python3
"""
Given a binary tree, return all duplicate subtrees. For each kind of duplicate
subtrees, you only need to return the root node of any one of them.
Two trees are duplicate if they have the same structure with same node values.
Example 1:
1
/ \
2 3
/ / \
4 2 4
/
4
The following are two duplicate subtrees:
2
/
4
and
4
Therefore, you need to return above trees' root in the form of a list.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
from collections import defaultdict
class MerkleHash:
def __init__(self):
self.start_key = 0
self.merkle_hash = defaultdict(self._auto_incr) # subtree -> id
def _auto_incr(self):
self.start_key += 1
return self.start_key
def __call__(self, val):
return self.merkle_hash[val]
class Solution:
def __init__(self):
self.counter = defaultdict(int)
self.merkle_hash = MerkleHash()
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
"""
        Merkle hash based on current val, left subtree merkle hash, and
        right subtree merkle hash.
        Assign each subtree an identity/hash.
        The chain of hashes uniquely identifies a subtree.
"""
ret = []
self.walk(root, ret)
return ret
def walk(self, cur, ret) -> int:
"""
return merkle hash id
"""
if not cur:
return self.merkle_hash(None)
subtree_value = (cur.val, self.walk(cur.left, ret), self.walk(cur.right, ret))
merkle_hash = self.merkle_hash(subtree_value)
if self.counter[merkle_hash] == 1:
ret.append(cur)
self.counter[merkle_hash] += 1
return merkle_hash
class Solution2:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
"""
Only need to return the root
"""
ret = []
self.walk(root, defaultdict(int), ret)
return ret
def walk(self, cur, counter, ret) -> str:
"""
        serialize the subtrees and check existence
        Needs a unique representation for the key: we cannot put cur.val
        in the middle (in-order), since then we could not differentiate
        between
          0
         /
        0
        and
        0
         \
          0
        because we would not know which node is the root; appending
        cur.val after the left and right serializations (post-order)
        removes the ambiguity.
        complexity: O(N) * O(N) due to string concatenation
        """
"""
if not cur:
return "None"
cur_key = ",".join([
self.walk(cur.left, counter, ret),
self.walk(cur.right, counter, ret),
str(cur.val),
])
if counter[cur_key] == 1:
ret.append(cur)
counter[cur_key] += 1
return cur_key
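if __name__ == "__main__":
    # Minimal sanity check (illustrative, not part of a LeetCode submission):
    # build the tree from the problem statement above and confirm that both
    # solutions report the duplicate subtrees rooted at 2 and at 4.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.left.left = TreeNode(4)
    root.right = TreeNode(3)
    root.right.left = TreeNode(2)
    root.right.left.left = TreeNode(4)
    root.right.right = TreeNode(4)
    for solution in (Solution(), Solution2()):
        duplicates = solution.findDuplicateSubtrees(root)
        print(sorted(node.val for node in duplicates))  # expect [2, 4]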
| mit |
Comcast/neutron | neutron/debug/debug_agent.py | 4 | 7959 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux.dhcp import DictModel
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe'
class NeutronDebugAgent():
OPTS = [
# Needed for drivers
cfg.BoolOpt('use_namespaces', default=True,
help=_("Use Linux network namespaces")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
]
def __init__(self, conf, client, driver):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
self.client = client
self.driver = driver
def _get_namespace(self, port):
return "qprobe-%s" % port.id
def create_probe(self, network_id, device_owner='network'):
network = self._get_network(network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
port = self._create_port(network, device_owner)
interface_name = self.driver.get_device_name(port)
namespace = None
if self.conf.use_namespaces:
namespace = self._get_namespace(port)
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
LOG.debug(_('Reusing existing device: %s.'), interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
bridge=bridge,
namespace=namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
return port
def _get_subnet(self, subnet_id):
subnet_dict = self.client.show_subnet(subnet_id)['subnet']
return DictModel(subnet_dict)
def _get_network(self, network_id):
network_dict = self.client.show_network(network_id)['network']
network = DictModel(network_dict)
network.external = network_dict.get('router:external')
obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
network.subnets = obj_subnet
return network
def clear_probe(self):
ports = self.client.list_ports(
device_id=socket.gethostname(),
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
self.delete_probe(port['id'])
def delete_probe(self, port_id):
port = DictModel(self.client.show_port(port_id)['port'])
network = self._get_network(port.network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces and ip.netns.exists(namespace):
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge,
namespace=namespace)
try:
ip.netns.delete(namespace)
except Exception:
LOG.warn(_('Failed to delete namespace %s'), namespace)
else:
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge)
self.client.delete_port(port.id)
def list_probes(self):
ports = self.client.list_ports(
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
port['device_name'] = self.driver.get_device_name(DictModel(port))
return info
def exec_command(self, port_id, command=None):
port = DictModel(self.client.show_port(port_id)['port'])
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces:
if not command:
return "sudo ip netns exec %s" % self._get_namespace(port)
namespace = ip.ensure_namespace(namespace)
return namespace.netns.execute(shlex.split(command))
else:
return utils.execute(shlex.split(command))
def ensure_probe(self, network_id):
ports = self.client.list_ports(network_id=network_id,
device_id=socket.gethostname(),
device_owner=DEVICE_OWNER_NETWORK_PROBE)
info = ports.get('ports', [])
if info:
return DictModel(info[0])
else:
return self.create_probe(network_id)
def ping_all(self, network_id=None, timeout=1):
if network_id:
ports = self.client.list_ports(network_id=network_id)['ports']
else:
ports = self.client.list_ports()['ports']
result = ""
for port in ports:
probe = self.ensure_probe(port['network_id'])
if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
continue
for fixed_ip in port['fixed_ips']:
address = fixed_ip['ip_address']
subnet = self._get_subnet(fixed_ip['subnet_id'])
if subnet.ip_version == 4:
ping_command = 'ping'
else:
ping_command = 'ping6'
result += self.exec_command(probe.id,
'%s -c 1 -w %s %s' % (ping_command,
timeout,
address))
return result
def _create_port(self, network, device_owner):
host = self.conf.host
body = {'port': {'admin_state_up': True,
'network_id': network.id,
'device_id': '%s' % socket.gethostname(),
'device_owner': '%s:probe' % device_owner,
'tenant_id': network.tenant_id,
'binding:host_id': host,
'fixed_ips': [dict(subnet_id=s.id)
for s in network.subnets]}}
port_dict = self.client.create_port(body)['port']
port = DictModel(port_dict)
port.network = network
for fixed_ip in port.fixed_ips:
fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
return port
| apache-2.0 |
civisanalytics/civis-python | civis/cli/_cli_commands.py | 1 | 11952 | #!/usr/bin/env python3
"""
Additional commands to add to the CLI beyond the OpenAPI spec.
"""
from __future__ import print_function
import functools
import operator
import os
import sys
import time
import click
import requests
import webbrowser
import civis
from civis.io import file_to_civis, civis_to_file
# From http://patorjk.com/software/taag/#p=display&f=3D%20Diagonal&t=CIVIS
_CIVIS_ASCII_ART = r"""
,----.. ,---, ,---, .--.--.
/ / \ ,`--.' | ,---.,`--.' | / / '.
| : :| : : /__./|| : :| : /`. /
. | ;. /: | ' ,---.; ; |: | '; | |--`
. ; /--` | : |/___/ \ | || : || : ;_
; | ; ' ' ;\ ; \ ' |' ' ; \ \ `.
| : | | | | \ \ \: || | | `----. \
. | '___ ' : ; ; \ ' .' : ; __ \ \ |
' ; : .'|| | ' \ \ '| | ' / /`--' /
' | '/ :' : | \ ` ;' : |'--'. /
| : / ; |.' : \ |; |.' `--'---'
\ \ .' '---' '---" '---'
`---`
"""
_FOLLOW_LOG_NOTE = '''
Outputs job run logs in the format: "datetime message\\n" where
datetime is in ISO8601 format, like "2020-02-14T20:28:18.722Z".
If the job is still running, this command will continue outputting logs
until the run is done and then exit. If the run is already finished, it
will output all the logs from that run and then exit.
NOTE: This command could miss some log entries from a currently-running
job. It does not re-fetch logs that might have been saved out of order, to
preserve the chronological order of the logs and without duplication.
'''
_FOLLOW_POLL_INTERVAL_SEC = 3
@click.command('upload')
@click.argument('path')
@click.option('--name', type=str, default=None,
help="A name for the Civis File (defaults to the base file name")
@click.option('--expires-at', type=str, default=None,
help="The date and time the file will expire "
"(ISO-8601 format, e.g., \"2017-01-15\" or "
"\"2017-01-15T15:25:10Z\"). "
"Set \"never\" for the file to not expire."
"The default is the default in Civis (30 days).")
def files_upload_cmd(path, name, expires_at):
"""Upload a local file to Civis and get back the File ID."""
if name is None:
name = os.path.basename(path)
if expires_at is None:
# Use the default in Civis platform (30 days).
expires_kwarg = {}
elif expires_at.lower() == "never":
expires_kwarg = {"expires_at": None}
else:
expires_kwarg = {"expires_at": expires_at}
with open(path, 'rb') as f:
file_id = file_to_civis(f, name=name, **expires_kwarg)
print(file_id)
@click.command('download')
@click.argument('file_id', type=int)
@click.argument('path')
def files_download_cmd(file_id, path):
"""Download a Civis File to a specified local path."""
with open(path, 'wb') as f:
civis_to_file(file_id, f)
@click.command('sql')
@click.option('--dbname', '-d', type=str, required=True,
help='Execute the query on this Civis Platform database')
@click.option('--command', '-c', type=str, default=None,
help='Execute a single input command string')
@click.option('--filename', '-f', type=click.Path(exists=True),
help='Execute a query read from the given file')
@click.option('--output', '-o', type=click.Path(),
help='Download query results to this file')
@click.option('--quiet', '-q', is_flag=True, help='Suppress screen output')
@click.option('-n', type=int, default=100,
help="Display up to this many rows of the result. Max 100.")
def sql_cmd(dbname, command, filename, output, quiet, n):
"""\b Execute a SQL query in Civis Platform
If neither a command nor an input file is specified, read
the SQL command from stdin.
If writing to an output file, use a Civis SQL script and write the
entire query output to the specified file.
If not writing to an output file, use a Civis Query, and return a
preview of the results, up to a maximum of 100 rows.
"""
if filename:
with open(filename, 'rt') as f:
sql = f.read()
elif not command:
# Read the SQL query from user input. This also allows use of a heredoc
lines = []
while True:
try:
_i = input()
except (KeyboardInterrupt, EOFError):
# The end of a heredoc produces an EOFError.
break
if not _i:
break
else:
lines.append(_i)
sql = '\n'.join(lines)
else:
sql = command
if not sql:
# If the user didn't enter a query, exit.
if not quiet:
print('ERROR: Did not receive a SQL query.', file=sys.stderr)
return
if not quiet:
print('\nExecuting query...', file=sys.stderr)
if output:
fut = civis.io.civis_to_csv(output, sql, database=dbname)
fut.result() # Block for completion and raise exceptions if any
if not quiet:
print("Downloaded the result of the query to %s." % output,
file=sys.stderr)
else:
fut = civis.io.query_civis(sql, database=dbname,
preview_rows=n, polling_interval=3)
cols = fut.result()['result_columns']
rows = fut.result()['result_rows']
if not quiet:
print('...Query complete.\n', file=sys.stderr)
print(_str_table_result(cols, rows))
def _str_table_result(cols, rows):
"""Turn a Civis Query result into a readable table."""
str_rows = [['' if _v is None else _v for _v in row] for row in rows]
# Determine the maximum width of each column.
# First find the width of each element in each row, then find the max
# width in each position.
max_len = functools.reduce(
lambda x, y: [max(z) for z in zip(x, y)],
[[len(_v) for _v in _r] for _r in [cols] + str_rows])
header_str = " | ".join("{0:<{width}}".format(_v, width=_l)
for _l, _v in zip(max_len, cols))
tb_strs = [header_str, len(header_str) * '-']
for row in str_rows:
tb_strs.append(" | ".join("{0:>{width}}".format(_v, width=_l)
for _l, _v in zip(max_len, row)))
return '\n'.join(tb_strs)
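# Illustrative example with hypothetical values:
#   _str_table_result(["id", "name"], [["1", "a"], ["22", "bc"]])
# returns the string:
#   id | name
#   ---------
#    1 |    a
#   22 |   bc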
@click.command(
'follow-log',
help='Output live log from the most recent job run.' + _FOLLOW_LOG_NOTE)
@click.argument('id', type=int)
def jobs_follow_log(id):
client = civis.APIClient()
runs = client.jobs.list_runs(id, limit=1, order='id', order_dir='desc')
if not runs:
raise click.ClickException('No runs found for that job ID.')
run_id = runs[0].id
print('Run ID: ' + str(run_id))
_jobs_follow_run_log(id, run_id)
@click.command(
'follow-run-log',
help='Output live run log.' + _FOLLOW_LOG_NOTE)
@click.argument('id', type=int)
@click.argument('run_id', type=int)
def jobs_follow_run_log(id, run_id):
_jobs_follow_run_log(id, run_id)
def _jobs_follow_run_log(id, run_id):
client = civis.APIClient(return_type='raw')
local_max_log_id = 0
continue_polling = True
while continue_polling:
# This call gets all available log messages since last_id up to
# the page size, ordered by log ID. We leave it to Platform to decide
# the best page size.
response = client.jobs.list_runs_logs(id, run_id,
last_id=local_max_log_id)
if 'civis-max-id' in response.headers:
remote_max_log_id = int(response.headers['civis-max-id'])
else:
# Platform hasn't seen any logs at all yet
remote_max_log_id = None
logs = response.json()
if logs:
local_max_log_id = max(log['id'] for log in logs)
logs.sort(key=operator.itemgetter('createdAt', 'id'))
for log in logs:
print(' '.join((log['createdAt'], log['message'].rstrip())))
# if output is a pipe, write the buffered output immediately:
sys.stdout.flush()
log_finished = response.headers['civis-cache-control'] != 'no-store'
if remote_max_log_id is None:
remote_has_more_logs_to_get_now = False
elif local_max_log_id == remote_max_log_id:
remote_has_more_logs_to_get_now = False
if log_finished:
continue_polling = False
else:
remote_has_more_logs_to_get_now = True
if continue_polling and not remote_has_more_logs_to_get_now:
time.sleep(_FOLLOW_POLL_INTERVAL_SEC)
@click.command('download')
@click.argument('notebook_id', type=int)
@click.argument('path')
def notebooks_download_cmd(notebook_id, path):
"""Download a notebook to a specified local path."""
client = civis.APIClient()
info = client.notebooks.get(notebook_id)
response = requests.get(info['notebook_url'], stream=True)
response.raise_for_status()
chunk_size = 32 * 1024
chunked = response.iter_content(chunk_size)
with open(path, 'wb') as f:
for lines in chunked:
f.write(lines)
@click.command('new')
@click.argument('language', type=click.Choice(['python3', 'r']),
default='python3')
@click.option('--mem', type=int, default=None,
help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_new_cmd(language='python3', mem=None, cpu=None):
"""Create a new notebook and open it in the browser."""
client = civis.APIClient()
kwargs = {'memory': mem, 'cpu': cpu}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
new_nb = client.notebooks.post(language=language, **kwargs)
print("Created new {language} notebook with ID {id} in Civis Platform"
" (https://platform.civisanalytics.com/#/notebooks/{id})."
.format(language=language, id=new_nb.id))
_notebooks_up(new_nb.id)
_notebooks_open(new_nb.id)
@click.command('up')
@click.argument('notebook_id', type=int)
@click.option('--mem', type=int, default=None,
help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_up(notebook_id, mem=None, cpu=None):
"""Start an existing notebook and open it in the browser."""
client = civis.APIClient()
kwargs = {'memory': mem, 'cpu': cpu}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
client.notebooks.patch(notebook_id, **kwargs)
_notebooks_up(notebook_id)
_notebooks_open(notebook_id)
def _notebooks_up(notebook_id):
client = civis.APIClient()
return client.notebooks.post_deployments(notebook_id)
@click.command('down')
@click.argument('notebook_id', type=int)
def notebooks_down(notebook_id):
"""Shut down a running notebook."""
client = civis.APIClient()
nb = client.notebooks.get(notebook_id)
state = nb['most_recent_deployment']['state']
if state not in ['running', 'pending']:
print('Notebook is in state "{}" and can\'t be stopped.'.format(state))
deployment_id = nb['most_recent_deployment']['deploymentId']
client.notebooks.delete_deployments(notebook_id, deployment_id)
@click.command('open')
@click.argument('notebook_id', type=int)
def notebooks_open(notebook_id):
"""Open an existing notebook in the browser."""
_notebooks_open(notebook_id)
def _notebooks_open(notebook_id):
url = 'https://platform.civisanalytics.com/#/notebooks/{}?fullscreen=true'
url = url.format(notebook_id)
webbrowser.open(url, new=2, autoraise=True)
@click.command('civis', help="Print Civis")
def civis_ascii_art():
print(_CIVIS_ASCII_ART)
| bsd-3-clause |
dhimmel/networkx | networkx/generators/tests/test_line.py | 57 | 2357 | import networkx as nx
from nose.tools import *
import networkx.generators.line as line
def test_node_func():
# graph
G = nx.Graph()
G.add_edge(1,2)
nf = line._node_func(G)
assert_equal(nf(1,2), (1,2))
assert_equal(nf(2,1), (1,2))
# multigraph
G = nx.MultiGraph()
G.add_edge(1,2)
G.add_edge(1,2)
nf = line._node_func(G)
assert_equal(nf(1,2,0), (1,2,0))
assert_equal(nf(2,1,0), (1,2,0))
def test_edge_func():
# graph
G = nx.Graph()
G.add_edge(1,2)
G.add_edge(2,3)
ef = line._edge_func(G)
expected = [(1,2),(2,3)]
result = sorted(ef())
assert_equal(expected, result)
# digraph
G = nx.MultiDiGraph()
G.add_edge(1,2)
G.add_edge(2,3)
G.add_edge(2,3)
ef = line._edge_func(G)
expected = [(1,2,0),(2,3,0),(2,3,1)]
result = sorted(ef())
assert_equal(expected, result)
def test_sorted_edge():
assert_equal( (1,2), line._sorted_edge(1,2) )
assert_equal( (1,2), line._sorted_edge(2,1) )
class TestGeneratorLine():
def test_star(self):
G = nx.star_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, nx.complete_graph(5)))
def test_path(self):
G = nx.path_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, nx.path_graph(4)))
def test_cycle(self):
G = nx.cycle_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, G))
def test_digraph1(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(0,2),(0,3)])
L = nx.line_graph(G)
# no edge graph, but with nodes
assert_equal(L.adj, {(0,1):{}, (0,2):{}, (0,3):{}})
def test_digraph2(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G)
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create1(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G, create_using=nx.Graph())
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create2(self):
G = nx.Graph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G, create_using=nx.DiGraph())
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
| bsd-3-clause |
cchurch/ansible | test/units/modules/storage/netapp/test_netapp_e_auditlog.py | 68 | 10758 | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.delete_log_messages()
def test_update_configuration_delete_pass(self):
"""Verify 422 and force successfully returns True."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": True}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
(422, {u"invalidFieldsIfKnown": None,
u"errorMessage": u"Configuration change...",
u"localizedMessage": u"Configuration change...",
u"retcode": u"auditLogImmediateFullCondition",
u"codeType": u"devicemgrerror"}),
(200, None),
(200, None)]):
self.assertTrue(audit_log.update_configuration())
def test_update_configuration_delete_skip_fail(self):
"""Verify 422 and no force results in AnsibleJsonFail exception."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": False}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
(200, None), (200, None)]):
audit_log.update_configuration()
| gpl-3.0 |
erjohnso/ansible | lib/ansible/modules/system/aix_inittab.py | 26 | 7531 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joris Weijters <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Joris Weijters (@molekuul)"
module: aix_inittab
short_description: Manages the inittab on AIX.
description:
- Manages the inittab on AIX.
version_added: "2.3"
options:
name:
description:
- Name of the inittab entry.
required: True
aliases: ['service']
runlevel:
description:
- Runlevel of the entry.
required: True
action:
description:
      - Action that init should take for this entry.
required: True
choices: [
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
]
command:
description:
- What command has to run.
required: True
insertafter:
description:
      - The inittab entry after which the new entry should be inserted.
state:
description:
      - Whether the entry should be present or absent in the inittab file.
choices: [ "present", "absent" ]
default: present
notes:
  - The changes are persistent across reboots. You need root rights to read or adjust the inittab with the lsitab, chitab,
    mkitab or rmitab commands.
  - Tested on AIX 7.1.
requirements: [ 'itertools']
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: "echo hello"
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: present
become: yes
# Remove inittab entry startmyservice.
- name: remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: absent
become: yes
'''
RETURN = '''
name:
description: name of the adjusted inittab entry
returned: always
type: string
sample: startmyservice
msg:
description: action done with the inittab entry
returned: changed
type: string
sample: changed inittab entry startmyservice
changed:
description: whether the inittab changed or not
returned: always
type: boolean
sample: true
'''
# Import necessary libraries
import itertools
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
    # Check if the entry exists: return a dict with 'exist': False if it
    # doesn't, or 'exist': True together with the parsed entry fields if it does.
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
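        # lsitab prints the entry in colon-separated inittab form: name:runlevel:action:command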
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
        # strip non-printable characters such as the trailing \n
values = map(lambda s: s.strip(), values)
existsdict = dict(itertools.izip(keys, values))
existsdict.update({'exist': True})
return existsdict
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
runlevel=dict(required=True, type='str'),
action=dict(choices=[
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
], type='str'),
command=dict(required=True, type='str'),
insertafter=dict(type='str'),
state=dict(choices=[
'present',
'absent',
], required=True, type='str'),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
    # if the desired state is present, install or change the entry
if module.params['state'] == 'present':
# create new entry string
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
        # If the entry does not exist, or any of its fields differ, the entry
        # will be created or changed.
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
# If the entry does not exist create the entry
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
msg="could not remove entry grom inittab)", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
catapult-project/catapult | telemetry/telemetry/internal/backends/chrome/android_browser_finder.py | 3 | 20134 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be started and controlled by telemetry."""
from __future__ import absolute_import
import contextlib
import logging
import os
import platform
import posixpath
import shutil
import subprocess
from devil import base_error
from devil.android import apk_helper
from devil.android import flag_changer
from devil.android.sdk import version_codes
from py_utils import dependency_util
from py_utils import file_util
from py_utils import tempfile_ext
from telemetry import compat_mode_options
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import platform as telemetry_platform
from telemetry.core import util
from telemetry.internal.backends import android_browser_backend_settings
from telemetry.internal.backends.chrome import android_browser_backend
from telemetry.internal.backends.chrome import chrome_startup_args
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import android_device
from telemetry.internal.util import binary_manager
from telemetry.internal.util import format_for_logging
from telemetry.internal.util import local_first_binary_manager
ANDROID_BACKEND_SETTINGS = (
android_browser_backend_settings.ANDROID_BACKEND_SETTINGS)
@contextlib.contextmanager
def _ProfileWithExtraFiles(profile_dir, profile_files_to_copy):
"""Yields a temporary directory populated with input files.
Args:
profile_dir: A directory whose contents will be copied to the output
directory.
profile_files_to_copy: A list of (source, dest) tuples to be copied to
the output directory.
Yields: A path to a temporary directory, named "_default_profile". This
directory will be cleaned up when this context exits.
"""
with tempfile_ext.NamedTemporaryDirectory() as tempdir:
# TODO(csharrison): "_default_profile" was chosen because this directory
# will be pushed to the device's sdcard. We don't want to choose a
# random name due to the extra failure mode of filling up the sdcard
# in the case of unclean test teardown. We should consider changing
# PushProfile to avoid writing to this intermediate location.
host_profile = os.path.join(tempdir, '_default_profile')
if profile_dir:
shutil.copytree(profile_dir, host_profile)
else:
os.mkdir(host_profile)
# Add files from |profile_files_to_copy| into the host profile
# directory. Don't copy files if they already exist.
for source, dest in profile_files_to_copy:
host_path = os.path.join(host_profile, dest)
if not os.path.exists(host_path):
file_util.CopyFileWithIntermediateDirectories(source, host_path)
yield host_profile
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
"""A launchable android browser instance."""
def __init__(self, browser_type, finder_options, android_platform,
backend_settings, local_apk=None, target_os='android'):
super(PossibleAndroidBrowser, self).__init__(
browser_type, target_os, backend_settings.supports_tab_control)
assert browser_type in FindAllBrowserTypes(), (
'Please add %s to android_browser_finder.FindAllBrowserTypes' %
browser_type)
self._platform = android_platform
self._platform_backend = (
android_platform._platform_backend) # pylint: disable=protected-access
self._backend_settings = backend_settings
self._local_apk = local_apk
self._flag_changer = None
self._modules_to_install = None
self._compile_apk = finder_options.compile_apk
if self._local_apk is None and finder_options.chrome_root is not None:
self._local_apk = self._backend_settings.FindLocalApk(
self._platform_backend.device, finder_options.chrome_root)
# At this point the local_apk, if any, must exist.
assert self._local_apk is None or os.path.exists(self._local_apk)
self._build_dir = util.GetBuildDirFromHostApkPath(self._local_apk)
if finder_options.modules_to_install:
self._modules_to_install = set(['base'] +
finder_options.modules_to_install)
self._support_apk_list = []
if (self._backend_settings.requires_embedder or
self._backend_settings.has_additional_apk):
if finder_options.webview_embedder_apk:
self._support_apk_list = finder_options.webview_embedder_apk
else:
self._support_apk_list = self._backend_settings.FindSupportApks(
self._local_apk, finder_options.chrome_root)
elif finder_options.webview_embedder_apk:
logging.warning(
'No embedder needed for %s, ignoring --webview-embedder-apk option',
self._backend_settings.browser_type)
# At this point the apks in _support_apk_list, if any, must exist.
for apk in self._support_apk_list:
assert os.path.exists(apk)
def __repr__(self):
return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type
@property
def settings(self):
"""Get the backend_settings for this possible browser."""
return self._backend_settings
@property
def browser_directory(self):
# On Android L+ the directory where base APK resides is also used for
# keeping extracted native libraries and .odex. Here is an example layout:
# /data/app/$package.apps.chrome-1/
# base.apk
# lib/arm/libchrome.so
# oat/arm/base.odex
# Declaring this toplevel directory as 'browser_directory' allows the cold
# startup benchmarks to flush OS pagecache for the native library, .odex and
# the APK.
apks = self._platform_backend.device.GetApplicationPaths(
self._backend_settings.package)
# A package can map to multiple APKs if the package overrides the app on
# the system image. Such overrides should not happen on perf bots. The
# package can also map to multiple apks if splits are used. In all cases, we
# want the directory that contains base.apk.
for apk in apks:
if apk.endswith('/base.apk'):
return apk[:-9]
return None
@property
def profile_directory(self):
return self._platform_backend.GetProfileDir(self._backend_settings.package)
@property
def last_modification_time(self):
if self._local_apk:
return os.path.getmtime(self._local_apk)
return -1
def _GetPathsForOsPageCacheFlushing(self):
return [self.profile_directory, self.browser_directory]
def _InitPlatformIfNeeded(self):
pass
def _SetupProfile(self):
if self._browser_options.dont_override_profile:
return
# Just remove the existing profile if we don't have any files to copy over.
# This is because PushProfile does not support pushing completely empty
# directories.
profile_files_to_copy = self._browser_options.profile_files_to_copy
if not self._browser_options.profile_dir and not profile_files_to_copy:
self._platform_backend.RemoveProfile(
self._backend_settings.package,
self._backend_settings.profile_ignore_list)
return
with _ProfileWithExtraFiles(self._browser_options.profile_dir,
profile_files_to_copy) as profile_dir:
self._platform_backend.PushProfile(self._backend_settings.package,
profile_dir)
def SetUpEnvironment(self, browser_options):
super(PossibleAndroidBrowser, self).SetUpEnvironment(browser_options)
self._platform_backend.DismissCrashDialogIfNeeded()
device = self._platform_backend.device
startup_args = self.GetBrowserStartupArgs(self._browser_options)
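    # Clear the logcat buffer so later log collection only sees output from
    # this run.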
device.adb.Logcat(clear=True)
# use legacy commandline path if in compatibility mode
self._flag_changer = flag_changer.FlagChanger(
device, self._backend_settings.command_line_name, use_legacy_path=
compat_mode_options.LEGACY_COMMAND_LINE_PATH in
browser_options.compatibility_mode)
self._flag_changer.ReplaceFlags(startup_args, log_flags=False)
formatted_args = format_for_logging.ShellFormat(
startup_args, trim=browser_options.trim_logs)
logging.info('Flags set on device were %s', formatted_args)
# Stop any existing browser found already running on the device. This is
# done *after* setting the command line flags, in case some other Android
# process manages to trigger Chrome's startup before we do.
self._platform_backend.StopApplication(self._backend_settings.package)
self._SetupProfile()
# Remove any old crash dumps
self._platform_backend.device.RemovePath(
self._platform_backend.GetDumpLocation(self._backend_settings.package),
recursive=True, force=True)
def _TearDownEnvironment(self):
self._RestoreCommandLineFlags()
def _RestoreCommandLineFlags(self):
if self._flag_changer is not None:
try:
self._flag_changer.Restore()
finally:
self._flag_changer = None
def Create(self):
"""Launch the browser on the device and return a Browser object."""
return self._GetBrowserInstance(existing=False)
def FindExistingBrowser(self):
"""Find a browser running on the device and bind a Browser object to it.
The returned Browser object will only be bound to a running browser
instance whose package name matches the one specified by the backend
settings of this possible browser.
A BrowserGoneException is raised if the browser cannot be found.
"""
return self._GetBrowserInstance(existing=True)
def _GetBrowserInstance(self, existing):
# Init the LocalFirstBinaryManager if this is the first time we're creating
# a browser. Note that we use the host's OS and architecture since the
# retrieved dependencies are used on the host, not the device.
if local_first_binary_manager.LocalFirstBinaryManager.NeedsInit():
local_first_binary_manager.LocalFirstBinaryManager.Init(
self._build_dir, self._local_apk, platform.system().lower(),
platform.machine())
browser_backend = android_browser_backend.AndroidBrowserBackend(
self._platform_backend, self._browser_options,
self.browser_directory, self.profile_directory,
self._backend_settings,
build_dir=self._build_dir)
try:
return browser.Browser(
browser_backend, self._platform_backend, startup_args=(),
find_existing=existing)
except Exception:
browser_backend.Close()
raise
def GetBrowserStartupArgs(self, browser_options):
startup_args = chrome_startup_args.GetFromBrowserOptions(browser_options)
# use the flag `--ignore-certificate-errors` if in compatibility mode
supports_spki_list = (
self._backend_settings.supports_spki_list and
compat_mode_options.IGNORE_CERTIFICATE_ERROR
not in browser_options.compatibility_mode)
startup_args.extend(chrome_startup_args.GetReplayArgs(
self._platform_backend.network_controller_backend,
supports_spki_list=supports_spki_list))
startup_args.append('--enable-remote-debugging')
startup_args.append('--disable-fre')
startup_args.append('--disable-external-intent-requests')
# Need to specify the user profile directory for
# --ignore-certificate-errors-spki-list to work.
startup_args.append('--user-data-dir=' + self.profile_directory)
# Needed so that non-browser-process crashes avoid automatic dump upload
# and subsequent deletion. The extra "Crashpad" is necessary because
# crashpad_stackwalker.py is hard-coded to look for a "Crashpad" directory
# in the dump directory that it is provided.
startup_args.append('--breakpad-dump-location=' + posixpath.join(
self._platform_backend.GetDumpLocation(self._backend_settings.package),
'Crashpad'))
return startup_args
def SupportsOptions(self, browser_options):
if len(browser_options.extensions_to_load) != 0:
return False
return True
def IsAvailable(self):
"""Returns True if the browser is or can be installed on the platform."""
has_local_apks = self._local_apk and (
not self._backend_settings.requires_embedder or self._support_apk_list)
return has_local_apks or self.platform.CanLaunchApplication(
self.settings.package)
@decorators.Cache
def UpdateExecutableIfNeeded(self):
# TODO(crbug.com/815133): This logic should belong to backend_settings.
for apk in self._support_apk_list:
      logging.warning('Installing %s on device if needed.', apk)
self.platform.InstallApplication(apk)
apk_name = self._backend_settings.GetApkName(
self._platform_backend.device)
is_webview_apk = apk_name is not None and ('SystemWebView' in apk_name or
'system_webview' in apk_name or
'TrichromeWebView' in apk_name or
'trichrome_webview' in apk_name)
# The WebView fallback logic prevents sideloaded WebView APKs from being
# installed and set as the WebView implementation correctly. Disable the
# fallback logic before installing the WebView APK to make sure the fallback
# logic doesn't interfere.
if is_webview_apk:
self._platform_backend.device.SetWebViewFallbackLogic(False)
if self._local_apk:
      logging.warning('Installing %s on device if needed.', self._local_apk)
self.platform.InstallApplication(
self._local_apk, modules=self._modules_to_install)
if self._compile_apk:
package_name = apk_helper.GetPackageName(self._local_apk)
        logging.warning('Compiling %s.', package_name)
self._platform_backend.device.RunShellCommand(
['cmd', 'package', 'compile', '-m', self._compile_apk, '-f',
package_name],
check_return=True)
sdk_version = self._platform_backend.device.build_version_sdk
# Bundles are in the ../bin directory, so it's safer to just check the
# correct name is part of the path.
is_monochrome = apk_name is not None and (apk_name == 'Monochrome.apk' or
'monochrome_bundle' in apk_name)
if ((is_webview_apk or
(is_monochrome and sdk_version < version_codes.Q)) and
sdk_version >= version_codes.NOUGAT):
package_name = apk_helper.GetPackageName(self._local_apk)
      logging.warning('Setting %s as WebView implementation.', package_name)
self._platform_backend.device.SetWebViewImplementation(package_name)
def GetTypExpectationsTags(self):
tags = super(PossibleAndroidBrowser, self).GetTypExpectationsTags()
if 'webview' in self.browser_type:
tags.append('android-webview')
else:
tags.append('android-not-webview')
if 'weblayer' in self.browser_type:
tags.append('android-weblayer')
return tags
def SelectDefaultBrowser(possible_browsers):
"""Return the newest possible browser."""
if not possible_browsers:
return None
return max(possible_browsers, key=lambda b: b.last_modification_time)
def CanFindAvailableBrowsers():
return android_device.CanDiscoverDevices()
def _CanPossiblyHandlePath(apk_path):
if not apk_path:
return False
try:
apk_helper.ToHelper(apk_path)
return True
except apk_helper.ApkHelperError:
return False
def FindAllBrowserTypes():
browser_types = [b.browser_type for b in ANDROID_BACKEND_SETTINGS]
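  # 'exact' denotes an APK passed via --browser-executable; 'reference' is a
  # reference build fetched from cloud storage.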
return browser_types + ['exact', 'reference']
def _FetchReferenceApk(android_platform, is_bundle=False):
"""Fetch the apk for reference browser type from gcloud.
Local path to the apk will be returned upon success.
Otherwise, None will be returned.
"""
os_version = dependency_util.GetChromeApkOsVersion(
android_platform.GetOSVersionName())
if is_bundle:
os_version += '_bundle'
arch = android_platform.GetArchName()
try:
reference_build = binary_manager.FetchPath(
'chrome_stable', 'android', arch, os_version)
if reference_build and os.path.exists(reference_build):
return reference_build
except binary_manager.NoPathFoundError:
logging.warning('Cannot find path for reference apk for device %s',
android_platform.GetDeviceId())
except binary_manager.CloudStorageError:
logging.warning('Failed to download reference apk for device %s',
android_platform.GetDeviceId())
return None
def _GetReferenceAndroidBrowser(android_platform, finder_options):
reference_build = _FetchReferenceApk(android_platform)
if reference_build:
return PossibleAndroidBrowser(
'reference',
finder_options,
android_platform,
android_browser_backend_settings.ANDROID_CHROME,
reference_build)
def _FindAllPossibleBrowsers(finder_options, android_platform):
"""Testable version of FindAllAvailableBrowsers."""
if not android_platform:
return []
possible_browsers = []
for apk in finder_options.webview_embedder_apk:
if not os.path.exists(apk):
raise exceptions.PathMissingError(
'Unable to find apk specified by --webview-embedder-apk=%s' % apk)
# Add the exact APK if given.
if _CanPossiblyHandlePath(finder_options.browser_executable):
if not os.path.exists(finder_options.browser_executable):
raise exceptions.PathMissingError(
'Unable to find exact apk specified by --browser-executable=%s' %
finder_options.browser_executable)
package_name = apk_helper.GetPackageName(finder_options.browser_executable)
try:
backend_settings = next(
b for b in ANDROID_BACKEND_SETTINGS if b.package == package_name)
except StopIteration:
raise exceptions.UnknownPackageError(
'%s specified by --browser-executable has an unknown package: %s' %
(finder_options.browser_executable, package_name))
possible_browsers.append(PossibleAndroidBrowser(
'exact',
finder_options,
android_platform,
backend_settings,
finder_options.browser_executable))
if finder_options.IsBrowserTypeRelevant('reference'):
reference_browser = _GetReferenceAndroidBrowser(
android_platform, finder_options)
if reference_browser:
possible_browsers.append(reference_browser)
# Add any other known available browsers.
for settings in ANDROID_BACKEND_SETTINGS:
if finder_options.IsBrowserTypeRelevant(settings.browser_type):
local_apk = None
if finder_options.IsBrowserTypeReference():
local_apk = _FetchReferenceApk(
android_platform, finder_options.IsBrowserTypeBundle())
if settings.IsWebView():
p_browser = PossibleAndroidBrowser(
settings.browser_type, finder_options, android_platform, settings,
local_apk=local_apk, target_os='android_webview')
else:
p_browser = PossibleAndroidBrowser(
settings.browser_type, finder_options, android_platform, settings,
local_apk=local_apk)
if p_browser.IsAvailable():
possible_browsers.append(p_browser)
return possible_browsers
def FindAllAvailableBrowsers(finder_options, device):
"""Finds all the possible browsers on one device.
The device is either the only device on the host platform,
or |finder_options| specifies a particular device.
"""
if not isinstance(device, android_device.AndroidDevice):
return []
try:
android_platform = telemetry_platform.GetPlatformForDevice(
device, finder_options)
return _FindAllPossibleBrowsers(finder_options, android_platform)
except base_error.BaseError as e:
logging.error('Unable to find browsers on %s: %s', device.device_id, str(e))
ps_output = subprocess.check_output(['ps', '-ef'])
logging.error('Ongoing processes:\n%s', ps_output)
return []
| bsd-3-clause |
koushikcgit/xen-api | scripts/import-boxgrinder.py | 25 | 9294 | #!/usr/bin/env python
#
# Copyright (C) Citrix Inc
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Import appliances generated by boxgrinder into XenServer/XCP
import os, sys, time, socket, traceback, syslog
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
pid = None
use_syslog = False
def reopenlog(log_file):
global log_f
if log_f:
log_f.close()
if log_file and log_file <> "stdout:":
log_f = open(log_file, "aw")
elif log_file and log_file == "stdout:":
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
def log(txt):
global log_f, pid, use_syslog
if use_syslog:
syslog.syslog(txt)
return
if not pid:
pid = os.getpid()
t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime())
print >>log_f, "%s [%d] %s" % (t, pid, txt)
log_f.flush()
# For reference, here's what the boxgrinder default output XML looks like
# Is there a definition somewhere?
example = """
<image>
<name>centos-base</name>
<domain>
<boot type='hvm'>
<guest>
<arch>x86_64</arch>
</guest>
<os>
<loader dev='hd'/>
</os>
<drive disk='centos-base-sda.raw' target='hda'/>
</boot>
<devices>
<vcpu>1</vcpu>
<memory>262144</memory>
<interface/>
<graphics/>
</devices>
</domain>
<storage>
<disk file='centos-base-sda.raw' use='system' format='raw'/>
</storage>
</image>
"""
import xmlrpclib
class XCPError(Exception):
def __init__(self, result):
self.result = result
def __str__(self):
# {'Status': 'Failure', 'ErrorDescription': ['SESSION_AUTHENTICATION_FAILED', 'a', 'Authentication failure']}
return " ".join(self.result["ErrorDescription"])
class Failure(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return self.reason
def value(x):
if "Value" in x:
return x["Value"]
else:
raise XCPError(x)
# We base our VMs off this generic HVM template
base_template = "Other install media"
import xml.dom.minidom
import sys
# Creates the VM, VBDs and VDIs
def import_metadata(server, session, filename):
doc = xml.dom.minidom.parse(filename)
def getSingleElement(doc, name):
elements = doc.getElementsByTagName(name)
if len(elements) <> 1:
raise Failure("Expecting exactly one <%s> element" % name)
return elements[0]
image = getSingleElement(doc, "image")
domain = getSingleElement(image, "domain")
boot = getSingleElement(domain, "boot")
devices = getSingleElement(domain, "devices")
storage = getSingleElement(image, "storage")
def getText(doc, name):
nodes = doc.getElementsByTagName(name)
if len(nodes) <> 1:
            raise Failure("Expecting exactly one <%s> tag" % name)
result = ""
for child in nodes[0].childNodes:
if child.nodeType == child.TEXT_NODE:
result = result + child.data
return result
def getAttr(doc, name):
for (n, value) in doc.attributes.items():
if name == n:
return value
return ""
# Clone the "Other install media" template and inherit basic
# properties from it.
templates = value(server.VM.get_by_name_label(session, base_template))
if len(templates) <> 1:
raise Failure("Expecting exactly one \"%s\" template" % base_template)
template = templates[0]
name = getText(image, "name")
log("Cloning template %s into %s" % (base_template, name))
vm = value(server.VM.clone(session, template, name))
value(server.VM.set_is_a_template(session, vm, False))
vcpu = getText(devices, "vcpu")
if vcpu <> "":
log("Setting number of vCPUs to: %s" % vcpu)
value(server.VM.set_VCPUs_max(session, vm, vcpu))
value(server.VM.set_VCPUs_at_startup(session, vm, vcpu))
memory = getText(devices, "memory") # KiB
if memory <> "":
log("Setting memory to %s KiB" % memory)
        memory_bytes = str(long(memory) * 1024L)
        value(server.VM.set_memory_limits(session, vm, "0", memory_bytes, memory_bytes, memory_bytes))
boot_type = getAttr(boot, "type")
if boot_type == "hvm":
log("VM is set to HVM boot by default")
else:
log("Ignoring unknown boot type: %s" % boot_type)
# Disks
disks = storage.getElementsByTagName("disk")
drives = boot.getElementsByTagName("drive")
pool = value(server.pool.get_all(session))[0]
sr = value(server.pool.get_default_SR(session, pool))
try:
log("Will create disks in the default SR: %s" % (value(server.SR.get_name_label(session, sr))))
except Exception, e:
log("Caught %s" % str(e))
raise Failure("Default SR is not set on the pool (%s)" % sr)
vdis = {}
for disk in disks:
ty = getAttr(disk, "format")
if ty <> "raw":
raise Failure("Expected all disks to have format = raw")
filename = getAttr(disk, "file")
size = os.path.getsize(filename)
_type = "user"
if getAttr(disk, "use") == "system":
_type = "system"
vdi_info = {
"name_label": filename,
"name_description": "",
"SR": sr,
"virtual_size": str(size),
"type": _type,
"sharable": False,
"read_only": False,
"other_config": {},
}
vdi = value(server.VDI.create(session, vdi_info))
log("Created VDI %s for %s" % (vdi, filename))
vdis[filename] = vdi
for drive in drives:
disk = getAttr(drive, "disk")
target = getAttr(drive, "target")
vdi = vdis[disk]
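        # Only the first drive listed in the <boot> section is marked bootable.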
bootable = drive == drives[0]
vbd_info = {
"VM": vm,
"VDI": vdi,
"userdevice": target,
"bootable": bootable,
"mode": "RW",
"type": "Disk",
"empty": False,
"other_config": { "owner": "true" },
"qos_algorithm_type": "",
"qos_algorithm_params": {},
}
vbd = value(server.VBD.create(session, vbd_info))
log("Created VBD %s for %s" % (vbd, disk))
return (vm, vdis)
CURL = "/usr/bin/curl"
if not(os.path.exists(CURL)):
raise Failure("%s doesn't exist" % CURL)
import commands
def import_vdi(url, session, vdi, filename):
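    # commands.getstatusoutput runs the command through /bin/sh, so the '&'
    # in the query string must be escaped to keep the shell from
    # backgrounding curl.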
cmd = "%s -T%s %s/import_raw_vdi?session_id=%s\&vdi=%s" % (CURL, filename, url, session, vdi)
log("%s" % cmd)
(code, output) = commands.getstatusoutput(cmd)
if code <> 0:
log("Disk upload failed: %s" % output)
raise Failure("disk upload failed")
if __name__ == "__main__":
from optparse import OptionParser
settings = {
"log": "stdout:",
"server": "http://127.0.0.1",
"username": "root",
"password": "",
}
log("settings = %s" % repr(settings))
parser = OptionParser(usage="usage: %prog [options] filename.xml")
parser.add_option("-l", "--log", dest="logfile", help="log to LOG", metavar="LOG")
parser.add_option("-s", "--server", dest="server", help="connect to SERVER", metavar="SERVER")
parser.add_option("-u", "--username", dest="username", help="login as USERNAME", metavar="USERNAME")
parser.add_option("-p", "--password", dest="password", help="use password PASSWORD", metavar="PASSWORD")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("wrong number of arguments")
options = options.__dict__
for setting in settings:
if setting in options and options[setting]:
settings[setting] = options[setting]
s = repr(settings[setting])
if setting == "password":
s = "*REDACTED*"
log("option settings[%s] <- %s" % (setting, s))
if settings["log"] == "syslog:":
use_syslog = True
reopenlog(None)
elif settings["log"] == "stdout:":
use_syslog = False
reopenlog("stdout:")
else:
use_syslog = False
reopenlog(settings["log"])
server = xmlrpclib.Server(settings["server"])
session = value(server.session.login_with_password(settings["username"], settings["password"], "1.0", "xen-api-scripts-import-boxgrinder"))
try:
(vm, vdis) = import_metadata(server, session, args[0])
for filename in vdis.keys():
import_vdi(settings["server"], session, vdis[filename], filename)
log("VM import complete")
log("%s" % vm)
finally:
value(server.session.logout(session))
| lgpl-2.1 |