Dataset schema (one record per commit; fields appear in this order):

    commit:        string, 40 chars
    subject:       string, 4 to 1.73k chars
    repos:         string, 5 to 127k chars (comma-separated repo list)
    old_file:      string, 2 to 751 chars
    new_file:      string, 2 to 751 chars
    new_contents:  string, 1 to 8.98k chars
    old_contents:  string, 0 to 6.59k chars
    license:       one of 13 values
    lang:          one of 23 values
1d67f755ea0f638c3cabef9e9359665d5b50ff86
Clean up BeamConstellation
CactusDev/CactusBot
cactusbot/services/beam/constellation.py
cactusbot/services/beam/constellation.py
"""Interact with Beam Constellation.""" import re import json from .. import WebSocket class BeamConstellation(WebSocket): """Interact with Beam Constellation.""" URL = "wss://constellation.beam.pro" RESPONSE_EXPR = re.compile(r'^(\d+)(.+)?$') INTERFACE_EXPR = re.compile(r'^([a-z]+):\d+:([a-z]+)') def __init__(self, channel, user): super().__init__(self.URL) assert isinstance(channel, int), "Channel ID must be an integer." self.channel = channel assert isinstance(user, int), "User ID must be an integer." self.user = user async def initialize(self, *interfaces): """Subscribe to Constellation interfaces.""" if not interfaces: interfaces = ( "channel:{channel}:update", "channel:{channel}:status", "channel:{channel}:followed", "channel:{channel}:subscribed", "channel:{channel}:hosted", "user:{user}:followed", "user:{user}:subscribed", "user:{user}:achievement" ) interfaces = [ interface.format(channel=self.channel, user=self.user) for interface in interfaces ] packet = { "type": "method", "method": "livesubscribe", "params": { "events": interfaces }, "id": 1 } self.websocket.send_str(json.dumps(packet)) await self.receive() self.logger.info( "Successfully subscribed to Constellation interfaces.") async def parse(self, packet): """Parse a chat packet.""" try: packet = json.loads(packet) except (TypeError, ValueError): self.logger.exception("Invalid JSON: %s.", packet) return None else: if packet.get("error") is not None: self.logger.error(packet) else: self.logger.debug(packet) return packet
"""Interact with Beam Constellation.""" from logging import getLogger import re import json import asyncio from .. import WebSocket class BeamConstellation(WebSocket): """Interact with Beam Constellation.""" URL = "wss://constellation.beam.pro" RESPONSE_EXPR = re.compile(r'^(\d+)(.+)?$') INTERFACE_EXPR = re.compile(r'^([a-z]+):\d+:([a-z]+)') def __init__(self, channel, user): super().__init__(self.URL) self.logger = getLogger(__name__) assert isinstance(channel, int), "Channel ID must be an integer." self.channel = channel assert isinstance(user, int), "User ID must be an integer." self.user = user async def read(self, handle): """Read packets from the Constellation WebSocket.""" packet = await self.parse(await self.receive()) await super().read(handle) async def initialize(self, *interfaces): """Subscribe to Constellation interfaces.""" if not interfaces: interfaces = [ "channel:{channel}:update", "channel:{channel}:status", "channel:{channel}:followed", "channel:{channel}:subscribed", "channel:{channel}:hosted", "user:{user}:followed", "user:{user}:subscribed", "user:{user}:achievement" ] interfaces = list( interface.format(channel=self.channel, user=self.user) for interface in interfaces ) packet = { "type": "method", "method": "livesubscribe", "params": { "events": interfaces }, "id": 1 } self.websocket.send_str(json.dumps(packet)) self.logger.info( "Successfully subscribed to Constellation interfaces.")
mit
Python
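Note: the cleanup above drops the separate logging setup and the read() override; both versions build the same livesubscribe frame. A minimal, self-contained sketch of that frame's serialization (the channel and user IDs are made up for illustration; only the packet shape comes from the code above):

import json

channel, user = 12345, 678  # hypothetical IDs; real ones come from the Beam API

events = [
    "channel:{channel}:update".format(channel=channel),
    "user:{user}:followed".format(user=user),
]
packet = {
    "type": "method",
    "method": "livesubscribe",
    "params": {"events": events},
    "id": 1,
}
# json.dumps produces the text frame that send_str() pushes over the WebSocket.
print(json.dumps(packet))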
948dadbd4aa262c86e561c56e7cd7748cdefa18b
Extend teacher column for institute courses
sonicyang/NCKU_Course,sonicyang/NCKU_Course,sonicyang/NCKU_Course,sonicyang/NCKU_Course
data_center/models.py
data_center/models.py
new_contents:

# -*- coding: utf-8 -*-
from datetime import datetime

from django.db import models


class Course(models.Model):
    """Course database schema"""
    no = models.CharField(max_length=20, blank=True)
    code = models.CharField(max_length=20, blank=True)
    eng_title = models.CharField(max_length=200, blank=True)
    chi_title = models.CharField(max_length=200, blank=True)
    note = models.TextField(blank=True)
    objective = models.CharField(max_length=80, blank=True)
    time = models.CharField(max_length=80, blank=True)
    time_token = models.CharField(max_length=80, blank=True)
    teacher = models.CharField(max_length=120, blank=True)  # Only save Chinese
    room = models.CharField(max_length=80, blank=True)
    credit = models.IntegerField(blank=True, null=True)
    limit = models.IntegerField(blank=True, null=True)
    prerequisite = models.BooleanField(default=False, blank=True)
    clas = models.CharField(max_length=10, blank=True)
    dept = models.CharField(max_length=10, blank=True)
    serial = models.CharField(max_length=20, blank=True)
    ge = models.CharField(max_length=80, blank=True)
    hit = models.IntegerField(default=0)
    syllabus = models.TextField(blank=True)  # A html div

    def __str__(self):
        return self.no


class Department(models.Model):
    dept_name = models.CharField(max_length=20, blank=True)
    required_course = models.ManyToManyField(Course, blank=True)

    def __unicode__(self):
        return self.dept_name


class Announcement(models.Model):
    TAG_CHOICE = (
        ('Info', '公告'),
        ('Bug', '已知問題'),
        ('Fix', '問題修復'),
    )

    content = models.TextField(blank=True)
    time = models.DateTimeField(default=datetime.now)
    tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')

    def __unicode__(self):
        return '%s|%s' % (self.time, self.tag)
old_contents:

# -*- coding: utf-8 -*-
from datetime import datetime

from django.db import models


class Course(models.Model):
    """Course database schema"""
    no = models.CharField(max_length=20, blank=True)
    code = models.CharField(max_length=20, blank=True)
    eng_title = models.CharField(max_length=200, blank=True)
    chi_title = models.CharField(max_length=200, blank=True)
    note = models.TextField(blank=True)
    objective = models.CharField(max_length=80, blank=True)
    time = models.CharField(max_length=80, blank=True)
    time_token = models.CharField(max_length=80, blank=True)
    teacher = models.CharField(max_length=80, blank=True)  # Only save Chinese
    room = models.CharField(max_length=80, blank=True)
    credit = models.IntegerField(blank=True, null=True)
    limit = models.IntegerField(blank=True, null=True)
    prerequisite = models.BooleanField(default=False, blank=True)
    clas = models.CharField(max_length=10, blank=True)
    dept = models.CharField(max_length=10, blank=True)
    serial = models.CharField(max_length=20, blank=True)
    ge = models.CharField(max_length=80, blank=True)
    hit = models.IntegerField(default=0)
    syllabus = models.TextField(blank=True)  # A html div

    def __str__(self):
        return self.no


class Department(models.Model):
    dept_name = models.CharField(max_length=20, blank=True)
    required_course = models.ManyToManyField(Course, blank=True)

    def __unicode__(self):
        return self.dept_name


class Announcement(models.Model):
    TAG_CHOICE = (
        ('Info', '公告'),
        ('Bug', '已知問題'),
        ('Fix', '問題修復'),
    )

    content = models.TextField(blank=True)
    time = models.DateTimeField(default=datetime.now)
    tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')

    def __unicode__(self):
        return '%s|%s' % (self.time, self.tag)
mit
Python
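Note: widening teacher from 80 to 120 characters is a schema change, so on a stock Django project it only takes effect once a migration is generated and applied. A hedged sketch of what that looks like (the app label data_center is taken from the file path; the dependency name is hypothetical):

# Standard Django workflow for this kind of change:
#   python manage.py makemigrations data_center
#   python manage.py migrate
#
# The generated migration would contain an AlterField along these lines:
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [('data_center', '0001_initial')]  # hypothetical previous migration
    operations = [
        migrations.AlterField(
            model_name='course',
            name='teacher',
            field=models.CharField(max_length=120, blank=True),
        ),
    ]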
f0243e8ab8897d218bcf45af91a7cd03a3f83c5e
Add section comments.
Baza207/CloudKitPy
cloudkitpy/container.py
cloudkitpy/container.py
new_contents:

#
# container.py
# CloudKitPy
#
# Created by James Barrow on 28/04/2016.
# Copyright (c) 2013-2016 Pig on a Hill Productions. All rights reserved.
#
# !/usr/bin/env python


class Container:

    # Getting the Public and Private Databases
    public_cloud_database = None
    private_cloud_database = None

    # Getting the Identifier and Environment
    container_identifier = None
    environment = None
    apns_environment = None

    def __init__(
        self,
        container_identifier,
        environment,
        apns_environment=None
    ):
        pass

    # Discovering Users
    def fetch_user_info(self):
        """Fetch information about the current user asynchronously."""
        pass

    def discover_user_info_with_email_address(self, email_address):
        """Fetch information about a single user.

        Based on the user's email address.
        """
        pass

    def discover_user_info_with_user_record_name(self, record_name):
        """Fetch information about a single user using the record name."""
        pass
old_contents:

#
# container.py
# CloudKitPy
#
# Created by James Barrow on 28/04/2016.
# Copyright (c) 2013-2016 Pig on a Hill Productions. All rights reserved.
#
# !/usr/bin/env python


class Container:

    public_cloud_database = None
    private_cloud_database = None

    container_identifier = None
    environment = None
    apns_environment = None

    def __init__(
        self,
        container_identifier,
        environment,
        apns_environment=None
    ):
        pass

    def fetch_user_info(self):
        """Fetch information about the current user asynchronously."""
        pass

    def discover_user_info_with_email_address(self, email_address):
        """Fetch information about a single user.

        Based on the user's email address.
        """
        pass

    def discover_user_info_with_user_record_name(self, record_name):
        """Fetch information about a single user using the record name."""
        pass
mit
Python
7965ce3036f98a9b880f19f688e7e282644e63cf
remove server_name
vcaen/personalwebsite,vcaen/personalwebsite,vcaen/personalwebsite,vcaen/personalwebsite
app/main.py
app/main.py
new_contents:

from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def show_about():
    return render_template('aboutme.html')

if __name__ == '__main__':
    app.run()
old_contents:

from flask import Flask, render_template

app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SERVER_NAME'] = "vcaen.com"


@app.route('/')
def show_about():
    return render_template('aboutme.html')

if __name__ == '__main__':
    app.run()
cc0-1.0
Python
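Note: the deleted SERVER_NAME line is a classic Flask pitfall: when it is set, Flask's router only matches requests whose Host header equals that value, so hitting the app at localhost typically yields 404s. A small sketch of the failure mode (stock Flask behavior as generally documented, not code from this repo):

from flask import Flask

app = Flask(__name__)
app.config['SERVER_NAME'] = "vcaen.com"  # requests with Host: localhost:5000 now 404


@app.route('/')
def index():
    return "served only when the Host header matches vcaen.com"

# With SERVER_NAME removed, as in the commit, the route matches any host.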
a1d9312e1ac6f66aaf558652d890ac2a6bd67e40
Add parent so we can track versions.
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
backend/loader/model/datafile.py
backend/loader/model/datafile.py
new_contents:

from dataitem import DataItem


class DataFile(DataItem):
    def __init__(self, name, access, owner):
        super(DataFile, self).__init__(name, access, owner, "datafile")
        self.checksum = ""
        self.size = 0
        self.location = ""
        self.mediatype = ""
        self.conditions = []
        self.text = ""
        self.metatags = []
        self.datadirs = []
        self.parent = ""
old_contents:

from dataitem import DataItem


class DataFile(DataItem):
    def __init__(self, name, access, owner):
        super(DataFile, self).__init__(name, access, owner, "datafile")
        self.checksum = ""
        self.size = 0
        self.location = ""
        self.mediatype = ""
        self.conditions = []
        self.text = ""
        self.metatags = []
        self.datadirs = []
mit
Python
424b50960e7ca42c61ccc98864f9876e9688dcd4
remove empty elements
pkimber/search,pkimber/search,pkimber/search
example/models.py
example/models.py
new_contents:

from django.db import models


class Cake(models.Model):

    name = models.CharField(max_length=100)
    description = models.TextField()

    class Meta:
        verbose_name = 'Cake'
        verbose_name_plural = 'Cakes'

    def __unicode__(self):
        return unicode('{}'.format(self.name))

    def get_summary_description(self):
        return filter(None, (
            self.name,
            self.description,
        ))


class Coffee(models.Model):

    name = models.CharField(max_length=100)
    rating = models.IntegerField()

    class Meta:
        verbose_name = 'Coffee'
        verbose_name_plural = 'Coffees'

    def __unicode__(self):
        return unicode('{}'.format(self.name))

    def get_summary_description(self):
        return filter(None, (
            self.name,
            self.rating,
        ))
old_contents:

from django.db import models


class Cake(models.Model):

    name = models.CharField(max_length=100)
    description = models.TextField()

    class Meta:
        verbose_name = 'Cake'
        verbose_name_plural = 'Cakes'

    def __unicode__(self):
        return unicode('{}'.format(self.name))

    def get_summary_description(self):
        return self.name, self.description


class Coffee(models.Model):

    name = models.CharField(max_length=100)
    rating = models.IntegerField()

    class Meta:
        verbose_name = 'Coffee'
        verbose_name_plural = 'Coffees'

    def __unicode__(self):
        return unicode('{}'.format(self.name))

    def get_summary_description(self):
        return self.name, self.rating
apache-2.0
Python
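Note: the new get_summary_description() wraps the fields in filter(None, ...), which drops falsy members such as empty strings; the old version returned the raw tuple, empties included. A quick Python 2 sketch of the difference (Python 2 because the models still define __unicode__; field values are invented):

name, description = 'Victoria sponge', ''  # hypothetical field values

print filter(None, (name, description))  # ['Victoria sponge'] - empty string dropped
print (name, description)                # ('Victoria sponge', '') - old behaviour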
bcc7692e14b7b695f08dfb39aaccf3dbfa67d857
Add safeGetInt to BMConfigParser
hb9kns/PyBitmessage,hb9kns/PyBitmessage,bmng-dev/PyBitmessage,bmng-dev/PyBitmessage,bmng-dev/PyBitmessage,hb9kns/PyBitmessage,hb9kns/PyBitmessage
src/configparser.py
src/configparser.py
new_contents:

import ConfigParser
from singleton import Singleton

@Singleton
class BMConfigParser(ConfigParser.SafeConfigParser):
    def set(self, section, option, value=None):
        if self._optcre is self.OPTCRE or value:
            if not isinstance(value, basestring):
                raise TypeError("option values must be strings")
        return ConfigParser.ConfigParser.set(self, section, option, value)

    def get(self, section, option, raw=False, vars=None):
        if section == "bitmessagesettings" and option == "timeformat":
            try:
                return ConfigParser.ConfigParser.get(self, section, option, raw, vars)
            except ConfigParser.InterpolationError:
                return ConfigParser.ConfigParser.get(self, section, option, True, vars)
        return ConfigParser.ConfigParser.get(self, section, option, True, vars)

    def safeGetBoolean(self, section, field):
        if self.has_option(section, field):
            try:
                return self.getboolean(section, field)
            except ValueError:
                return False
        return False

    def safeGetInt(self, section, field):
        if self.has_option(section, field):
            try:
                return self.getint(section, field)
            except ValueError:
                return 0
        return 0

    def safeGet(self, section, option, default = None):
        if self.has_option(section, option):
            return self.get(section, option)
        else:
            return default

    def items(self, section, raw=False, vars=None):
        return ConfigParser.ConfigParser.items(self, section, True, vars)
old_contents:

import ConfigParser
from singleton import Singleton

@Singleton
class BMConfigParser(ConfigParser.SafeConfigParser):
    def set(self, section, option, value=None):
        if self._optcre is self.OPTCRE or value:
            if not isinstance(value, basestring):
                raise TypeError("option values must be strings")
        return ConfigParser.ConfigParser.set(self, section, option, value)

    def get(self, section, option, raw=False, vars=None):
        if section == "bitmessagesettings" and option == "timeformat":
            try:
                return ConfigParser.ConfigParser.get(self, section, option, raw, vars)
            except ConfigParser.InterpolationError:
                return ConfigParser.ConfigParser.get(self, section, option, True, vars)
        return ConfigParser.ConfigParser.get(self, section, option, True, vars)

    def safeGetBoolean(self, section, field):
        if self.has_option(section, field):
            try:
                return self.getboolean(section, field)
            except ValueError:
                return False
        return False

    def safeGet(self, section, option, default = None):
        if self.has_option(section, option):
            return self.get(section, option)
        else:
            return default

    def items(self, section, raw=False, vars=None):
        return ConfigParser.ConfigParser.items(self, section, True, vars)
mit
Python
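Note: safeGetInt mirrors safeGetBoolean: a missing option or an unparseable value collapses to a default instead of raising. The pattern in isolation, using the stdlib ConfigParser directly (Python 2, matching the file; section and option names are made up):

import ConfigParser


def safe_get_int(config, section, field):
    # Same guard structure as BMConfigParser.safeGetInt above.
    if config.has_option(section, field):
        try:
            return config.getint(section, field)
        except ValueError:
            return 0
    return 0

config = ConfigParser.SafeConfigParser()
config.add_section('bitmessagesettings')
config.set('bitmessagesettings', 'port', 'not-a-number')

print safe_get_int(config, 'bitmessagesettings', 'port')     # 0, bad value swallowed
print safe_get_int(config, 'bitmessagesettings', 'missing')  # 0, option absent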
5bce4bb123a086dd116abbd0932d34fa170a83cd
Update view to point to corrected template path
ayleph/mediagoblin-basicsearch,ayleph/mediagoblin-basicsearch
search/views.py
search/views.py
new_contents:

# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from mediagoblin import mg_globals
from mediagoblin.db.base import Session
from mediagoblin.db.models import (MediaEntry, MediaTag, Collection,
                                   CollectionItem, User)
from mediagoblin.decorators import uses_pagination
from mediagoblin.tools.response import render_to_response
from mediagoblin.tools.pagination import Pagination
from mediagoblin.plugins.search import forms as search_forms
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.meddleware.csrf import csrf_exempt

from sqlalchemy import and_, or_

import logging
_log = logging.getLogger(__name__)


@csrf_exempt
@uses_pagination
def search_results_view(request, page):
    media_entries = None
    pagination = None
    query = None
    form = search_forms.SearchForm(
        request.form)

    #if request.method == 'GET':
    if request.GET.get('query') != None:
        if request.GET.get('query') != '':
            query = '%' + request.GET.get('query') + '%'
            #cursor = MediaEntry.query.filter(MediaEntry.uploader==1).\
            matches = MediaEntry.query.filter(
                and_(
                    MediaEntry.state == u'processed',
                    or_(
                        MediaEntry.title.ilike(query),
                        MediaEntry.description.ilike(query)
                    )
                )).order_by(MediaEntry.created.desc())

            #_log.info(matches)

            pagination = Pagination(page, matches)
            media_entries = pagination()

    return render_to_response(
        request,
        'mediagoblin/plugins/search/results.html',
        {'media_entries': media_entries,
         'pagination': pagination,
         'form': form})
old_contents:

# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from mediagoblin import mg_globals
from mediagoblin.db.base import Session
from mediagoblin.db.models import (MediaEntry, MediaTag, Collection,
                                   CollectionItem, User)
from mediagoblin.decorators import uses_pagination
from mediagoblin.tools.response import render_to_response
from mediagoblin.tools.pagination import Pagination
from mediagoblin.plugins.search import forms as search_forms
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.meddleware.csrf import csrf_exempt

from sqlalchemy import and_, or_

import logging
_log = logging.getLogger(__name__)


@csrf_exempt
@uses_pagination
def search_results_view(request, page):
    media_entries = None
    pagination = None
    query = None
    form = search_forms.SearchForm(
        request.form)

    #if request.method == 'GET':
    if request.GET.get('query') != None:
        if request.GET.get('query') != '':
            query = '%' + request.GET.get('query') + '%'
            #cursor = MediaEntry.query.filter(MediaEntry.uploader==1).\
            matches = MediaEntry.query.filter(
                and_(
                    MediaEntry.state == u'processed',
                    or_(
                        MediaEntry.title.ilike(query),
                        MediaEntry.description.ilike(query)
                    )
                )).order_by(MediaEntry.created.desc())

            #_log.info(matches)

            pagination = Pagination(page, matches)
            media_entries = pagination()

    return render_to_response(
        request,
        'search/results.html',
        {'media_entries': media_entries,
         'pagination': pagination,
         'form': form})
agpl-3.0
Python
d77777c2a011e77b284748d1dfbd3cd31e6c8565
make verifier regexing more robust
uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco
c_test_environment/verifier.py
c_test_environment/verifier.py
new_contents:

import re
import sys

def verify(testout, expected, ordered):
    numpat = re.compile(r'(\d+)')
    tuplepat = re.compile(r'Materialized')

    test = ({}, [])
    expect = ({}, [])

    def addTuple(tc, t):
        if ordered:
            tcl = tc[1]
            tcl.append(t)
        else:
            tcs = tc[0]
            if t not in tcs:
                tcs[t] = 1
            else:
                tcs[t]+=1

    with open(testout, 'r') as file:
        for line in file.readlines():
            m = tuplepat.search(line)
            if m:
                tlist = []
                for number in numpat.finditer(line, m.end()):
                    tlist.append(int(number.group(0)))
                t = tuple(tlist)
                addTuple(test, t)

    with open(expected, 'r') as file:
        for line in file.readlines():
            tlist = []
            for number in numpat.finditer(line):
                tlist.append(int(number.group(0)))
            t = tuple(tlist)
            addTuple(expect, t)

    print test
    print expect
    assert test == expect, "\n test: %s !=\n expect:%s" % (test, expect)
    print "pass"

if __name__ == '__main__':
    testout=sys.argv[1]
    expected=sys.argv[2]
    ordered = False
    if len(sys.argv) > 3:
        if sys.argv[3] == 'o':
            ordered = True

    verify(testout, expected, ordered)
old_contents:

import re
import sys

def verify(testout, expected, ordered):
    test = ({}, [])
    expect = ({}, [])

    def addTuple(tc, t):
        if ordered:
            tcl = tc[1]
            tcl.append(t)
        else:
            tcs = tc[0]
            if t not in tcs:
                tcs[t] = 1
            else:
                tcs[t]+=1

    with open(testout, 'r') as file:
        for line in file.readlines():
            if re.match(r'Materialized', line):
                tlist = []
                for number in re.finditer(r'(\d+)', line):
                    tlist.append(int(number.group(0)))
                t = tuple(tlist)
                addTuple(test, t)

    with open(expected, 'r') as file:
        for line in file.readlines():
            tlist = []
            for number in re.finditer(r'(\d+)', line):
                tlist.append(int(number.group(0)))
            t = tuple(tlist)
            addTuple(expect, t)

    print test
    print expect
    assert test == expect, "\n test: %s !=\n expect:%s" % (test, expect)
    print "pass"

if __name__ == '__main__':
    testout=sys.argv[1]
    expected=sys.argv[2]
    ordered = False
    if len(sys.argv) > 3:
        if sys.argv[3] == 'o':
            ordered = True

    verify(testout, expected, ordered)
bsd-3-clause
Python
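Note: the rewrite swaps a bare re.match(r'Materialized', line) for a precompiled tuplepat.search(). The two only behave the same when "Materialized" starts the line, because re.match anchors at position 0 while re.search scans the whole string, a distinction worth keeping in mind when reading the diff:

import re

line = "  Materialized (1, 2, 3)"  # illustrative output line

print(re.match(r'Materialized', line))   # None: match() anchors at the start
print(re.search(r'Materialized', line))  # match at offset 2: search() scans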
82641a936b2215480e29896cdafed3872c2928c6
Remove xfails for newly passing tests in test_recipe_integration.py
ahal/active-data-recipes,ahal/active-data-recipes
test/test_recipes_integration.py
test/test_recipes_integration.py
new_contents:

import pytest
import os
import subprocess
import json

# Each test with recipe and appropriate parameters in one line
# Using bracket annotation to set it optional (xfail)
TEST_CASES = [
    "activedata_usage",
    "backout_rate",
    ["code_coverage --path caps --rev 45715ece25fc"],
    "code_coverage_by_suite --path caps --rev 45715ece25fc",
    "config_durations",
    "files_with_coverage",
    "intermittent_tests",
    "intermittent_test_data",
    ["raw_coverage --path caps --rev 45715ece25fc"],
    "test_durations",
    ["tests_config_times -t test-windows10-64/opt-awsy-e10s"],
    "tests_in_duration",
    "try_efficiency",
    "try_usage",
    ["try_users"]
]


def load_tests(tests):
    return [pytest.param(test[0], marks=pytest.mark.xfail)
            if isinstance(test, list) else test
            for test in tests]


@pytest.mark.skipif(os.getenv("TRAVIS_EVENT_TYPE") != "cron",
                    reason="Not run by cron job")
@pytest.mark.parametrize("recipe", load_tests(TEST_CASES))
def test_recipe_integration(recipe):
    command = ['adr', '--format', 'json']
    command.extend(recipe.split(" "))
    data = subprocess.check_output(command, stderr=subprocess.STDOUT)
    result = json.loads(data)
    assert result
    assert len(result)
old_contents:

import pytest
import os
import subprocess
import json

# Each test with recipe and appropriate parameters in one line
# Using bracket annotation to set it optional (xfail)
TEST_CASES = [
    "activedata_usage",
    ["backout_rate"],
    ["code_coverage --path caps --rev 45715ece25fc"],
    "code_coverage_by_suite --path caps --rev 45715ece25fc",
    "config_durations",
    "files_with_coverage",
    ["intermittent_tests"],
    ["intermittent_test_data"],
    ["raw_coverage --path caps --rev 45715ece25fc"],
    "test_durations",
    ["tests_config_times -t test-windows10-64/opt-awsy-e10s"],
    ["tests_in_duration"],
    ["try_efficiency"],
    ["try_usage"],
    ["try_users"]
]


def load_tests(tests):
    return [pytest.param(test[0], marks=pytest.mark.xfail)
            if isinstance(test, list) else test
            for test in tests]


@pytest.mark.skipif(os.getenv("TRAVIS_EVENT_TYPE") != "cron",
                    reason="Not run by cron job")
@pytest.mark.parametrize("recipe", load_tests(TEST_CASES))
def test_recipe_integration(recipe):
    command = ['adr', '--format', 'json']
    command.extend(recipe.split(" "))
    data = subprocess.check_output(command, stderr=subprocess.STDOUT)
    result = json.loads(data)
    assert result
    assert len(result)
mpl-2.0
Python
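Note: in this file's convention, wrapping a recipe string in brackets marks it as expected-to-fail: load_tests turns list entries into pytest.param(..., marks=pytest.mark.xfail). Removing the brackets is therefore how a newly passing recipe is promoted. The mechanism in miniature:

import pytest

CASES = ["passing_case", ["failing_case"]]  # bracketed entries are xfail

params = [
    pytest.param(case[0], marks=pytest.mark.xfail) if isinstance(case, list) else case
    for case in CASES
]


@pytest.mark.parametrize("case", params)
def test_case(case):
    assert case == "passing_case"  # "failing_case" is reported as xfail, not a failure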
5ecab61cd66b821d70e73006f60d8f7908bfb403
Remove comment
davidrobles/mlnd-capstone-code
capstone/mdp/fixed_game_mdp.py
capstone/mdp/fixed_game_mdp.py
new_contents:

from .mdp import MDP
from .game_mdp import GameMDP
from ..utils import utility


class FixedGameMDP(GameMDP):

    def __init__(self, game, opp_player, opp_idx):
        '''
        opp_player: the opponent player
        opp_idx: the idx of the opponent player in the game
        '''
        self._game = game
        self._opp_player = opp_player
        self._opp_idx = opp_idx
        self._agent_idx = opp_idx ^ 1
        self._states = {}

    def reward(self, game, move, next_game):
        return utility(next_game, self._agent_idx) if next_game.is_over() else 0

    def start_state(self):
        new_game = self._game.copy()
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        return new_game

    def transitions(self, game, move):
        if game.is_over():
            return []
        new_game = game.copy().make_move(move)
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        return [(new_game, 1.0)]
old_contents:

from .mdp import MDP
from .game_mdp import GameMDP
from ..utils import utility


class FixedGameMDP(GameMDP):

    def __init__(self, game, opp_player, opp_idx):
        '''
        opp_player: the opponent player
        opp_idx: the idx of the opponent player in the game
        '''
        self._game = game
        self._opp_player = opp_player
        self._opp_idx = opp_idx
        self._agent_idx = opp_idx ^ 1
        self._states = {}

    #######
    # MDP #
    #######

    def reward(self, game, move, next_game):
        return utility(next_game, self._agent_idx) if next_game.is_over() else 0

    def start_state(self):
        new_game = self._game.copy()
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        return new_game

    def transitions(self, game, move):
        if game.is_over():
            return []
        new_game = game.copy().make_move(move)
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        return [(new_game, 1.0)]
mit
Python
b8c3ad8c9eb4cdf2618839b425b8413181a443ff
Fix bug in writePrecisePathToSnapshot not backtracking properly to the initial structure
AdaptivePELE/AdaptivePELE,AdaptivePELE/AdaptivePELE,AdaptivePELE/AdaptivePELE,AdaptivePELE/AdaptivePELE
AdaptivePELE/analysis/writePrecisePathToSnapshot.py
AdaptivePELE/analysis/writePrecisePathToSnapshot.py
""" Recreate the trajectory fragments to the led to the discovery of a snapshot, specified by the tuple (epoch, trajectory, snapshot) and write as a pdb file """ import os import sys import argparse import glob import itertools from AdaptivePELE.utilities import utilities def parseArguments(): """ Parse the command-line options :returns: :py:class:`.Clustering`, int, int, int, str -- Clustering object, number of trajectory, number of snapshot, number of epoch, output path where to write the files """ desc = "Write the information related to the conformation network to file\n" parser = argparse.ArgumentParser(description=desc) parser.add_argument("clusteringObject", type=str, help="Path to the clustering object") parser.add_argument("trajectory", type=int, help="Trajectory number") parser.add_argument("snapshot", type=int, help="Snapshot to select (in accepted steps)") parser.add_argument("epoch", type=str, help="Path to the epoch to search the snapshot") parser.add_argument("-o", type=str, default=None, help="Output path where to write the files") args = parser.parse_args() return args.clusteringObject, args.trajectory, args.snapshot, args.epoch, args.o if __name__ == "__main__": clusteringObject, trajectory, snapshot, epoch, outputPath = parseArguments() if outputPath is not None: outputPath = os.path.join(outputPath, "") if not os.path.exists(outputPath): os.makedirs(outputPath) else: outputPath = "" sys.stderr.write("Reading clustering object...\n") cl = utilities.readClusteringObject(clusteringObject) pathway = [] # Strip out trailing backslash if present pathPrefix, epoch = os.path.split(epoch.rstrip("/")) sys.stderr.write("Creating pathway...\n") while True: filename = glob.glob(os.path.join(pathPrefix, epoch, "*traj*_%d.pdb" % trajectory)) snapshots = utilities.getSnapshots(filename[0]) snapshots = snapshots[:snapshot+1] pathway.insert(0, snapshots) if epoch == '0': # Once we get to epoch 0, we just need to append the trajectory # where the cluster was found and we can break out of the loop break procMapping = open(os.path.join(pathPrefix, epoch, "processorMapping.txt")).read().rstrip().split(':') epoch, trajectory, snapshot = map(int, procMapping[trajectory-1][1:-1].split(',')) epoch = str(epoch) sys.stderr.write("Writing pathway...\n") with open(outputPath+"pathway.pdb", "a") as f: f.write("ENDMDL\n".join(itertools.chain.from_iterable(pathway)))
""" Recreate the trajectory fragments to the led to the discovery of a snapshot, specified by the tuple (epoch, trajectory, snapshot) and write as a pdb file """ import os import sys import argparse import glob import itertools from AdaptivePELE.utilities import utilities def parseArguments(): """ Parse the command-line options :returns: :py:class:`.Clustering`, int, int, int, str -- Clustering object, number of trajectory, number of snapshot, number of epoch, output path where to write the files """ desc = "Write the information related to the conformation network to file\n" parser = argparse.ArgumentParser(description=desc) parser.add_argument("clusteringObject", type=str, help="Path to the clustering object") parser.add_argument("trajectory", type=int, help="Trajectory number") parser.add_argument("snapshot", type=int, help="Snapshot to select (in accepted steps)") parser.add_argument("epoch", type=str, help="Path to the epoch to search the snapshot") parser.add_argument("-o", type=str, default=None, help="Output path where to write the files") args = parser.parse_args() return args.clusteringObject, args.trajectory, args.snapshot, args.epoch, args.o if __name__ == "__main__": clusteringObject, trajectory, snapshot, epoch, outputPath = parseArguments() if outputPath is not None: outputPath = os.path.join(outputPath, "") if not os.path.exists(outputPath): os.makedirs(outputPath) else: outputPath = "" sys.stderr.write("Reading clustering object...\n") cl = utilities.readClusteringObject(clusteringObject) pathway = [] # Strip out trailing backslash if present pathPrefix, epoch = os.path.split(epoch.rstrip("/")) sys.stderr.write("Creating pathway...\n") while epoch != "0": filename = glob.glob(os.path.join(pathPrefix,epoch,"*traj*_%d.pdb" % trajectory)) snapshots = utilities.getSnapshots(filename[0]) snapshots = snapshots[:snapshot+1] pathway.insert(0, snapshots) procMapping = open(os.path.join(pathPrefix, epoch, "processorMapping.txt")).read().rstrip().split(':') epoch, trajectory, snapshot = map(int, procMapping[trajectory-1][1:-1].split(',')) epoch = str(epoch) sys.stderr.write("Writing pathway...\n") with open(outputPath+"pathway.pdb", "a") as f: f.write("ENDMDL\n".join(itertools.chain.from_iterable(pathway)))
mit
Python
1549510fd9371818cff6644984896a5a9060cb36
Fix print statements with python3 syntax.
VROOM-Project/vroom-scripts,VROOM-Project/vroom-scripts
benchmarks/TSP/compare_to_BKS.py
benchmarks/TSP/compare_to_BKS.py
new_contents:

# -*- coding: utf-8 -*-
import json, sys, os
import numpy as np

# Compare a set of computed solutions to best known solutions on the
# same problems.

def s_round(v, d):
    if d == 0:
        return str(int(v))
    else:
        return str(round(v, d))

def log_comparisons(BKS, files):
    print(','.join(["Instance", "Jobs", "Vehicles", "Best known cost", "Solution cost", "Gap (%)", "Computing time (ms)"]))

    jobs = []
    gaps = []
    computing_times = []

    for f in files:
        instance = f[0:f.rfind("_sol.json")]
        instance = instance[instance.rfind('/') + 1:]
        if instance not in BKS:
            continue

        indicators = BKS[instance]
        BK_cost = indicators['best_known_cost']
        nb_job = indicators['jobs']
        jobs.append(nb_job)

        line = [
            instance,
            nb_job,
            indicators['vehicles'],
            BK_cost
        ]

        with open(f, 'r') as sol_file:
            solution = json.load(sol_file)

        if solution['code'] != 0:
            continue

        cost = solution['summary']['cost']
        line.append(cost)

        gap = 100 * (float(cost) / BK_cost - 1)
        line.append(round(gap, 2))
        gaps.append(gap)

        computing_time = solution['summary']['computing_times']['loading'] + solution['summary']['computing_times']['solving']
        line.append(computing_time)
        computing_times.append(computing_time)

        print(','.join(map(lambda x: str(x), line)))

    print(',')
    print('Average,' + s_round(np.mean(jobs), 1) + ',,,,' + s_round(np.mean(gaps), 2) + ',' + s_round(np.mean(computing_times), 0))

    # Percentiles
    print(',')
    gaps_percentiles = np.percentile(gaps, [0, 10, 25, 50, 75, 90, 100])
    ct_percentiles = np.percentile(computing_times, [0, 10, 25, 50, 75, 90, 100])

    print(',Gaps,Computing times')
    titles = ['Min', 'First decile', 'Lower quartile', 'Median', 'Upper quartile', 'Ninth decile', 'Max']
    for i in range(len(titles)):
        print(titles[i] + ',' + s_round(gaps_percentiles[i], 2) + ',' + s_round(ct_percentiles[i], 0))

if __name__ == "__main__":
    # First argument if the best known solution file.
    with open(sys.argv[1], 'r') as sol_file:
        bks = json.load(sol_file)

    # Remaining arguments are computed solution files to use.
    log_comparisons(bks, sys.argv[2:])
old_contents:

# -*- coding: utf-8 -*-
import json, sys, os
import numpy as np

# Compare a set of computed solutions to best known solutions on the
# same problems.

def s_round(v, d):
    if d == 0:
        return str(int(v))
    else:
        return str(round(v, d))

def log_comparisons(BKS, files):
    print ','.join(["Instance", "Jobs", "Vehicles", "Best known cost", "Solution cost", "Gap (%)", "Computing time (ms)"])

    jobs = []
    gaps = []
    computing_times = []

    for f in files:
        instance = f[0:f.rfind("_sol.json")]
        instance = instance[instance.rfind('/') + 1:]
        if instance not in BKS:
            continue

        indicators = BKS[instance]
        BK_cost = indicators['best_known_cost']
        nb_job = indicators['jobs']
        jobs.append(nb_job)

        line = [
            instance,
            nb_job,
            indicators['vehicles'],
            BK_cost
        ]

        with open(f, 'r') as sol_file:
            solution = json.load(sol_file)

        if solution['code'] != 0:
            continue

        cost = solution['summary']['cost']
        line.append(cost)

        gap = 100 * (float(cost) / BK_cost - 1)
        line.append(round(gap, 2))
        gaps.append(gap)

        computing_time = solution['summary']['computing_times']['loading'] + solution['summary']['computing_times']['solving']
        line.append(computing_time)
        computing_times.append(computing_time)

        print ','.join(map(lambda x: str(x), line))

    print ','
    print 'Average,' + s_round(np.mean(jobs), 1) + ',,,,' + s_round(np.mean(gaps), 2) + ',' + s_round(np.mean(computing_times), 0)

    # Percentiles
    print ','
    gaps_percentiles = np.percentile(gaps, [0, 10, 25, 50, 75, 90, 100])
    ct_percentiles = np.percentile(computing_times, [0, 10, 25, 50, 75, 90, 100])

    print ',Gaps,Computing times'
    titles = ['Min', 'First decile', 'Lower quartile', 'Median', 'Upper quartile', 'Ninth decile', 'Max']
    for i in range(len(titles)):
        print titles[i] + ',' + s_round(gaps_percentiles[i], 2) + ',' + s_round(ct_percentiles[i], 0)

if __name__ == "__main__":
    # First argument if the best known solution file.
    with open(sys.argv[1], 'r') as sol_file:
        bks = json.load(sol_file)

    # Remaining arguments are computed solution files to use.
    log_comparisons(bks, sys.argv[2:])
bsd-2-clause
Python
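Note: the change is the mechanical Python 2 to 3 conversion, print moving from statement to function. One subtlety worth flagging is that multi-operand prints do not translate by adding parentheses alone:

# Python 2: print 'a', 'b'   ->  a b
# Python 3: print('a', 'b')  ->  a b   (same output, now a function call)
# But under Python 2, print('a', 'b') prints the tuple ('a', 'b'),
# which is why files like this one are converted with string concatenation:
print('Average,' + str(42))  # safe and identical on both versions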
63bd0d8905ea9392e56f501381c054ba3a4ed1a7
Update __init__.py
ktnyt/chainer,wkentaro/chainer,niboshi/chainer,ronekko/chainer,chainer/chainer,chainer/chainer,okuta/chainer,wkentaro/chainer,ktnyt/chainer,pfnet/chainer,hvy/chainer,keisuke-umezawa/chainer,tkerola/chainer,okuta/chainer,keisuke-umezawa/chainer,hvy/chainer,jnishi/chainer,hvy/chainer,wkentaro/chainer,ktnyt/chainer,jnishi/chainer,chainer/chainer,jnishi/chainer,okuta/chainer,niboshi/chainer,niboshi/chainer,rezoo/chainer,okuta/chainer,keisuke-umezawa/chainer,niboshi/chainer,keisuke-umezawa/chainer,jnishi/chainer,hvy/chainer,anaruse/chainer,wkentaro/chainer,chainer/chainer,ktnyt/chainer
chainer/optimizers/__init__.py
chainer/optimizers/__init__.py
new_contents:

-# import classes and functions
from chainer.optimizers.ada_delta import AdaDelta  # NOQA
from chainer.optimizers.ada_grad import AdaGrad  # NOQA
from chainer.optimizers.adam import Adam  # NOQA
from chainer.optimizers.momentum_sgd import MomentumSGD  # NOQA
from chainer.optimizers.msvag import MSVAG  # NOQA
from chainer.optimizers.nesterov_ag import NesterovAG  # NOQA
from chainer.optimizers.rmsprop import RMSprop  # NOQA
from chainer.optimizers.rmsprop_graves import RMSpropGraves  # NOQA
from chainer.optimizers.sgd import SGD  # NOQA
from chainer.optimizers.smorms3 import SMORMS3  # NOQA
old_contents:

from chainer.optimizers.ada_delta import AdaDelta  # NOQA
from chainer.optimizers.ada_grad import AdaGrad  # NOQA
from chainer.optimizers.adam import Adam  # NOQA
from chainer.optimizers.momentum_sgd import MomentumSGD  # NOQA
from chainer.optimizers.msvag import MSVAG  # NOQA
from chainer.optimizers.nesterov_ag import NesterovAG  # NOQA
from chainer.optimizers.rmsprop import RMSprop  # NOQA
from chainer.optimizers.rmsprop_graves import RMSpropGraves  # NOQA
from chainer.optimizers.sgd import SGD  # NOQA
from chainer.optimizers.smorms3 import SMORMS3  # NOQA
mit
Python
7a7661bd03c947212ee46ca598cae5cd316757c1
Fix flake8
yuyu2172/chainercv,chainer/chainercv,yuyu2172/chainercv,chainer/chainercv,pfnet/chainercv
chainercv/datasets/__init__.py
chainercv/datasets/__init__.py
new_contents:

from chainercv.datasets.camvid.camvid_dataset import camvid_ignore_label_color  # NOQA
from chainercv.datasets.camvid.camvid_dataset import camvid_label_colors  # NOQA
from chainercv.datasets.camvid.camvid_dataset import camvid_label_names  # NOQA
from chainercv.datasets.camvid.camvid_dataset import CamVidDataset  # NOQA
from chainercv.datasets.cityscapes.cityscapes_semantic_segmentation_dataset import CityscapesSemanticSegmentationDataset  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_label_colors  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_label_names  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_labels  # NOQA
from chainercv.datasets.cub.cub_keypoint_dataset import CUBKeypointDataset  # NOQA
from chainercv.datasets.cub.cub_label_dataset import CUBLabelDataset  # NOQA
from chainercv.datasets.cub.cub_utils import cub_label_names  # NOQA
from chainercv.datasets.directory_parsing_classification_dataset import directory_parsing_label_names  # NOQA
from chainercv.datasets.directory_parsing_classification_dataset import DirectoryParsingClassificationDataset  # NOQA
from chainercv.datasets.online_products.online_products_dataset import OnlineProductsDataset  # NOQA
from chainercv.datasets.transform_dataset import TransformDataset  # NOQA
from chainercv.datasets.voc.voc_detection_dataset import VOCDetectionDataset  # NOQA
from chainercv.datasets.voc.voc_semantic_segmentation_dataset import VOCSemanticSegmentationDataset  # NOQA
from chainercv.datasets.voc.voc_utils import voc_detection_label_names  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_ignore_label_color  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_label_colors  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_label_names  # NOQA
old_contents:

from chainercv.datasets.camvid.camvid_dataset import camvid_ignore_label_color  # NOQA
from chainercv.datasets.camvid.camvid_dataset import camvid_label_colors  # NOQA
from chainercv.datasets.camvid.camvid_dataset import camvid_label_names  # NOQA
from chainercv.datasets.camvid.camvid_dataset import CamVidDataset  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_labels  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_label_names  # NOQA
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_label_colors  # NOQA
from chainercv.datasets.cityscapes.cityscapes_semantic_segmentation_dataset import CityscapesSemanticSegmentationDataset  # NOQA
from chainercv.datasets.cub.cub_keypoint_dataset import CUBKeypointDataset  # NOQA
from chainercv.datasets.cub.cub_label_dataset import CUBLabelDataset  # NOQA
from chainercv.datasets.cub.cub_utils import cub_label_names  # NOQA
from chainercv.datasets.directory_parsing_classification_dataset import directory_parsing_label_names  # NOQA
from chainercv.datasets.directory_parsing_classification_dataset import DirectoryParsingClassificationDataset  # NOQA
from chainercv.datasets.online_products.online_products_dataset import OnlineProductsDataset  # NOQA
from chainercv.datasets.transform_dataset import TransformDataset  # NOQA
from chainercv.datasets.voc.voc_detection_dataset import VOCDetectionDataset  # NOQA
from chainercv.datasets.voc.voc_semantic_segmentation_dataset import VOCSemanticSegmentationDataset  # NOQA
from chainercv.datasets.voc.voc_utils import voc_detection_label_names  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_ignore_label_color  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_label_colors  # NOQA
from chainercv.datasets.voc.voc_utils import voc_semantic_segmentation_label_names  # NOQA
mit
Python
b27398e4dd246d542c0a82ecc35da60911edc9fd
revert to dev version
mathause/regionmask
regionmask/version.py
regionmask/version.py
version = "0.7.0+dev"
version = "0.7.0"
mit
Python
b318ced455f13477743a6d2d81b3556695b27374
Make to_factorized_noisy support args
toslunar/chainerrl,toslunar/chainerrl
chainerrl/links/noisy_chain.py
chainerrl/links/noisy_chain.py
new_contents:

import chainer
from chainer.links import Linear

from chainerrl.links.noisy_linear import FactorizedNoisyLinear


def to_factorized_noisy(link, *args, **kwargs):
    """Add noisiness to components of given link

    Currently this function supports L.Linear (with and without bias)
    """
    def func_to_factorized_noisy(link):
        if isinstance(link, Linear):
            return FactorizedNoisyLinear(link, *args, **kwargs)
        else:
            return link

    _map_links(func_to_factorized_noisy, link)


def _map_links(func, link):
    if isinstance(link, chainer.Chain):
        children_names = link._children.copy()
        for name in children_names:
            child = getattr(link, name)
            new_child = func(child)
            if new_child is child:
                _map_links(func, child)
            else:
                delattr(link, name)
                with link.init_scope():
                    setattr(link, name, func(child))
    elif isinstance(link, chainer.ChainList):
        children = link._children
        for i in range(len(children)):
            child = children[i]
            new_child = func(child)
            if new_child is child:
                _map_links(func, child)
            else:
                # mimic ChainList.add_link
                children[i] = func(child)
                children[i].name = str(i)
old_contents:

import chainer
from chainer.links import Linear

from chainerrl.links.noisy_linear import FactorizedNoisyLinear


def to_factorized_noisy(link):
    """Add noisiness to components of given link

    Currently this function supports L.Linear (with and without bias)
    """
    _map_links(_func_to_factorized_noisy, link)


def _func_to_factorized_noisy(link):
    if isinstance(link, Linear):
        return FactorizedNoisyLinear(link)
    else:
        return link


def _map_links(func, link):
    if isinstance(link, chainer.Chain):
        children_names = link._children.copy()
        for name in children_names:
            child = getattr(link, name)
            new_child = func(child)
            if new_child is child:
                _map_links(func, child)
            else:
                delattr(link, name)
                with link.init_scope():
                    setattr(link, name, func(child))
    elif isinstance(link, chainer.ChainList):
        children = link._children
        for i in range(len(children)):
            child = children[i]
            new_child = func(child)
            if new_child is child:
                _map_links(func, child)
            else:
                # mimic ChainList.add_link
                children[i] = func(child)
                children[i].name = str(i)
mit
Python
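Note: the new to_factorized_noisy moves the helper inside the function so it becomes a closure over *args/**kwargs, letting parameters given once at the call site reach every FactorizedNoisyLinear constructed during the traversal. The closure pattern on its own (plain Python, no Chainer; sigma_scale is a hypothetical keyword):

def make_wrapper(*args, **kwargs):
    def wrap(component):
        # args/kwargs are captured from the enclosing call.
        return ("wrapped", component, args, kwargs)
    return wrap

wrap = make_wrapper(sigma_scale=0.4)
print(wrap("linear1"))  # ('wrapped', 'linear1', (), {'sigma_scale': 0.4})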
6535755cfdc914efc5e1efc6a89ed9dca7c78b87
Correct docstrings of result_suite/sample.py
jessamynsmith/fontbakery,googlefonts/fontbakery,vitalyvolkov/fontbakery,vitalyvolkov/fontbakery,davelab6/fontbakery,googlefonts/fontbakery,moyogo/fontbakery,moyogo/fontbakery,graphicore/fontbakery,moyogo/fontbakery,graphicore/fontbakery,graphicore/fontbakery,googlefonts/fontbakery,vitalyvolkov/fontbakery
checker/result_suite/sample.py
checker/result_suite/sample.py
new_contents:

from checker.base import BakeryTestCase as TestCase


class SampleTest(TestCase):

    target = 'result'
    path = '.'

    def setUp(self):
        # read ttf
        # self.font = fontforge.open(self.path)
        pass

    def test_ok(self):
        """ This test succeeds """
        self.assertTrue(True)

    def test_failure(self):
        """ This test fails """
        self.assertTrue(False)

    def test_error(self):
        """ Unexpected error """
        1 / 0
        self.assertTrue(False)
old_contents:

from checker.base import BakeryTestCase as TestCase


class SampleTest(TestCase):

    target = 'result'
    path = '.'

    def setUp(self):
        # read ttf
        # self.font = fontforge.open(self.path)
        pass

    def test_ok(self):
        """ This test failed """
        self.assertTrue(True)

    def test_failure(self):
        """ This test failed """
        self.assertTrue(False)

    def test_error(self):
        """ Unexpected error """
        1 / 0
        self.assertTrue(False)
apache-2.0
Python
0df292fbb34a66ee66fce919ea63b68a5f9eff1a
Set up data structures for parsing projects
albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com
app/data.py
app/data.py
new_contents:

import json
import os
from typing import Dict, List

from app.util import cached_function


class Projects():
    def __init__(self) -> None:
        self.languages: List[Language] = []

    @staticmethod
    def load_from_file() -> 'Projects':
        current_directory = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(current_directory, 'data', 'projects.json')
        with open(path, 'r') as handle:
            data = handle.read()
        parsed_data = json.loads(data)
        return Projects.load(parsed_data)

    @staticmethod
    def load(data: Dict[str, Dict[str, Dict[str, str]]]) -> 'Projects':
        projects = Projects()
        for key, value in data.items():
            language = Language.load(key, value)
            projects.languages.append(language)
        return projects


class Language():
    def __init__(self) -> None:
        self.name: str = ''
        self.projects: List[Project] = []

    @staticmethod
    def load(key: str, data: Dict[str, Dict[str, str]]) -> 'Language':
        language = Language()
        language.name = key
        for key, value in data.items():
            project = Project.load(key, value)
            language.projects.append(project)
        return language


class Project():
    def __init__(self) -> None:
        self.name: str = ''
        self.description: str = ''
        self.github: str = ''
        self.rubygems: str = ''
        self.pypi: str = ''
        self.npm: str = ''
        self.web: str = ''

    @staticmethod
    def load(key: str, data: Dict[str, str]) -> 'Project':
        project = Project()
        project.name = key
        project.description = data.get('description', '')
        project.github = data.get('github', '')
        project.rubygems = data.get('rubygems', '')
        project.pypi = data.get('pypi', '')
        project.npm = data.get('npm', '')
        project.web = data.get('web', '')
        return project

    def links(self) -> Dict[str, str]:
        links: Dict[str, str] = {
            'github': self.github,
            'rubygems': self.rubygems,
            'pypi': self.pypi,
            'npm': self.npm,
            'web': self.web,
        }
        links = dict([(k, v) for k, v in links.items() if v])
        return links


@cached_function
def get_projects() -> Projects:
    loaded_projects = Projects.load_from_file()
    return loaded_projects


class Shelf():
    def __init__(self) -> None:
        self.data: Dict[str, List[Dict[str, str]]]

    @staticmethod
    def load() -> 'Shelf':
        current_directory = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(current_directory, 'data', 'shelf.json')
        with open(path, 'r') as handle:
            shelf_data = handle.read()
        shelf = Shelf()
        shelf.data = json.loads(shelf_data)
        return shelf


@cached_function
def get_shelf() -> Shelf:
    loaded_shelf = Shelf.load()
    return loaded_shelf
old_contents:

import json
import os
from typing import Dict, List

from app.util import cached_function


class Projects():
    def __init__(self) -> None:
        self.data: Dict[str, Dict[str, Dict[str, str]]] = {}

    @staticmethod
    def load() -> 'Projects':
        current_directory = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(current_directory, 'data', 'projects.json')
        with open(path, 'r') as handle:
            project_data = handle.read()
        projects = Projects()
        projects.data = json.loads(project_data)
        return projects


@cached_function
def get_projects() -> Projects:
    loaded_projects = Projects.load()
    return loaded_projects


class Shelf():
    def __init__(self) -> None:
        self.data: Dict[str, List[Dict[str, str]]]

    @staticmethod
    def load() -> 'Shelf':
        current_directory = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(current_directory, 'data', 'shelf.json')
        with open(path, 'r') as handle:
            shelf_data = handle.read()
        shelf = Shelf()
        shelf.data = json.loads(shelf_data)
        return shelf


@cached_function
def get_shelf() -> Shelf:
    loaded_shelf = Shelf.load()
    return loaded_shelf
mit
Python
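Note: the new loader walks a two-level mapping: top-level keys become Language names and nested keys become Project names. A sketch of the projects.json shape this implies, fed through Projects.load from the new_contents above (all field values are invented):

data = {
    "Python": {                          # becomes Language.name
        "example-project": {             # becomes Project.name (hypothetical)
            "description": "Demo project",
            "github": "user/example-project",
        },
    },
}

projects = Projects.load(data)
print(projects.languages[0].name)                 # "Python"
print(projects.languages[0].projects[0].links())  # {'github': 'user/example-project'}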
912b1e33eff873a07ca089c69fef51bf05e79051
Add User and Group to admin custom site
neosergio/vote_hackatrix_backend
ideas/admin.py
ideas/admin.py
new_contents:

from .models import Idea, Outstanding
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth.models import User, Group


class MyAdminSite(AdminSite):
    site_header = "Hackatrix Backend"
    site_title = "Hackatrix Backend"
    index_title = "Administrator"


class IdeaAdmin(admin.ModelAdmin):
    list_display = ('name', 'votes', 'description', 'register', 'is_active')

    def save_model(self, request, obj, form, change):
        if getattr(obj, 'register', None) is None:
            obj.register = request.user
        obj.save()


class OutstandingAdmin(admin.ModelAdmin):
    list_display = ('name', 'email', 'comment', 'register')

    def save_model(self, request, obj, form, change):
        if getattr(obj, 'register', None) is None:
            obj.register = request.user
        obj.save()

admin_site = MyAdminSite(name='myadmin')
admin_site.register(User)
admin_site.register(Group)
admin_site.register(Idea, IdeaAdmin)
admin_site.register(Outstanding, OutstandingAdmin)
old_contents:

from .models import Idea, Outstanding
from django.contrib import admin
from django.contrib.admin import AdminSite


class MyAdminSite(AdminSite):
    site_header = "Hackatrix Backend"
    site_title = "Hackatrix Backend"
    index_title = "Administrator"


class IdeaAdmin(admin.ModelAdmin):
    list_display = ('name', 'votes', 'description', 'register', 'is_active')

    def save_model(self, request, obj, form, change):
        if getattr(obj, 'register', None) is None:
            obj.register = request.user
        obj.save()


class OutstandingAdmin(admin.ModelAdmin):
    list_display = ('name', 'email', 'comment', 'register')

    def save_model(self, request, obj, form, change):
        if getattr(obj, 'register', None) is None:
            obj.register = request.user
        obj.save()

admin_site = MyAdminSite(name='myadmin')
admin_site.register(Idea, IdeaAdmin)
admin_site.register(Outstanding, OutstandingAdmin)
mit
Python
1db8627731a2e23693cd9fe38a455956b783c0cd
Update NoticiasTecnologicas.py
HackLab-Almeria/clubpythonalm-taller-bots-telegram
03-RSSTelegram/NoticiasTecnologicas.py
03-RSSTelegram/NoticiasTecnologicas.py
new_contents:

#!/usr/bin/env python3
# -*- coding: iso-8859-1 -*-
"""
    Ejemplo: Leer Noticias RSS en Telegram (II)
    Libreria: pyTelegramBotAPI 1.4.2 [ok]
    Libreria: pyTelegramBotAPI 2.0 [ok]
    Python:   3.5.1
"""
import telebot
import sys
import feedparser

url = "http://blog.bricogeek.com/noticias/arduino/rss/"
rss = feedparser.parse(url)

servicio="Servicio del Bot de Telegram"
inicio_servicio="Iniciando..."+servicio
print (inicio_servicio),

TOKEN = 'AQUÍ EL NUMERO DE VUESTRO TOKEN'  #Ponemos nuestro TOKEN generado con el @BotFather
telegram = telebot.TeleBot(TOKEN)  # Combinamos la declaración del Token con la función de la API

def listener(messages):
    for m in messages:
        chatID = m.chat.id
        if m.content_type == 'text':
            for noticia in rss.entries:
                evento=noticia.title+"\n"+noticia.link
                telegram.send_message(chatID, evento)

try:
    telegram.get_me()  # Comprobar el API. Devuelve un objeto
    print ("-> OK")
    print ("Token: "+TOKEN)
    print ("- Presionar Ctrl+C para parar el servicio...")
    telegram.set_update_listener(listener)
except Exception as e:
    print ("-> ERROR")
    print (e)
    sys.exit(0)

telegram.polling(none_stop=False)
# Interval setup. Sleep 3 secs between request new message.
telegram.polling(interval=3)
telegram.polling()

try:
    while True:
        pass
except KeyboardInterrupt:
    print ("Programa Finalizado...")
    sys.exit(0)
old_contents:

#!/usr/bin/env python3
# -*- coding: iso-8859-1 -*-
"""
    Ejemplo: Leer Noticias RSS en Telegram (III)
    Libreria: pyTelegramBotAPI 1.4.2 [ok]
    Libreria: pyTelegramBotAPI 2.0 [ok]
    Python:   3.5.1
"""
import telebot
import sys
import feedparser

url = "http://blog.bricogeek.com/noticias/arduino/rss/"
rss = feedparser.parse(url)

servicio="Servicio del Bot de Telegram"
inicio_servicio="Iniciando..."+servicio
print (inicio_servicio),

TOKEN = 'AQUÍ EL NUMERO DE VUESTRO TOKEN'  #Ponemos nuestro TOKEN generado con el @BotFather
telegram = telebot.TeleBot(TOKEN)  # Combinamos la declaración del Token con la función de la API

def listener(messages):
    for m in messages:
        chatID = m.chat.id
        if m.content_type == 'text':
            for noticia in rss.entries:
                evento=noticia.title+"\n"+noticia.link
                telegram.send_message(chatID, evento)

try:
    telegram.get_me()  # Comprobar el API. Devuelve un objeto
    print ("-> OK")
    print ("Token: "+TOKEN)
    print ("- Presionar Ctrl+C para parar el servicio...")
    telegram.set_update_listener(listener)
except Exception as e:
    print ("-> ERROR")
    print (e)
    sys.exit(0)

telegram.polling(none_stop=False)
# Interval setup. Sleep 3 secs between request new message.
telegram.polling(interval=3)
telegram.polling()

try:
    while True:
        pass
except KeyboardInterrupt:
    print ("Programa Finalizado...")
    sys.exit(0)
mit
Python
f68a10fec5d4dbc743c5d84f8b26d122e81b26e4
Use standard urlencode() for encoding URLs
joshua-stone/DerPyBooru
derpibooru/request.py
derpibooru/request.py
new_contents:

# Copyright (c) 2014, Joshua Stone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from requests import get, codes
from sys import version_info
from .image import Image

if version_info < (3, 0):
    from urllib import urlencode
else:
    from urllib.parse import urlencode

def url(parameters):
    p = {}

    for key, value in parameters.items():
        if key == "key":
            if value:
                p["key"] = value
        elif key == "q":
            p["q"] = ",".join(value) if value else "*"
        else:
            p[key] = value

    url = "https://derpiboo.ru/search.json?{}".format(urlencode(p))

    return url

def request(parameters):
    p = parameters
    p.update({
        "page": 1,
        "perpage": 50})

    request = get(url(p))

    while request.status_code == codes.ok:
        for image in request.json()["search"]:
            yield Image(image)
        parameters["page"] += 1
        request = get(url(p))

    yield None
old_contents:

# Copyright (c) 2014, Joshua Stone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from requests import get, codes
from sys import version_info
from .image import Image

if version_info < (3, 0):
    from urllib import quote_plus
else:
    from urllib.parse import quote_plus

def join_tags(tags):
    q = quote_plus(",".join(tags))

    return q

def join_parameters(parameters):
    p = ["{}={}".format(k, v) for k, v in parameters.items()]

    return p

def url(parameters):
    url, p = "https://derpiboo.ru/search.json?", {}

    for key, value in parameters.items():
        if key == "key":
            if value:
                p["key"] = value
        elif key == "q":
            p["q"] = join_tags(value) if value else "*"
        else:
            p[key] = value

    url += "&".join(join_parameters(p))

    return url

def request(parameters):
    p = parameters
    p.update({
        "page": 1,
        "perpage": 50})

    request = get(url(p))

    while request.status_code == codes.ok:
        for image in request.json()["search"]:
            yield Image(image)
        parameters["page"] += 1
        request = get(url(p))

    yield None
bsd-2-clause
Python
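The diff above swaps hand-rolled query-string assembly for urllib.parse.urlencode, which percent-escapes values for free. A minimal standalone sketch of the same idea (the endpoint here is illustrative, not Derpibooru's real host):

from urllib.parse import urlencode

def search_url(tags, key=None, page=1, perpage=50):
    # urlencode escapes commas and spaces, so tag lists survive intact
    params = {"q": ",".join(tags) if tags else "*",
              "page": page, "perpage": perpage}
    if key:
        params["key"] = key
    return "https://example.com/search.json?" + urlencode(params)

print(search_url(["mountain", "scenic view"]))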
8188008cf1bd41c1cbe0452ff635dd0319dfecd9
Add trailing slash to url
Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django
derrida/books/urls.py
derrida/books/urls.py
from django.conf.urls import url from django.contrib.admin.views.decorators import staff_member_required from derrida.books.views import ( PublisherAutocomplete, LanguageAutocomplete, InstanceDetailView, InstanceListView ) urlpatterns = [ # TODO: come up with cleaner url patterns/names for autocomplete views url(r'^publishers/autocomplete/$', staff_member_required(PublisherAutocomplete.as_view()), name='publisher-autocomplete'), url(r'^languages/autocomplete/$', staff_member_required(LanguageAutocomplete.as_view()), name='language-autocomplete'), url(r'^(?P<pk>\d+)/$', InstanceDetailView.as_view(), name='detail'), url(r'^$', InstanceListView.as_view(), name='list'), ]
from django.conf.urls import url from django.contrib.admin.views.decorators import staff_member_required from derrida.books.views import ( PublisherAutocomplete, LanguageAutocomplete, InstanceDetailView, InstanceListView ) urlpatterns = [ # TODO: come up with cleaner url patterns/names for autocomplete views url(r'^publishers/autocomplete/$', staff_member_required(PublisherAutocomplete.as_view()), name='publisher-autocomplete'), url(r'^languages/autocomplete/$', staff_member_required(LanguageAutocomplete.as_view()), name='language-autocomplete'), url(r'^(?P<pk>\d+)$', InstanceDetailView.as_view(), name='detail'), url(r'^$', InstanceListView.as_view(), name='list'), ]
apache-2.0
Python
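The whole change above is one trailing slash in the detail route. With CommonMiddleware's APPEND_SLASH behaviour, r'^(?P<pk>\d+)/$' lets a request for /42 redirect to /42/, while the old r'^(?P<pk>\d+)$' made /42/ a 404. A quick framework-free check of the two regexes:

import re

with_slash = re.compile(r'^(?P<pk>\d+)/$')
without_slash = re.compile(r'^(?P<pk>\d+)$')

print(bool(with_slash.match('42/')))     # True
print(bool(without_slash.match('42/')))  # False: the case the commit fixes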
c4ea3ce306d4464ac0bc80286a60689972c7bc63
Test isolation.
pinax/pinax-points,pinax/pinax-points
agon/tests.py
agon/tests.py
from threading import Thread from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.test import TestCase from django.contrib.auth.models import User from agon.models import award_points, points_awarded class PointsTestCase(TestCase): def setUp(self): self.users = [ User.objects.create_user("user_%d" % i, "user_%[email protected]" % i, str(i)) for i in xrange(1) ] def tearDown(self): if hasattr(settings, "AGON_POINT_VALUES"): del settings.AGON_POINT_VALUES def setup_points(self, value): settings.AGON_POINT_VALUES = value def test_improperly_configured(self): user = self.users[0] try: award_points(user, "JOINED_SITE") except ImproperlyConfigured, e: self.assertEqual(str(e), "You must define 'AGON_POINT_VALUES' in settings") self.setup_points({}) try: award_points(user, "JOINED_SITE") except ImproperlyConfigured, e: self.assertEqual(str(e), "You must define a point value for 'JOINED_SITE'") def test_simple_user_point_award(self): self.setup_points({ "JOINED_SITE": 1, }) user = self.users[0] award_points(user, "JOINED_SITE") self.assertEqual(points_awarded(user), 1) def test_concurrent_award(self): user = self.users[0] self.setup_points({ "TEST_1": 10, }) return def run(): award_points(user, "TEST_1") threads = [] for i in xrange(5): t = Thread(target=run) threads.append(t) t.start() for t in threads: t.join() self.assertEqual(points_awarded(user), 50)
from threading import Thread from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.test import TestCase from django.contrib.auth.models import User from agon.models import award_points, points_awarded class PointsTestCase(TestCase): def setUp(self): self.users = [ User.objects.create_user("user_%d" % i, "user_%[email protected]" % i, str(i)) for i in xrange(1) ] def setup_points(self, value): settings.AGON_POINT_VALUES = value def test_improperly_configured(self): user = self.users[0] try: award_points(user, "JOINED_SITE") except ImproperlyConfigured, e: self.assertEqual(str(e), "You must define 'AGON_POINT_VALUES' in settings") self.setup_points({}) try: award_points(user, "JOINED_SITE") except ImproperlyConfigured, e: self.assertEqual(str(e), "You must define a point value for 'JOINED_SITE'") def test_simple_user_point_award(self): self.setup_points({ "JOINED_SITE": 1, }) user = self.users[0] award_points(user, "JOINED_SITE") self.assertEqual(points_awarded(user), 1) def test_concurrent_award(self): user = self.users[0] return def run(): award_points(user, "TEST_1") threads = [] for i in xrange(5): t = Thread(target=run) threads.append(t) t.start() for t in threads: t.join() self.assertEqual(points_awarded(user), 50)
mit
Python
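The fix above is the new tearDown: tests that inject AGON_POINT_VALUES no longer leak it into later tests, and test_concurrent_award now sets up the point value it relies on instead of assuming an earlier test did. A tiny self-contained sketch of the same isolation pattern (the fake settings object stands in for django.conf.settings):

import unittest

class FakeSettings(object):
    pass

settings = FakeSettings()

class IsolatedCase(unittest.TestCase):
    def tearDown(self):
        # undo anything a test injected so the next one starts clean
        if hasattr(settings, "POINT_VALUES"):
            del settings.POINT_VALUES

    def test_a_sets(self):
        settings.POINT_VALUES = {"X": 1}
        self.assertEqual(settings.POINT_VALUES["X"], 1)

    def test_b_starts_clean(self):
        # would fail if test_a_sets leaked its value
        self.assertFalse(hasattr(settings, "POINT_VALUES"))

if __name__ == "__main__":
    unittest.main()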
ac084c574b58771bd240af3fa4b4a000fc742229
update to handle different kinds of files
imrehg/labhardware,imrehg/labhardware
projects/allan_cont/showlog_long.py
projects/allan_cont/showlog_long.py
import numpy as np import pylab as pl from ourgui import openFile def plotline(maxx, minx=0, value=0, style="k-", plotfunc=pl.plot): plotfunc([minx, maxx], [value, value], style) def quickplot(filename): alldata = np.loadtxt(filename, comments="#", delimiter=",") datashape = np.shape(alldata) try: col = np.shape(alldata)[1] data = alldata[:, col-1] except (IndexError): data = alldata maxdata, mindata, stddata, meandata = np.max(data), np.min(data), np.std(data), np.mean(data) n = len(data) pl.subplot(211) pl.plot(data,'k.') plotline(n, value=maxdata, style="g-") plotline(n, value=mindata, style="r-") plotline(n, value=meandata, style="k-") plotline(n, value=(meandata+stddata), style="b-") plotline(n, value=(meandata-stddata), style="b-") pl.xlabel('data points') pl.ylabel('Frequency (Hz)') pl.title("Frequency: %f (+- %f) Hz" %(meandata, stddata)) pl.subplot(212) n, bins, patches = pl.hist(data-meandata, 100, normed=1, facecolor='green', alpha=0.75) pl.xlabel('Frequency deviation from mean (Hz)') pl.ylabel('distribution') pl.show() filename = openFile("log") if filename: quickplot(filename)
import numpy as np import pylab as pl from ourgui import openFile def plotline(maxx, minx=0, value=0, style="k-", plotfunc=pl.plot): plotfunc([minx, maxx], [value, value], style) def quickplot(filename): data = np.loadtxt(filename, comments="#") maxdata, mindata, stddata, meandata = np.max(data), np.min(data), np.std(data), np.mean(data) n = len(data) pl.subplot(211) pl.plot(data,'k.') plotline(n, value=maxdata, style="g-") plotline(n, value=mindata, style="r-") plotline(n, value=meandata, style="k-") plotline(n, value=(meandata+stddata), style="b-") plotline(n, value=(meandata-stddata), style="b-") pl.xlabel('data points') pl.ylabel('Frequency (Hz)') pl.title("Frequency: %f (+- %f) Hz" %(meandata, stddata)) pl.subplot(212) n, bins, patches = pl.hist(data-meandata, 100, normed=1, facecolor='green', alpha=0.75) pl.xlabel('Frequency deviation from mean (Hz)') pl.ylabel('distribution') pl.show() filename = openFile("log") if filename: quickplot(filename)
mit
Python
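The update above makes the plot script tolerate both file layouts: np.loadtxt returns a 1-D array for a single column and a 2-D array for several, so the script probes the shape and falls back on IndexError. A compact sketch of that idiom; np.atleast_2d would be an exception-free alternative:

import io
import numpy as np

def last_column(text, delimiter=None):
    alldata = np.loadtxt(io.StringIO(text), delimiter=delimiter)
    try:
        return alldata[:, alldata.shape[1] - 1]   # 2-D: take the last column
    except IndexError:
        return alldata                            # 1-D: already one series

print(last_column("1\n2\n3\n"))                   # [1. 2. 3.]
print(last_column("0,10\n1,20\n", delimiter=",")) # [10. 20.]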
c206936120519912762f30eb269f1733b5593bf8
fix window edges
niklaskorz/pyglet,adamlwgriffiths/Pyglet,niklaskorz/pyglet,adamlwgriffiths/Pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,adamlwgriffiths/Pyglet,seeminglee/pyglet64,seeminglee/pyglet64,niklaskorz/pyglet,niklaskorz/pyglet
contrib/spryte/balls.py
contrib/spryte/balls.py
import random from pyglet import window, clock, gl, event from pyglet.window import key import spryte win = window.Window(vsync=False) fps = clock.ClockDisplay(color=(1, 1, 1, 1)) layer = spryte.Layer() balls = [] for i in range(200): balls.append(spryte.Sprite('ball.png', layer, (win.width - 64) * random.random(), (win.height - 64) * random.random(), dx=-50 + 100*random.random(), dy=-50 + 100*random.random(), dead=False)) def animate(dt): for ball in balls: ball.x += ball.dx * dt ball.y += ball.dy * dt if ball.x + ball.width > win.width or ball.x < 0: ball.dx *= -1 if ball.y + ball.height > win.height or ball.y < 0: ball.dy *= -1 clock.schedule(animate) layer2 = spryte.Layer() car = spryte.Sprite('car.png', layer2, win.width/2, win.height/2) keyboard = key.KeyStateHandler() win.push_handlers(keyboard) def animate(dt): car.x += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 200 * dt car.y += (keyboard[key.UP] - keyboard[key.DOWN]) * 200 * dt for i, ball in enumerate(balls): if ball.intersects(car): if ball.width > ball.image.width * 2: # pop! balls[i].delete() balls[i] = spryte.Sprite('ball.png', layer, win.width * random.random(), win.height * random.random(), dx=-50 + 100*random.random(), dy=-50 + 100*random.random()) else: ball.width += 1 ball.height += 1 clock.schedule(animate) while not win.has_exit: clock.tick() win.dispatch_events() win.clear() gl.glPushAttrib(gl.GL_ENABLE_BIT) gl.glEnable(gl.GL_BLEND) gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) layer.draw() layer2.draw() gl.glPopAttrib() fps.draw() win.flip()
import random from pyglet import window, clock, gl, event from pyglet.window import key import spryte win = window.Window(vsync=False) fps = clock.ClockDisplay(color=(1, 1, 1, 1)) layer = spryte.Layer() balls = [] for i in range(200): balls.append(spryte.Sprite('ball.png', layer, win.width * random.random(), win.height * random.random(), dx=-50 + 100*random.random(), dy=-50 + 100*random.random(), dead=False)) def animate(dt): for ball in balls: ball.x += ball.dx * dt ball.y += ball.dy * dt if ball.x > win.width or ball.x < 0: ball.dx *= -1 if ball.y > win.height or ball.y < 0: ball.dy *= -1 clock.schedule(animate) layer2 = spryte.Layer() car = spryte.Sprite('car.png', layer2, win.width/2, win.height/2) keyboard = key.KeyStateHandler() win.push_handlers(keyboard) def animate(dt): car.x += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 200 * dt car.y += (keyboard[key.UP] - keyboard[key.DOWN]) * 200 * dt for i, ball in enumerate(balls): if ball.intersects(car): if ball.width > ball.image.width * 2: # pop! balls[i].delete() balls[i] = spryte.Sprite('ball.png', layer, win.width * random.random(), win.height * random.random(), dx=-50 + 100*random.random(), dy=-50 + 100*random.random()) else: ball.width += 1 ball.height += 1 clock.schedule(animate) while not win.has_exit: clock.tick() win.dispatch_events() win.clear() gl.glPushAttrib(gl.GL_ENABLE_BIT) gl.glEnable(gl.GL_BLEND) gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) layer.draw() layer2.draw() gl.glPopAttrib() fps.draw() win.flip()
bsd-3-clause
Python
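The bounce test above now uses the sprite's far edge (x + width) rather than its origin, so balls reverse at the visible border instead of sliding half off screen. The same logic, stripped of pyglet:

def step(x, dx, width, win_width, dt):
    x += dx * dt
    if x + width > win_width or x < 0:   # far edge or near edge crossed
        dx = -dx
    return x, dx

x, dx = 70.0, 50.0
for _ in range(5):
    x, dx = step(x, dx, width=16, win_width=100, dt=0.1)
    print(round(x, 1), dx)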
b77cb1ac7524e76fd1f29ee6c8e214d12d04226f
Improve variable names.
LuminosoInsight/wordfreq
scripts/gen_regex.py
scripts/gen_regex.py
import unicodedata from ftfy import chardata import pathlib from pkg_resources import resource_filename CATEGORIES = [unicodedata.category(chr(i)) for i in range(0x110000)] DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data')) def func_to_regex(accept_func): """ Given a function that returns True or False for a numerical codepoint, return a regex character class accepting the characters resulting in True. Ranges separated only by unassigned characters are merged for efficiency. """ # Where the last range would end if it also included unassigned codepoints. # If we need to add a codepoint right after this point, we extend the # range; otherwise we start a new one. tentative_end = None ranges = [] for codepoint, category in enumerate(CATEGORIES): if accept_func(codepoint): if tentative_end == codepoint - 1: ranges[-1][1] = codepoint else: ranges.append([codepoint, codepoint]) tentative_end = codepoint elif category == 'Cn' and tentative_end == codepoint - 1: tentative_end = codepoint return '[%s]' % ''.join(chr(r[0]) + '-' + chr(r[1]) for r in ranges) def cache_regex_from_func(filename, func): """ Generates a regex from a function that accepts a single unicode character, and caches it in the data path at filename. """ with (DATA_PATH / filename).open(mode='w') as file: file.write(func_to_regex(func)) def _is_emoji_codepoint(i): """ Report whether a numerical codepoint is (likely) an emoji: a Unicode 'So' character (as future-proofed by the ftfy chardata module) but excluding symbols like © and ™ below U+2600 and the replacement character U+FFFD. """ return chardata.CHAR_CLASS_STRING[i] == '3' and i >= 0x2600 and i != 0xfffd def _is_non_punct_codepoint(i): """ Report whether a numerical codepoint is not one of the following classes: - P: punctuation - S: symbols - Z: separators - C: control characters This will classify symbols, including emoji, as punctuation; users that want to accept emoji should add them separately. """ return CATEGORIES[i][0] not in 'PSZC' def _is_combining_mark_codepoint(i): """ Report whether a numerical codepoint is a combining mark (Unicode 'M'). """ return CATEGORIES[i][0] == 'M' if __name__ == '__main__': cache_regex_from_func('emoji.txt', _is_emoji_codepoint) cache_regex_from_func('non_punct.txt', _is_non_punct_codepoint) cache_regex_from_func('combining_mark.txt', _is_combining_mark_codepoint)
import unicodedata from ftfy import chardata import pathlib from pkg_resources import resource_filename CATEGORIES = [unicodedata.category(chr(i)) for i in range(0x110000)] DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data')) def func_to_regex(func): """ Given a function that returns True or False for a numerical codepoint, return a regex character class accepting the characters resulting in True. Ranges separated only by unassigned characters are merged for efficiency. """ # Where the last range would end if it also included unassigned codepoints. # If we need to add a codepoint right after this point, we extend the # range; otherwise we start a new one. tentative_end = None ranges = [] for i, cat in enumerate(CATEGORIES): if func(i): if tentative_end == i - 1: ranges[-1][1] = i else: ranges.append([i, i]) tentative_end = i elif cat == 'Cn' and tentative_end == i - 1: tentative_end = i return '[%s]' % ''.join(chr(r[0]) + '-' + chr(r[1]) for r in ranges) def cache_regex_from_func(filename, func): """ Generates a regex from a function that accepts a single unicode character, and caches it in the data path at filename. """ with (DATA_PATH / filename).open(mode='w') as file: file.write(func_to_regex(func)) def _is_emoji_codepoint(i): """ Report whether a numerical codepoint is (likely) an emoji: a Unicode 'So' character (as future-proofed by the ftfy chardata module) but excluding symbols like © and ™ below U+2600 and the replacement character U+FFFD. """ return chardata.CHAR_CLASS_STRING[i] == '3' and i >= 0x2600 and i != 0xfffd def _is_non_punct_codepoint(i): """ Report whether a numerical codepoint is not one of the following classes: - P: punctuation - S: symbols - Z: separators - C: control characters This will classify symbols, including emoji, as punctuation; users that want to accept emoji should add them separately. """ return CATEGORIES[i][0] not in 'PSZC' def _is_combining_mark_codepoint(i): """ Report whether a numerical codepoint is a combining mark (Unicode 'M'). """ return CATEGORIES[i][0] == 'M' if __name__ == '__main__': cache_regex_from_func('emoji.txt', _is_emoji_codepoint) cache_regex_from_func('non_punct.txt', _is_non_punct_codepoint) cache_regex_from_func('combining_mark.txt', _is_combining_mark_codepoint)
mit
Python
5ff27451b55cdd03fa7913aee9e0762297341e29
make image printer thingy iterable, and optimize output data
jart/fabulous,jart/fabulous,jart/fabulous
fabulous/image.py
fabulous/image.py
"""Print Images to a 256-Color Terminal """ import sys import fcntl import struct import termios import itertools from PIL import Image as Pills from grapefruit import Color from fabulous.xterm256 import rgb_to_xterm class Image(object): def __init__(self, path, width=None, bgcolor='black'): self.bgcolor = Color.NewFromHtml(bgcolor) self.img = Pills.open(path) self.resize(width) def __str__(self): return "\n".join(self) def __iter__(self): return self.reduce(self.convert()) def resize(self, width=None): (iw, ih) = self.img.size if width is None: width = _term_width() if iw * 2 <= width: return width //= 2 height = int(float(ih) * (float(width) / float(iw))) self.img = self.img.resize((width, height)) def reduce(self, colors): need_reset = False line = [] for color, items in itertools.groupby(colors): if color is None: if need_reset: line.append("\x1b[49m") need_reset = False line.append(' ' * len(list(items))) elif color == "EOL": if need_reset: line.append("\x1b[49m") need_reset = False yield "".join(line) else: line.pop() yield "".join(line) line = [] else: need_reset = True line.append("\x1b[48;5;%dm%s" % (color, ' ' * len(list(items)))) def convert(self): (width, height) = self.img.size pix = self.img.load() for y in xrange(height): for x in xrange(width): rgba = pix[x, y] if len(rgba) == 4 and rgba[3] == 0: yield None elif len(rgba) == 3 or rgba[3] == 255: yield rgb_to_xterm(rgba[:3]) else: color = Color.NewFromRgb(*[c / 255.0 for c in rgba]) yield rgb_to_xterm(color.AlphaBlend(self.bgcolor)) yield "EOL" def _term_width(): call = fcntl.ioctl(0, termios.TIOCGWINSZ, "\000" * 8) height, width = struct.unpack("hhhh", call)[:2] return width
import sys from PIL import Image # from fabulous.ansi import fg from fabulous.test_xterm256 import fg def image(path, resize=None, resize_antialias=None): im = Image.open(path) if resize: im = im.resize(resize) elif resize_antialias: im = im.resize(resize, Image.ANTIALIAS) pix = im.load() (width, height) = im.size for y in xrange(height): for x in xrange(width): color = pix[x, y] if len(color) == 4 and color[3] <= 0.001: s = sys.stdout.write(' ') else: sys.stdout.write(unicode(fg(color, u"\u2588")).encode('utf8')) sys.stdout.write("\n")
apache-2.0
Python
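The rewritten printer batches runs of equal colors with itertools.groupby, emitting one escape sequence per run instead of one per pixel. A tiny sketch of that run-length step over a fake scanline (the xterm256 codes are arbitrary; None marks transparency):

import itertools

scanline = [196, 196, 196, None, None, 21, 21]

parts = []
for color, run in itertools.groupby(scanline):
    n = len(list(run))
    if color is None:
        parts.append("\x1b[49m" + " " * n)              # reset background
    else:
        parts.append("\x1b[48;5;%dm%s" % (color, " " * n))
print(repr("".join(parts) + "\x1b[49m"))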
439b977b14b12d42ee886a432f3a4af555d8de10
add storage structures
navierula/Research-Fall-2017
minMaxCalc.py
minMaxCalc.py
import pandas as pd # read in dataset xl = pd.ExcelFile("data/130N_Cycles_1-47.xlsx") df = xl.parse("Specimen_RawData_1") df """ This is what the dataset currently looks like - it has 170,101 rows and two columns. The dataset contains data from 47 cycles following an experiment. The output of these experiments form the two columns:<br> - time (seconds) - load (exerted force, in Newtons) My task is to find the local maxima and minima in the dataset, and mark these values in a database. Initially, the database will consist of four columns: time, load, max, and min. It can be modified or condensed later on to fit further requirements. This is the criteria I will use to find the maxima: - write each row in the db to a cache - initialize a flag value to false - if the force in the previous row is smaller than the force in the next row, write the new row to the cache (leave the flag as false) - if the force in the previous row is bigger than the force in the next row, write the new row to cache and mark it as a max cycle (change the flag to true) This is the criteria I will use to find the minima: - write each row in the db to a cache - initialize a flag value to false - if the force in the previous row is bigger than the force in the next row, write the new row to the cache (leave the flag as false) - if the force in the previous row is smaller than the force in the next row, write the new row to the cache and mark it as a min cycle (change the flag to true) """ # append data from time column to list time = [] for item in df.index: time.append(df["Time"][item]) # append data from load column to list load = [] for item in df.index: load.append(df["Load"][item]) # create list of tuples for time and load data = [] for i, j in zip(time, load): data.append((i,j)) # apply algorithm for finding maxima in data max_data = [] for idx, item in enumerate(data): prev = data[idx-1][1] curr = item[1] if prev > curr: max_data.append(item + ("max",)) else: max_data.append(item + ("",)) # apply algorithm for finding minima in data min_data = [] for idx, item in enumerate(max_data): prev = max_data[idx-1][1] curr = item[1] if prev < curr: min_data.append(item + ("min",)) else: min_data.append(item + ("",)) all_data = min_data # count maxima number max_count = 0 for item in all_data: if item[2] == "max": max_count += 1 print(max_count) # count minima number min_count = 0 for item in all_data: if item[3] == "min": min_count += 1 print(min_count) # create db model db = [] # create cache store cache = []
import pandas as pd # read in dataset xl = pd.ExcelFile("data/130N_Cycles_1-47.xlsx") df = xl.parse("Specimen_RawData_1") df """ This is what the dataset currently looks like - it has 170,101 rows and two columns. The dataset contains data from 47 cycles following an experiment. The output of these experiments form the two columns:<br> - time (seconds) - load (exerted force, in Newtons) My task is to find the local maxima and minima in the dataset, and mark these values in a database. Initially, the database will consist of four columns: time, load, max, and min. It can be modified or condensed later on to fit further requirements. This is the criteria I will use to find the maxima: - write each row in the db to a cache - initialize a flag value to false - if the force in the previous row is smaller than the force in the next row, write the new row to the cache (leave the flag as false) - if the force in the previous row is bigger than the force in the next row, write the new row to cache and mark it as a max cycle (change the flag to true) This is the criteria I will use to find the minima: - write each row in the db to a cache - initialize a flag value to false - if the force in the previous row is bigger than the force in the next row, write the new row to the cache (leave the flag as false) - if the force in the previous row is smaller than the force in the next row, write the new row to the cache and mark it as a min cycle (change the flag to true) """ # append data from time column to list time = [] for item in df.index: time.append(df["Time"][item]) # append data from load column to list load = [] for item in df.index: load.append(df["Load"][item]) # create list of tuples for time and load data = [] for i, j in zip(time, load): data.append((i,j)) # apply algorithm for finding maxima in data max_data = [] for idx, item in enumerate(data): prev = data[idx-1][1] curr = item[1] if prev > curr: max_data.append(item + ("max",)) else: max_data.append(item + ("",)) # apply algorithm for finding minima in data min_data = [] for idx, item in enumerate(max_data): prev = max_data[idx-1][1] curr = item[1] if prev < curr: min_data.append(item + ("min",)) else: min_data.append(item + ("",)) all_data = min_data # count maxima number max_count = 0 for item in all_data: if item[2] == "max": max_count += 1 print(max_count) # count minima number min_count = 0 for item in all_data: if item[3] == "min": min_count += 1 print(min_count) df = pd.DataFrame(data, columns=['Time', 'Load', 'Max', 'Min']) df
mit
Python
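Both passes above compare each load value only with its predecessor, and at idx == 0 that predecessor is data[-1], so the last row wraps around into the first comparison. A sketch of a stricter local-extrema test that checks both neighbours and skips the endpoints:

load = [0.0, 1.2, 2.5, 1.9, 0.7, 1.1, 2.2, 0.3]

extrema = []
for i in range(1, len(load) - 1):
    if load[i - 1] < load[i] > load[i + 1]:
        extrema.append((i, load[i], "max"))
    elif load[i - 1] > load[i] < load[i + 1]:
        extrema.append((i, load[i], "min"))

print(extrema)  # [(2, 2.5, 'max'), (4, 0.7, 'min'), (6, 2.2, 'max')]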
642cd34041a579fa37ea3790143d79842c7141f3
add implementation for all makers
ghisvail/ismrmrdpy
ismrmrdpy/backend/acquisition.py
ismrmrdpy/backend/acquisition.py
# -*- coding: utf-8 -*- # # Copyright (c) 2014-2015, Ghislain Antony Vaillant # All rights reserved. # # This file is distributed under the BSD License, see the LICENSE file or # checkout the license terms at http://opensource.org/licenses/BSD-2-Clause). from __future__ import absolute_import, division, print_function from .constants import Constants, AcquisitionFlags, DataTypes from .constants import acquisition_header_dtype, ismrmrd_to_numpy_dtypes import numpy def make_header(version=Constants.version, *args, **kwargs): header = numpy.zeros((), dtype=acquisition_header_dtype) header['version'] = version for key in kwargs: if key in acquisition_header_dtype.fields: header[key] = kwargs[key] return header def make_dtype(header): data_dtype = ismrmrd_to_numpy_dtypes[DataTypes.cxfloat] data_shape = (header['active_channels'], header['number_of_samples']) traj_dtype = ismrmrd_to_numpy_dtypes[DataTypes.float] traj_shape = (header['number_of_samples'], header['trajectory_dimensions']) return numpy.dtype([ ('head', acquisition_header_dtype), ('traj', (traj_dtype, traj_shape)), ('data', (data_dtype, data_shape)), ]) def make_array(header=None, *args, **kwargs): header = header or make_header(**kwargs) trajectory = None data = None dtype = make_dtype(header) array = numpy.zeros((), dtype=dtype) array['head'] = header if trajectory is not None: array['traj'] = trajectory if data is not None: array['data'] = data def frombytes(bytestring): pass def set_flags(header, flags=None): pass def clear_flags(header, flags=None): pass def is_flag_set(header, flag): pass def _verify_flags(flags): pass def set_channels(header, channels=None): pass def clear_channels(header, channels=None): pass def is_channel_set(header, channel): pass def _verify_channels(flags): pass
# -*- coding: utf-8 -*- # # Copyright (c) 2014-2015, Ghislain Antony Vaillant # All rights reserved. # # This file is distributed under the BSD License, see the LICENSE file or # checkout the license terms at http://opensource.org/licenses/BSD-2-Clause). from __future__ import absolute_import, division, print_function from .constants import Constants, AcquisitionFlags, acquisition_header_dtype def make_header(*args, **kwargs): pass def make_dtype(header): pass def make_array(header=None, *args, **kwargs): pass def frombytes(bytestring): pass def set_flags(header, flags=None): pass def clear_flags(header, flags=None): pass def is_flag_set(header, flag): pass def _verify_flags(flags): pass def set_channels(header, channels=None): pass def clear_channels(header, channels=None): pass def is_channel_set(header, channel): pass def _verify_channels(flags): pass
bsd-2-clause
Python
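make_dtype above builds a per-acquisition NumPy structured dtype whose sub-array shapes come from the header. The same pattern in miniature, with a made-up header layout (these field names are illustrative, not the ISMRMRD ones):

import numpy as np

head_dtype = np.dtype([("channels", "<u2"), ("samples", "<u2")])

def make_record_dtype(header):
    shape = (int(header["channels"]), int(header["samples"]))
    return np.dtype([("head", head_dtype),
                     ("data", ("<c8", shape))])   # complex64 block per record

header = np.zeros((), dtype=head_dtype)
header["channels"], header["samples"] = 4, 128
record = np.zeros((), dtype=make_record_dtype(header))
print(record["data"].shape)   # (4, 128)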
59ba038f117744ca0c5fe8c24b97b64830f8e7ec
Put bulk data into db
bschoenfeld/va-court-scraper,bschoenfeld/va-court-scraper
court_bulk_collector.py
court_bulk_collector.py
from courtreader import readers from courtutils.logger import get_logger from datetime import datetime, timedelta import pymongo import os import sys import time # configure logging log = get_logger() log.info('Worker running') def get_db_connection(): return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search'] # Fill in cases court_reader = None current_court_fips = None db = get_db_connection() court_fips = '013' case_type = 'criminal' year = 2015 reader = readers.CircuitCourtReader() reader.connect() def get_cases_on_date(dateStr): log.info('Getting cases on ' + dateStr) cases = reader.get_cases_by_date(court_fips, case_type, dateStr) for case in cases: case['details'] = reader.get_case_details_by_number( \ court_fips, \ case_type, \ case['case_number']) case['details_fetched'] = datetime.utcnow() case['court_fips'] = court_fips print case['case_number'], case['defendant'], case['details']['Filed'] db.circuit_court_detailed_cases.find_one_and_replace({ 'court_fips': case['court_fips'], 'case_number': case['case_number'] }, case, upsert=True) date = datetime(year, 12, 31) while date.year == year: date_search = { 'court_fips': court_fips, 'case_type': case_type, 'date': date } dateStr = date.strftime('%m/%d/%Y') if db.circuit_court_dates_collected.find_one(date_search) != None: log.info(dateStr + ' already searched') else: get_cases_on_date(dateStr) db.circuit_court_dates_collected.insert_one(date_search) date += timedelta(days=-1) reader.log_off()
from courtreader import readers from courtutils.logger import get_logger from datetime import datetime, timedelta import pymongo import os import sys import time # configure logging log = get_logger() log.info('Worker running') def get_db_connection(): return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search'] # Fill in cases court_reader = None current_court_fips = None db = get_db_connection() court_fips = '013' case_type = 'R' year = 2015 reader = readers.CircuitCourtReader() reader.connect() date = datetime(year, 12, 31) while date.year == year: dateStr = date.strftime('%m/%d/%Y') log.info('Getting cases on ' + dateStr) cases = reader.get_cases_by_date(court_fips, case_type, dateStr) for case in cases: case['details'] = reader.get_case_details_by_number( \ court_fips, \ case_type, \ case['case_number']) case['details_fetched'] = datetime.utcnow() print case['case_number'], case['defendant'], case['details']['Filed'] break date += timedelta(days=-1) reader.log_off()
mit
Python
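Two details make the collector above restartable: find_one_and_replace(..., upsert=True) lets a re-scrape overwrite rather than duplicate a case, and a separate marker collection records which dates are done. A hedged sketch of the upsert half, assuming a MongoDB reachable on localhost (collection and field names are illustrative):

import pymongo

cases = pymongo.MongoClient("mongodb://localhost:27017")["court"]["cases"]

case = {"court_fips": "013", "case_number": "CR15-42", "defendant": "DOE"}
cases.find_one_and_replace(
    {"court_fips": case["court_fips"], "case_number": case["case_number"]},
    case,
    upsert=True,   # insert on first sight, replace on re-scrape
)
print(cases.count_documents({"court_fips": "013"}))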
e3a9db58f03eb73635a94ed6249e3c2a308f4ad0
Fix some typos found in staging.
fedora-infra/fedmsg-genacls
fedmsg_genacls.py
fedmsg_genacls.py
# -*- coding: utf-8 -*- """ A fedmsg consumer that listens to pkgdb messages to update gitosis acls Authors: Janez Nemanič <[email protected]> Ralph Bean <[email protected]> """ import pprint import subprocess import os import fedmsg.consumers import moksha.hub.reactor class GenACLsConsumer(fedmsg.consumers.FedmsgConsumer): # Really, we want to use this specific topic to listen to. topic = 'org.fedoraproject.prod.pkgdb.acl.update' # But for testing, we'll just listen to all topics with this: #topic = '*' config_key = 'genacls.consumer.enabled' def __init__(self, hub): super(GenACLsConsumer, self).__init__(hub) # This is required. It is the number of seconds that we should wait # until we ultimately act on a pkgdb message. self.delay = self.hub.config['genacls.consumer.delay'] # We use this to manage our state self.queued_messages = [] def consume(self, msg): msg = msg['body'] self.log.info("Got a message %r" % msg['topic']) def delayed_consume(): if self.queued_messages: try: self.action(self.queued_messages) finally: # Empty our list at the end of the day. self.queued_messages = [] else: self.log.debug("Woke up, but there were no messages.") self.queued_messages.append(msg) moksha.hub.reactor.reactor.callLater(self.delay, delayed_consume) def action(self, messages): self.log.debug("Acting on %s" % pprint.pformat(messages)) # This script and the UID/GID are found in our puppet repo. # The fedmsg user must be given passwordless sudo as the gen-acls user # for this to work correctly. command = '/usr/local/bin/genacls.sh' genacls_UID = 417 genacls_GID = 417 def change_subprocess_id(): os.setuid(genacls_UID) os.setgid(genacls_GID) return_code = subprocess.Popen( args=command, preexec_fn=change_subprocess_id) if return_code == 0: self.log.info("%r successful" % command) else: self.log.error("%r exited with %r" % (command, return_code))
# -*- coding: utf-8 -*- """ A fedmsg consumer that listens to pkgdb messages to update gitosis acls Authors: Janez Nemanič <[email protected]> Ralph Bean <[email protected]> """ import pprint import subprocess import os import fedmsg.consumers import moksha.hub.reactor class GenACLsConsumer(fedmsg.consumers.FedmsgConsumer): # Really, we want to use this specific topic to listen to. topic = 'org.fedoraproject.prod.pkgdb.acl.update' # But for testing, we'll just listen to all topics with this: #topic = '*' config_key = 'genacls.consumer.enabled' def __init__(self, hub): super(GenACLsConsumer, self).__init__(hub) # This is required. It is the number of seconds that we should wait # until we ultimately act on a pkgdb message. self.delay = self.hub.config['genacls.consumer.delay'] # We use this to manage our state self.queued_messages = [] def consume(self, msg): msg = msg['body'] self.log.info("Got a message %r" % msg['topic']) def delayed_consume(): if self.queued_messages: try: self.action(self.queued_messages) finally: # Empty our list at the end of the day. self.queued_messages = [] else: self.log.debug("Woke up, but there were no messages.") self.queued_messages.append(msg) moksha.hub.reactor.reactor.callLater(self.delay, delayed_consume) def action(self, messages): self.log.debug("Acting on %r" % pprint.pformat(messages)) command = '/usr/local/bin/genacls.sh' genacls_UID = 417 genacls_GID = 417 def change_subprocess_id(): os.setuid(user_UID) os.setgid(user_GID) return_code = subprocess.Popen( args=command, preexec_fn=change_subprocess_id) if return_code == 0: self.log.info("%r successful" % command) else: self.log.error("%r exited with %r" % (command, return_code))
lgpl-2.1
Python
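Beyond the named typo fix (user_UID and user_GID were undefined inside the old handler), two things in that action method are worth flagging as reader's notes, not claims about the commit: subprocess.Popen returns a Popen object, so comparing it to 0 never reports success (proc.wait() gives the exit status), and when dropping privileges from root, setgid must come before setuid, because after setuid the process may no longer change groups. A POSIX-only sketch, which needs to run as root or the set calls raise PermissionError:

import os
import subprocess

TARGET_UID = 417   # illustrative ids, as in the consumer above
TARGET_GID = 417

def drop_privileges():
    os.setgid(TARGET_GID)   # group first, while we still can
    os.setuid(TARGET_UID)

proc = subprocess.Popen(["/usr/bin/id"], preexec_fn=drop_privileges)
print("exit status:", proc.wait())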
d6342967598ae7fa822592b42e0f85de2beaf916
use constants
freedesktop-unofficial-mirror/telepathy__telepathy-rakia,freedesktop-unofficial-mirror/telepathy__telepathy-rakia,freedesktop-unofficial-mirror/telepathy__telepathy-rakia
tests/twisted/test-self-alias.py
tests/twisted/test-self-alias.py
# # Test alias setting for the self handle # from sofiatest import exec_test import constants as cs import dbus def test(q, bus, conn, sip_proxy): conn.Connect() q.expect('dbus-signal', signal='StatusChanged', args=[0, 1]) self_handle = conn.GetSelfHandle() default_alias = conn.Aliasing.GetAliases([self_handle])[self_handle] conn.Aliasing.SetAliases({self_handle: '[email protected]'}) event = q.expect('dbus-signal', signal='AliasesChanged', args=[[(self_handle, u'[email protected]')]]) handle = conn.RequestHandles(1, ['sip:[email protected]'])[0] assert cs.CONN_IFACE_ALIASING in \ conn.Properties.Get(cs.CONN_IFACE_CONTACTS, "ContactAttributeInterfaces") attrs = conn.Contacts.GetContactAttributes([self_handle, handle], [cs.CONN_IFACE_ALIASING], False) assert cs.CONN_IFACE_ALIASING + "/alias" in attrs[self_handle] assert attrs[self_handle][cs.CONN_IFACE_ALIASING + "/alias"] == u'[email protected]' conn.RequestChannel(cs.CHANNEL_TYPE_TEXT, 1, handle, True) event = q.expect('dbus-signal', signal='NewChannel') text_iface = dbus.Interface(bus.get_object(conn.bus_name, event.args[0]), cs.CHANNEL_TYPE_TEXT) text_iface.Send(0, 'Check the display name in From') event = q.expect('sip-message') self_uri = conn.InspectHandles(1, [self_handle])[0] from_header = event.sip_message.headers['from'][0] assert from_header.startswith('"[email protected]" <' + self_uri + '>'), from_header # Test setting of the default alias conn.Aliasing.SetAliases({self_handle: default_alias}) text_iface.Send(0, 'The display name should be missing in From') event = q.expect('sip-message') from_header = event.sip_message.headers['from'][0] assert from_header.startswith('<' + self_uri + '>'), from_header # Test if escaping and whitespace normalization works conn.Aliasing.SetAliases({self_handle: 'foo " bar \\\r\n baz\t'}) text_iface.Send(0, 'Check display name escaping in From') event = q.expect('sip-message') from_header = event.sip_message.headers['from'][0] assert from_header.startswith(r'"foo \" bar \\ baz " <' + self_uri + '>'), from_header if __name__ == '__main__': exec_test(test)
# # Test alias setting for the self handle # from sofiatest import exec_test from servicetest import tp_name_prefix import dbus TEXT_TYPE = tp_name_prefix + '.Channel.Type.Text' ALIASING_INTERFACE = tp_name_prefix + '.Connection.Interface.Aliasing' CONTACTS_INTERFACE = tp_name_prefix + '.Connection.Interface.Contacts' def test(q, bus, conn, sip_proxy): conn.Connect() q.expect('dbus-signal', signal='StatusChanged', args=[0, 1]) self_handle = conn.GetSelfHandle() default_alias = conn.Aliasing.GetAliases([self_handle])[self_handle] conn.Aliasing.SetAliases({self_handle: '[email protected]'}) event = q.expect('dbus-signal', signal='AliasesChanged', args=[[(self_handle, u'[email protected]')]]) handle = conn.RequestHandles(1, ['sip:[email protected]'])[0] assert ALIASING_INTERFACE in \ conn.Properties.Get(CONTACTS_INTERFACE, "ContactAttributeInterfaces") attrs = conn.Contacts.GetContactAttributes([self_handle, handle], [ALIASING_INTERFACE], False) assert ALIASING_INTERFACE + "/alias" in attrs[self_handle] assert attrs[self_handle][ALIASING_INTERFACE + "/alias"] == u'[email protected]' conn.RequestChannel(TEXT_TYPE, 1, handle, True) event = q.expect('dbus-signal', signal='NewChannel') text_iface = dbus.Interface(bus.get_object(conn.bus_name, event.args[0]), TEXT_TYPE) text_iface.Send(0, 'Check the display name in From') event = q.expect('sip-message') self_uri = conn.InspectHandles(1, [self_handle])[0] from_header = event.sip_message.headers['from'][0] assert from_header.startswith('"[email protected]" <' + self_uri + '>'), from_header # Test setting of the default alias conn.Aliasing.SetAliases({self_handle: default_alias}) text_iface.Send(0, 'The display name should be missing in From') event = q.expect('sip-message') from_header = event.sip_message.headers['from'][0] assert from_header.startswith('<' + self_uri + '>'), from_header # Test if escaping and whitespace normalization works conn.Aliasing.SetAliases({self_handle: 'foo " bar \\\r\n baz\t'}) text_iface.Send(0, 'Check display name escaping in From') event = q.expect('sip-message') from_header = event.sip_message.headers['from'][0] assert from_header.startswith(r'"foo \" bar \\ baz " <' + self_uri + '>'), from_header if __name__ == '__main__': exec_test(test)
lgpl-2.1
Python
0a57bcc2faca88d0527bb1f14dae2b0b9b5168f2
bump filer version to 0.9pbs.54
pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer
filer/__init__.py
filer/__init__.py
#-*- coding: utf-8 -*- # version string following pep-0396 and pep-0386 __version__ = '0.9pbs.54' # pragma: nocover
#-*- coding: utf-8 -*- # version string following pep-0396 and pep-0386 __version__ = '0.9pbs.53' # pragma: nocover
bsd-3-clause
Python
006cbb88f2a06cd1411f88126ccf4a43121aa858
Update app startup process with new servicemanager and websocket communication.
supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer
app/main.py
app/main.py
""" The main module for HomePiServer. Initializes SocketIO, ServiceManager, NavigationChannel, View Manager. """ import signal from threading import Thread from gevent import monkey from flask import Flask from flask_socketio import SocketIO from .controllers import CONTROLLERS from .core.logger import configure_logging from .core.websocket_manager import WebSocketManager from .core.servicemanager import ServiceManager from .services import SERVICES monkey.patch_all() class HomePiServer(object): """ Encapsulates the entire server. """ def __init__(self, config): params = { "template_folder": "../templates", "static_folder": "../static" } self.flask_app = Flask(__name__, **params) self.flask_app.config.from_object(config) self.register_blueprints(self.flask_app, CONTROLLERS) self.app = SocketIO(self.flask_app) self.socket_manager = WebSocketManager(self.app) self.service_manager = ServiceManager(SERVICES, self.socket_manager) configure_logging(self.flask_app) self.start_services() def start_services(self): """Starts self.service_manager.start() on a new thread.""" self.service_thread = Thread(target=self.service_manager.start).start() @staticmethod def register_blueprints(app, params): """ Registers all the blueprints in controllers list. Args: app: Flask app to register the blueprint with. controllers: List like: [(prefix, blueprint), ...] """ for prefix, controller in params: app.register_blueprint(controller, url_prefix=prefix) def shutdown(self): pass def setup_signals(app): """ Listen for SIGTERM and SIGINIT and calls app.shutdown()""" def make_new_handler(prev_handler_func): def new_handler(var1, var2): app.shutdown() if prev_handler_func: prev_handler_func(var1, var2) return new_handler for sig in (signal.SIGTERM, signal.SIGINT): prev_handler = signal.getsignal(sig) signal.signal(sig, make_new_handler(prev_handler)) def create_app(config=None): """ Returns a new instance of HomePiServer.""" if config is None: import app.config config = app.config app = HomePiServer(config) setup_signals(app) return app.flask_app, app.app
""" The main module for HomePiServer. Initializes SocketIO, ServiceManager, NavigationChannel, View Manager. """ import signal from threading import Thread from gevent import monkey from flask import Flask from flask_socketio import SocketIO from .controllers import CONTROLLERS from .core.socketchannel import NavigationChannel from .core.logger import configure_logging from .services import ServiceManager, SERVICES from .views import ViewManager monkey.patch_all() class HomePiServer(object): """ Encapsulates the entire server. """ def __init__(self, config): params = { "template_folder": "../templates", "static_folder": "../static" } self.flask_app = Flask(__name__, **params) self.flask_app.config.from_object(config) self.register_blueprints(self.flask_app, CONTROLLERS) self.app = SocketIO(self.flask_app) self.nav_channel = NavigationChannel("/navigation", self.app) self.app.on_namespace(self.nav_channel) self.view_manager = ViewManager(self.nav_channel) self.nav_channel.display = self.view_manager self.service_manager = ServiceManager(SERVICES, self.view_manager) configure_logging(self.flask_app) self.start_services() def start_services(self): """Starts self.service_manager.start() on a new thread.""" self.service_thread = Thread(target=self.service_manager.start).start() @staticmethod def register_blueprints(app, params): """ Registers all the blueprints in controllers list. Args: app: Flask app to register the blueprint with. controllers: List like: [(prefix, blueprint), ...] """ for prefix, controller in params: app.register_blueprint(controller, url_prefix=prefix) def shutdown(self): pass def setup_signals(app): """ Listen for SIGTERM and SIGINIT and calls app.shutdown()""" def make_new_handler(prev_handler_func): def new_handler(var1, var2): app.shutdown() if prev_handler_func: prev_handler_func(var1, var2) return new_handler for sig in (signal.SIGTERM, signal.SIGINT): prev_handler = signal.getsignal(sig) signal.signal(sig, make_new_handler(prev_handler)) def create_app(config=None): """ Returns a new instance of HomePiServer.""" if config is None: import app.config config = app.config app = HomePiServer(config) setup_signals(app) return app.flask_app, app.app
mit
Python
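setup_signals above chains onto whatever handler was installed before it instead of replacing it. The same pattern in isolation (POSIX-only because of SIGUSR1; the SIG_DFL and SIG_IGN sentinels show up as non-callables and are skipped):

import os
import signal

def install_chained(sig, extra):
    prev = signal.getsignal(sig)
    def handler(signum, frame):
        extra()
        if callable(prev):        # skip SIG_DFL / SIG_IGN sentinels
            prev(signum, frame)
    signal.signal(sig, handler)

install_chained(signal.SIGUSR1, lambda: print("cleanup hook ran"))
os.kill(os.getpid(), signal.SIGUSR1)
print("still alive")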
a0a92e237ca91dc8f0318a27dfeec9b9c8e95de5
Add utility to guess livelock file for an owner
grnet/snf-ganeti,mbakke/ganeti,andir/ganeti,ganeti/ganeti,dimara/ganeti,mbakke/ganeti,grnet/snf-ganeti,ganeti-github-testing/ganeti-test-1,leshchevds/ganeti,leshchevds/ganeti,yiannist/ganeti,yiannist/ganeti,yiannist/ganeti,apyrgio/ganeti,onponomarev/ganeti,mbakke/ganeti,andir/ganeti,ganeti/ganeti,dimara/ganeti,andir/ganeti,apyrgio/ganeti,bitemyapp/ganeti,ganeti/ganeti,onponomarev/ganeti,bitemyapp/ganeti,ganeti-github-testing/ganeti-test-1,leshchevds/ganeti
lib/utils/livelock.py
lib/utils/livelock.py
# # # Copyright (C) 2014 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Lockfiles to prove liveliness When requesting resources, like locks, from wconfd, requesters have to provide the name of a file they own an exclusive lock on, to prove that they are still alive. Provide methods to obtain such a file. """ import fcntl import os import struct import time from ganeti.utils.algo import NiceSort from ganeti import pathutils class LiveLock(object): """Utility for a lockfile needed to request resources from WconfD. """ def __init__(self, name=None): if name is None: name = "pid%d_" % os.getpid() # to avoid reusing existing lock files, extend name # by the current time name = "%s_%d" % (name, int(time.time())) fname = os.path.join(pathutils.LIVELOCK_DIR, name) self.lockfile = open(fname, 'w') fcntl.fcntl(self.lockfile, fcntl.F_SETLKW, struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)) def close(self): """Close the lockfile and clean it up. """ self.lockfile.close() os.remove(self.lockfile.name) def GuessLockfileFor(name): """For a given name, take the latest file matching. @return: the file with the latest name matching the given prefix in LIVELOCK_DIR, or the plain name, if none exists. """ lockfiles = filter(lambda n: n.startswith(name), os.listdir(pathutils.LIVELOCK_DIR)) if len(lockfiles) > 0: lockfile = NiceSort(lockfiles)[-1] else: lockfile = name return os.path.join(pathutils.LIVELOCK_DIR, lockfile)
# # # Copyright (C) 2014 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Lockfiles to prove liveliness When requesting resources, like locks, from wconfd, requesters have to provide the name of a file they own an exclusive lock on, to prove that they are still alive. Provide methods to obtain such a file. """ import fcntl import os import struct import time from ganeti import pathutils class LiveLock(object): """Utility for a lockfile needed to request resources from WconfD. """ def __init__(self, name=None): if name is None: name = "pid%d_" % os.getpid() # to avoid reusing existing lock files, extend name # by the current time name = "%s_%d" % (name, int(time.time())) fname = os.path.join(pathutils.LIVELOCK_DIR, name) self.lockfile = open(fname, 'w') fcntl.fcntl(self.lockfile, fcntl.F_SETLKW, struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)) def close(self): """Close the lockfile and clean it up. """ self.lockfile.close() os.remove(self.lockfile.name)
bsd-2-clause
Python
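LiveLock above packs a struct flock by hand for F_SETLKW. fcntl.lockf drives the same POSIX record locking with far less ceremony, so a hedged equivalent of the lock-and-cleanup pair looks like this (POSIX-only):

import fcntl
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "demo.livelock")
handle = open(path, "w")
fcntl.lockf(handle, fcntl.LOCK_EX)   # exclusive lock; blocks like F_SETLKW
print("lock held by pid", os.getpid())
handle.close()                       # closing the file releases the lock
os.remove(path)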
a281fd3c49b86012fd370ae82df19525af89ff1c
Disable swift test
swift-lang/swift-e-lab,Parsl/parsl,swift-lang/swift-e-lab,Parsl/parsl,Parsl/parsl,Parsl/parsl
parsl/tests/test_swift.py
parsl/tests/test_swift.py
import pytest import parsl from parsl import * parsl.set_stream_logger() from parsl.executors.swift_t import * def foo(x, y): return x * y def slow_foo(x, y): import time time.sleep(x) return x * y def bad_foo(x, y): time.sleep(x) return x * y @pytest.mark.skip('fails intermittently') @pytest.mark.local def test_simple(): print("Start") tex = TurbineExecutor() x = tex.submit(foo, 5, 10) print("Got: ", x) print("X result: ", x.result()) assert x.result() == 50, "X != 50" print("done") @pytest.mark.local @pytest.mark.skip('fails intermittently') def test_slow(): futs = {} tex = TurbineExecutor() for i in range(0, 3): futs[i] = tex.submit(slow_foo, 1, 2) total = sum([futs[i].result(timeout=10) for i in futs]) assert total == 6, "expected 6, got {}".format(total) @pytest.mark.local @pytest.mark.skip('fails intermittently') def test_except(): with pytest.raises(NameError): tex = TurbineExecutor() x = tex.submit(bad_foo, 5, 10) x.result() if __name__ == "__main__": # test_simple() # test_slow() test_except() print("Done")
import pytest import parsl from parsl import * parsl.set_stream_logger() from parsl.executors.swift_t import * def foo(x, y): return x * y def slow_foo(x, y): import time time.sleep(x) return x * y def bad_foo(x, y): time.sleep(x) return x * y @pytest.mark.local def test_simple(): print("Start") tex = TurbineExecutor() x = tex.submit(foo, 5, 10) print("Got: ", x) print("X result: ", x.result()) assert x.result() == 50, "X != 50" print("done") @pytest.mark.local def test_slow(): futs = {} tex = TurbineExecutor() for i in range(0, 3): futs[i] = tex.submit(slow_foo, 1, 2) total = sum([futs[i].result(timeout=10) for i in futs]) assert total == 6, "expected 6, got {}".format(total) @pytest.mark.local def test_except(): with pytest.raises(NameError): tex = TurbineExecutor() x = tex.submit(bad_foo, 5, 10) x.result() if __name__ == "__main__": # test_simple() # test_slow() test_except() print("Done")
apache-2.0
Python
e9980d7498c0889ecd795a4d2977c1893e0ad7e3
comment on md5 usage
JsonChiu/openrunlog,JsonChiu/openrunlog,JsonChiu/openrunlog,JsonChiu/openrunlog
app/util.py
app/util.py
import bcrypt import md5 def hash_pwd(password): return bcrypt.hashpw(password, bcrypt.gensalt()) def check_pwd(password, hashed): return bcrypt.hashpw(password, hashed) == hashed def validate_time(time): return True # XXX md5 module deprecated, use hashlib def gravatar_html(email): h = md5.md5(email.lower()).hexdigest() html = '<img src="http://www.gravatar.com/avatar/%s.jpg?s=15" />' % h return html
import bcrypt import md5 def hash_pwd(password): return bcrypt.hashpw(password, bcrypt.gensalt()) def check_pwd(password, hashed): return bcrypt.hashpw(password, hashed) == hashed def validate_time(time): return True def gravatar_html(email): h = md5.md5(email.lower()).hexdigest() html = '<img src="http://www.gravatar.com/avatar/%s.jpg?s=15" />' % h return html
bsd-2-clause
Python
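Following up the deprecation note in that diff: hashlib.md5 wants bytes, so the Python-3 form of the helper encodes the address first. A sketch:

import hashlib

def gravatar_html(email):
    digest = hashlib.md5(email.strip().lower().encode("utf-8")).hexdigest()
    return '<img src="http://www.gravatar.com/avatar/%s.jpg?s=15" />' % digest

print(gravatar_html("[email protected]"))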
1d443973e8db6265268dd2afe6b6ad7748526335
Add _read_test_file() function.
rossant/ipymd,bollwyvl/ipymd
ipymd/utils.py
ipymd/utils.py
# -*- coding: utf-8 -*- """Utils""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os import os.path as op import difflib from .six import exec_ #------------------------------------------------------------------------------ # Utils #------------------------------------------------------------------------------ def _script_dir(): return op.dirname(op.realpath(__file__)) def _test_file_path(filename): """Return the full path to an example filename in the 'examples' directory.""" return op.realpath(op.join(_script_dir(), '../examples', filename)) def _exec_test_file(filename): """Return the 'output' object defined in a Python file.""" path = _test_file_path(filename) with open(path, 'r') as f: contents = f.read() ns = {} exec_(contents, ns) return ns.get('output', None) def _read_test_file(filename): """Read a test file.""" path = _test_file_path(filename) with open(path, 'r') as f: return f.read() def _diff_removed_lines(diff): return ''.join(x[2:] for x in diff if x.startswith('- ')) def _diff(text_0, text_1): """Return a diff between two strings.""" diff = difflib.ndiff(text_0.splitlines(), text_1.splitlines()) return _diff_removed_lines(diff)
# -*- coding: utf-8 -*- """Utils""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os import os.path as op import difflib from .six import exec_ #------------------------------------------------------------------------------ # Utils #------------------------------------------------------------------------------ def _script_dir(): return op.dirname(op.realpath(__file__)) def _test_file_path(filename): """Return the full path to an example filename in the 'examples' directory.""" return op.realpath(op.join(_script_dir(), '../examples', filename)) def _exec_test_file(filename): """Return the 'output' object defined in a Python file.""" path = _test_file_path(filename) with open(path, 'r') as f: contents = f.read() ns = {} exec_(contents, ns) return ns.get('output', None) def _diff_removed_lines(diff): return ''.join(x[2:] for x in diff if x.startswith('- ')) def _diff(text_0, text_1): """Return a diff between two strings.""" diff = difflib.ndiff(text_0.splitlines(), text_1.splitlines()) return _diff_removed_lines(diff)
bsd-3-clause
Python
95ad2c65fb1b4aacea668c8d9474183b4f107d56
Test with multi args
thedrow/paver,cecedille1/paver,phargogh/paver,cecedille1/paver,nikolas/paver
paver/tests/test_shell.py
paver/tests/test_shell.py
import sys from paver.deps.six import b from mock import patch, Mock from paver import easy from subprocess import PIPE, STDOUT @patch('subprocess.Popen') def test_sh_raises_BuildFailure(popen): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] try: easy.sh('foo') except easy.BuildFailure: e = sys.exc_info()[1] args = e.args assert args == ('Subprocess return code: 1', ) else: assert False, 'Failed to raise BuildFailure' assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert 'stdout' not in popen.call_args[1] @patch('paver.shell.error') @patch('subprocess.Popen') def test_sh_with_capture_raises_BuildFailure(popen, error): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] try: easy.sh('foo', capture=True) except easy.BuildFailure: e = sys.exc_info()[1] args = e.args assert args == ('Subprocess return code: 1', ) else: assert False, 'Failed to raise BuildFailure' assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert popen.call_args[1]['stdout'] == PIPE assert popen.call_args[1]['stderr'] == STDOUT assert error.called assert error.call_args == (('some stderr', ), {}) @patch('subprocess.Popen') def test_sh_ignores_error(popen): popen.return_value.communicate.return_value = [b('some stderr')] popen.return_value.returncode = 1 easy.sh('foo', ignore_error=True) assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert 'stdout' not in popen.call_args[1] @patch('subprocess.Popen') def test_sh_ignores_error_with_capture(popen): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] easy.sh('foo', capture=True, ignore_error=True) assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert popen.call_args[1]['stdout'] == PIPE assert popen.call_args[1]['stderr'] == STDOUT @patch('subprocess.Popen') def test_sh_with_multi_command(popen): popen.return_value.returncode = 0 easy.sh(['foo', ' bar', 'fi"zz']) assert popen.called assert popen.call_args[0][0] == "foo ' bar' 'fi\"zz'" assert popen.call_args[1]['shell'] == True
import sys from paver.deps.six import b from mock import patch, Mock from paver import easy from subprocess import PIPE, STDOUT @patch('subprocess.Popen') def test_sh_raises_BuildFailure(popen): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] try: easy.sh('foo') except easy.BuildFailure: e = sys.exc_info()[1] args = e.args assert args == ('Subprocess return code: 1', ) else: assert False, 'Failed to raise BuildFailure' assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert 'stdout' not in popen.call_args[1] @patch('paver.easy.error') @patch('subprocess.Popen') def test_sh_with_capture_raises_BuildFailure(popen, error): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] try: easy.sh('foo', capture=True) except easy.BuildFailure: e = sys.exc_info()[1] args = e.args assert args == ('Subprocess return code: 1', ) else: assert False, 'Failed to raise BuildFailure' assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert popen.call_args[1]['stdout'] == PIPE assert popen.call_args[1]['stderr'] == STDOUT assert error.called assert error.call_args == (('some stderr', ), {}) @patch('subprocess.Popen') def test_sh_ignores_error(popen): popen.return_value.communicate.return_value = [b('some stderr')] popen.return_value.returncode = 1 easy.sh('foo', ignore_error=True) assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert 'stdout' not in popen.call_args[1] @patch('subprocess.Popen') def test_sh_ignores_error_with_capture(popen): popen.return_value.returncode = 1 popen.return_value.communicate.return_value = [b('some stderr')] easy.sh('foo', capture=True, ignore_error=True) assert popen.called assert popen.call_args[0][0] == 'foo' assert popen.call_args[1]['shell'] == True assert popen.call_args[1]['stdout'] == PIPE assert popen.call_args[1]['stderr'] == STDOUT
bsd-3-clause
Python
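The new test pins down how a list command should be flattened: safe words stay bare, anything with spaces or quotes gets shell-quoted. The stdlib reproduces exactly the asserted string (shlex.quote on Python 3, pipes.quote on Python 2), shown here as an independent check rather than a claim about paver's internals:

import shlex

parts = ['foo', ' bar', 'fi"zz']
print(" ".join(shlex.quote(p) for p in parts))   # foo ' bar' 'fi"zz'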
d329787dc6f862e749ca6f490a155186b48553a7
Fix one more bug; interpreter still broken
ids1024/isbfc
bfinterp.py
bfinterp.py
import sys import collections import getch from parser import parse, optimize from parser import OUTPUT, INPUT, LOOPSTART, LOOPEND, MOVE from parser import ADD, SET, MULCOPY, SCAN BUFSIZE = 8192 def interp(code): tokens = parse(code) tokens = optimize(tokens) i = 0 loops = [] mem = bytearray(BUFSIZE) cur = int(BUFSIZE/2) skiploop = False while i < len(tokens)-1: #print("%d:%s cur:%d mem[cur]:%d" % (i, code[i], cur, mem[cur])) #print(loops) token, value = tokens[i] if skiploop: if token == LOOPEND: skiploop = False i += 1 continue if token == OUTPUT: print(chr(mem[cur]), end='') elif token == INPUT: mem[cur] == ord(getch.getch()) elif token == MOVE: cur += value elif token == ADD: offset, add = value newval = mem[cur+offset] + add newval %= 256 mem[cur+offset] = newval elif token == SET: offset, val = value mem[cur+offset] = val elif token == MULCOPY: src, dest, mul = value newval = mem[cur+dest] + mem[cur+src] * mul newval %= 256 mem[cur+dest] = newval elif token == SCAN: while mem[cur] != 0: cur += value elif token == LOOPSTART: if mem[cur]: loops.append(i) else: skiploop = True elif token == LOOPEND: if mem[cur] == 0: loops.pop() else: i = loops[-1] else: raise ValueError('Token not handled') i += 1 if __name__ == '__main__': with open(sys.argv[1]) as bffile: interp(bffile.read())
import sys import collections import getch from parser import parse, optimize from parser import OUTPUT, INPUT, LOOPSTART, LOOPEND, MOVE from parser import ADD, SET, MULCOPY, SCAN BUFSIZE = 8192 def interp(code): tokens = parse(code) tokens = optimize(tokens) i = 0 loops = [] mem = bytearray(BUFSIZE) cur = int(BUFSIZE/2) skiploop = False while i < len(tokens)-1: #print("%d:%s cur:%d mem[cur]:%d" % (i, code[i], cur, mem[cur])) #print(loops) token, value = tokens[i] if skiploop: if token == LOOPEND: skiploop = False continue if token == OUTPUT: print(chr(mem[cur]), end='') elif token == INPUT: mem[cur] == ord(getch.getch()) elif token == MOVE: cur += value elif token == ADD: offset, add = value newval = mem[cur+offset] + add newval %= 256 mem[cur+offset] = newval elif token == SET: offset, val = value mem[cur+offset] = val elif token == MULCOPY: src, dest, mul = value newval = mem[cur+dest] + mem[cur+src] * mul newval %= 256 mem[cur+dest] = newval elif token == SCAN: while mem[cur] != 0: cur += value elif token == LOOPSTART: if mem[cur]: loops.append(i) else: skiploop = True elif token == LOOPEND: if mem[cur] == 0: loops.pop() else: i = loops[-1] else: raise ValueError('Token not handled') i += 1 if __name__ == '__main__': with open(sys.argv[1]) as bffile: interp(bffile.read())
mit
Python
4cd44a177147569767a8f53aed67cbee0f759667
bump version to 3.0.0-alpha
widdowquinn/pyani
pyani/__init__.py
pyani/__init__.py
# python package version # should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py """Module with main code for pyani application/package.""" __version__ = '0.3.0-alpha'
# python package version # should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py """Module with main code for pyani application/package.""" __version__ = '0.3.0.dev'
mit
Python
3066837091621720be0b0338d12ed66fd24a86b1
bump version
davidkuep/pyiso,emunsing/pyiso,emunsing/pyiso
pyiso/__init__.py
pyiso/__init__.py
import imp import os.path __version__ = '0.2.7' BALANCING_AUTHORITIES = { 'BPA': {'module': 'bpa', 'class': 'BPAClient'}, 'CAISO': {'module': 'caiso', 'class': 'CAISOClient'}, 'ERCOT': {'module': 'ercot', 'class': 'ERCOTClient'}, 'ISONE': {'module': 'isone', 'class': 'ISONEClient'}, 'MISO': {'module': 'miso', 'class': 'MISOClient'}, 'NEVP': {'module': 'nvenergy', 'class': 'NVEnergyClient'}, 'NYISO': {'module': 'nyiso', 'class': 'NYISOClient'}, 'PJM': {'module': 'pjm', 'class': 'PJMClient'}, 'SPPC': {'module': 'nvenergy', 'class': 'NVEnergyClient'}, 'SPP': {'module': 'spp', 'class': 'SPPClient'}, } def client_factory(client_name, **kwargs): """Return a client for an external data set""" # set up dir_name = os.path.dirname(os.path.abspath(__file__)) error_msg = 'No client found for name %s' % client_name client_key = client_name.upper() # find client try: client_vals = BALANCING_AUTHORITIES[client_key] module_name = client_vals['module'] class_name = client_vals['class'] except KeyError: raise ValueError(error_msg) # find module try: fp, pathname, description = imp.find_module(module_name, [dir_name]) except ImportError: raise ValueError(error_msg) # load try: mod = imp.load_module(module_name, fp, pathname, description) finally: # Since we may exit via an exception, close fp explicitly. if fp: fp.close() # instantiate class try: client_inst = getattr(mod, class_name)(**kwargs) except AttributeError: raise ValueError(error_msg) # set name client_inst.NAME = client_name return client_inst
import imp import os.path __version__ = '0.2.6' BALANCING_AUTHORITIES = { 'BPA': {'module': 'bpa', 'class': 'BPAClient'}, 'CAISO': {'module': 'caiso', 'class': 'CAISOClient'}, 'ERCOT': {'module': 'ercot', 'class': 'ERCOTClient'}, 'ISONE': {'module': 'isone', 'class': 'ISONEClient'}, 'MISO': {'module': 'miso', 'class': 'MISOClient'}, 'NEVP': {'module': 'nvenergy', 'class': 'NVEnergyClient'}, 'NYISO': {'module': 'nyiso', 'class': 'NYISOClient'}, 'PJM': {'module': 'pjm', 'class': 'PJMClient'}, 'SPPC': {'module': 'nvenergy', 'class': 'NVEnergyClient'}, 'SPP': {'module': 'spp', 'class': 'SPPClient'}, } def client_factory(client_name, **kwargs): """Return a client for an external data set""" # set up dir_name = os.path.dirname(os.path.abspath(__file__)) error_msg = 'No client found for name %s' % client_name client_key = client_name.upper() # find client try: client_vals = BALANCING_AUTHORITIES[client_key] module_name = client_vals['module'] class_name = client_vals['class'] except KeyError: raise ValueError(error_msg) # find module try: fp, pathname, description = imp.find_module(module_name, [dir_name]) except ImportError: raise ValueError(error_msg) # load try: mod = imp.load_module(module_name, fp, pathname, description) finally: # Since we may exit via an exception, close fp explicitly. if fp: fp.close() # instantiate class try: client_inst = getattr(mod, class_name)(**kwargs) except AttributeError: raise ValueError(error_msg) # set name client_inst.NAME = client_name return client_inst
apache-2.0
Python
42ea9fef4203d5acd73e732dbe0e4d8672e81d17
bump version for pypi
google/jax,google/jax,google/jax,tensorflow/probability,google/jax,tensorflow/probability
jax/version.py
jax/version.py
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.1.34"
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.1.33"
apache-2.0
Python
bcc8164f2e6ed4401dc5ecb74a28ebe8554f7b82
Add Windows support.
octalmage/robotjs,BHamrick1/robotjs,octalmage/robotjs,BHamrick1/robotjs,hristoterezov/robotjs,BHamrick1/robotjs,hristoterezov/robotjs,octalmage/robotjs,octalmage/robotjs,BHamrick1/robotjs,hristoterezov/robotjs,hristoterezov/robotjs
binding.gyp
binding.gyp
{ 'targets': [{ 'target_name': 'robotjs', 'include_dirs': [ 'node_modules/nan/' ], 'cflags': [ '-Wall', '-Wparentheses', '-Winline', '-Wbad-function-cast', '-Wdisabled-optimization' ], 'conditions': [ ['OS == "mac"', { 'include_dirs': [ 'System/Library/Frameworks/CoreFoundation.Framework/Headers', 'System/Library/Frameworks/Carbon.Framework/Headers', 'System/Library/Frameworks/ApplicationServices.framework/Headers', 'System/Library/Frameworks/OpenGL.framework/Headers', ], 'link_settings': { 'libraries': [ '-framework Carbon', '-framework CoreFoundation', '-framework ApplicationServices', '-framework OpenGL' ] } }], ['OS == "linux"', { 'link_settings': { 'libraries': [ '-lpng', '-lz', '-lX11', '-lXtst' ] }, 'sources': [ 'src/xdisplay.c' ] }], ["OS=='win'", { 'defines': [ 'IS_WINDOWS'] }] ], 'sources': [ 'src/robotjs.cc', 'src/deadbeef_rand.c', 'src/mouse.c', 'src/keypress.c', 'src/keycode.c', 'src/screen.c', 'src/screengrab.c', 'src/snprintf.c', 'src/MMBitmap.c' ] }] }
{ 'targets': [{ 'target_name': 'robotjs', 'include_dirs': [ '<!(node -e \'require("nan")\')' ], 'cflags': [ '-Wall', '-Wparentheses', '-Winline', '-Wbad-function-cast', '-Wdisabled-optimization' ], 'conditions': [ ['OS == "mac"', { 'include_dirs': [ 'System/Library/Frameworks/CoreFoundation.Framework/Headers', 'System/Library/Frameworks/Carbon.Framework/Headers', 'System/Library/Frameworks/ApplicationServices.framework/Headers', 'System/Library/Frameworks/OpenGL.framework/Headers', ], 'link_settings': { 'libraries': [ '-framework Carbon', '-framework CoreFoundation', '-framework ApplicationServices', '-framework OpenGL' ] } }], ['OS == "linux"', { 'link_settings': { 'libraries': [ '-lpng', '-lz', '-lX11', '-lXtst' ] }, 'sources': [ 'src/xdisplay.c' ] }] ], 'sources': [ 'src/robotjs.cc', 'src/deadbeef_rand.c', 'src/mouse.c', 'src/keypress.c', 'src/keycode.c', 'src/screen.c', 'src/screengrab.c', 'src/MMBitmap.c' ] }] }
mit
Python
c5da75e3acb4ba4c69204ff1ad3e7e89d6710001
Add whitespace in tests
jackzhao-mj/ok,jackzhao-mj/ok,Cal-CS-61A-Staff/ok,jordonwii/ok,jordonwii/ok,Cal-CS-61A-Staff/ok,jordonwii/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,jackzhao-mj/ok,Cal-CS-61A-Staff/ok,jackzhao-mj/ok,jordonwii/ok
client/tests/framework_test.py
client/tests/framework_test.py
#!/usr/bin/python3 import unittest import ok class TestProtocol(ok.Protocol): name = "test" def __init__(self, args, src_files): ok.Protocol.__init__(args, src_files) self.called_start = 0 self.called_interact = 0 def on_start(self, buf): self.called_start += 1 def on_interact(self, buf): self.called_interact += 1 class OkTest(unittest.TestCase): def setUp(self): self.hw1 = './demo_assignments/hw1.py' self.hw1_tests = './demo_assignments/hw1_tests.py' def test_parse_input(self): _ = ok.parse_input() # Does not crash and returns a value. def test_is_src_file(self): self.assertTrue(ok.is_src_file('hw1.py')) self.assertFalse(ok.is_src_file('hw1_tests.py')) self.assertFalse(ok.is_src_file('hw1_tests')) self.assertFalse(ok.is_src_file('hw1.html')) self.assertFalse(ok.is_src_file('ok.py')) def test_get_assignment(self): self.assertTrue(ok.get_assignment(self.hw1) == 'hw1') self.assertFalse(ok.get_assignment(self.hw1_tests)) def test_group_by_assignment(self): paths = [self.hw1, self.hw1_tests] groups = ok.group_by_assignment(paths) self.assertIn('hw1', groups) self.assertEqual(groups['hw1'], paths[0:1]) def test_find_assignment(self): assignment, src_files = ok.find_assignment(None, '.') self.assertEqual(assignment, 'hw1') self.assertEqual(src_files, [self.hw1]) self.assertRaises(Exception, ok.find_assignment, [None, 'tests']) self.assertRaises(Exception, ok.find_assignment, ['hw2', '.'])
#!/usr/bin/python3 import unittest import ok class TestProtocol(ok.Protocol): name = "test" def __init__(self, args, src_files): ok.Protocol.__init__(args, src_files) self.called_start = 0 self.called_interact = 0 def on_start(self, buf): self.called_start += 1 def on_interact(self, buf): self.called_interact += 1 class OkTest(unittest.TestCase): def setUp(self): self.hw1 = './demo_assignments/hw1.py' self.hw1_tests = './demo_assignments/hw1_tests.py' def test_parse_input(self): _ = ok.parse_input() # Does not crash and returns a value. def test_is_src_file(self): self.assertTrue(ok.is_src_file('hw1.py')) self.assertFalse(ok.is_src_file('hw1_tests.py')) self.assertFalse(ok.is_src_file('hw1_tests')) self.assertFalse(ok.is_src_file('hw1.html')) self.assertFalse(ok.is_src_file('ok.py')) def test_get_assignment(self): self.assertTrue(ok.get_assignment(self.hw1) == 'hw1') self.assertFalse(ok.get_assignment(self.hw1_tests)) def test_group_by_assignment(self): paths = [self.hw1, self.hw1_tests] groups = ok.group_by_assignment(paths) self.assertIn('hw1', groups) self.assertEqual(groups['hw1'], paths[0:1]) def test_find_assignment(self): assignment, src_files = ok.find_assignment(None, '.') self.assertEqual(assignment, 'hw1') self.assertEqual(src_files, [self.hw1]) self.assertRaises(Exception, ok.find_assignment, [None, 'tests']) self.assertRaises(Exception, ok.find_assignment, ['hw2', '.'])
apache-2.0
Python
c7764ac8c1363701b4e7fab1d8ae0e3197853b48
Update __init__.py
muteness/Pylsy,gnithin/Pylsy,gnithin/Pylsy,shnode/Pylsy,bcho/Pylsy,suhussai/Pylsy,shaunstanislaus/Pylsy,Maijin/Pylsy,janusnic/Pylsy,huiyi1990/Pylsy,muteness/Pylsy,bcho/Pylsy,Geoion/Pylsy,huiyi1990/Pylsy
pylsy/__init__.py
pylsy/__init__.py
#__init__.py from .pylsy import PylsyTable __version__="1.003"
#__init__.py from .pylsy import PylsyTable __version__="1.001"
mit
Python
eaa17491581cbb52242fbe543dd09929f537a8bc
Add option to ignore static.
meilalina/kotlin-web-site,meilalina/kotlin-web-site,hltj/kotlin-web-site-cn,hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,JetBrains/kotlin-web-site,hltj/kotlin-web-site-cn,hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,JetBrains/kotlin-web-site,meilalina/kotlin-web-site,JetBrains/kotlin-web-site,meilalina/kotlin-web-site,hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,hltj/kotlin-web-site-cn
mysettings.py
mysettings.py
from src.markdown.makrdown import jinja_aware_markdown PREFERRED_URL_SCHEME = 'http' SERVER_NAME = 'localhost:5000' FLATPAGES_EXTENSION = '.md' FLATPAGES_HTML_RENDERER = jinja_aware_markdown FREEZER_IGNORE_404_NOT_FOUND = True FLATPAGES_AUTO_RELOAD = True FREEZER_STATIC_IGNORE = ["*"] GITHUB_URL = 'https://github.com/JetBrains/kotlin' TWITTER_URL = 'https://twitter.com/kotlin' EDIT_ON_GITHUB_URL = 'https://github.com/JetBrains/kotlin-web-site/edit/master/' PDF_URL = '/docs/kotlin-docs.pdf' FORUM_URL = 'http://devnet.jetbrains.com/community/kotlin' SITE_GITHUB_URL = 'http://github.com/JetBrains/kotlin-web-site' CODE_URL = 'https://github.com/JetBrains/kotlin-examples/tree/master' TEXT_USING_GRADLE = "In this tutorial we're going to be using Gradle but the same can be accomplished using either IntelliJ IDEA project structure or Maven. For details on setting up Gradle to work with Kotlin, see [Using Gradle](/docs/reference/using-gradle.html)."
from src.markdown.makrdown import jinja_aware_markdown PREFERRED_URL_SCHEME = 'http' SERVER_NAME = 'localhost:5000' FLATPAGES_EXTENSION = '.md' FLATPAGES_HTML_RENDERER = jinja_aware_markdown FREEZER_IGNORE_404_NOT_FOUND = True FLATPAGES_AUTO_RELOAD = True GITHUB_URL = 'https://github.com/JetBrains/kotlin' TWITTER_URL = 'https://twitter.com/kotlin' EDIT_ON_GITHUB_URL = 'https://github.com/JetBrains/kotlin-web-site/edit/master/' PDF_URL = '/docs/kotlin-docs.pdf' FORUM_URL = 'http://devnet.jetbrains.com/community/kotlin' SITE_GITHUB_URL = 'http://github.com/JetBrains/kotlin-web-site' CODE_URL = 'https://github.com/JetBrains/kotlin-examples/tree/master' TEXT_USING_GRADLE = "In this tutorial we're going to be using Gradle but the same can be accomplished using either IntelliJ IDEA project structure or Maven. For details on setting up Gradle to work with Kotlin, see [Using Gradle](/docs/reference/using-gradle.html)."
apache-2.0
Python
9a40bd0d82c5215a8978a7d1c95f2910ee8f7f09
add UserToken model
CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend
api/models.py
api/models.py
from django.db import models from django.db.models import Q from django.utils import timezone from django.contrib.auth.models import User class MaintenanceRecord(models.Model): start_date = models.DateTimeField() end_date = models.DateTimeField(blank=True, null=True) title = models.CharField(max_length=256) message = models.TextField() disable_login = models.BooleanField(default=True) created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True) @classmethod def active(cls, provider=None): """ Return records that are active """ now = timezone.now() records = MaintenanceRecord.objects.filter( Q(start_date__lt=now), Q(end_date__gt=now) | Q(end_date__isnull=True)) return records.all() @classmethod def disable_login_access(cls, request): """ Return true if any active record wants login disabled """ disable_login = False records = MaintenanceRecord.active() for record in records: if record.disable_login: disable_login = True return disable_login class UserToken(models.Model): token = models.CharField(max_length=128) user = models.ForeignKey(User) created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True)
from django.db import models from django.db.models import Q from django.utils import timezone class MaintenanceRecord(models.Model): start_date = models.DateTimeField() end_date = models.DateTimeField(blank=True, null=True) title = models.CharField(max_length=256) message = models.TextField() disable_login = models.BooleanField(default=True) created_date = models.DateTimeField(auto_now_add=True) modified_date = models.DateTimeField(auto_now=True) @classmethod def active(cls, provider=None): """ Return records that are active """ now = timezone.now() records = MaintenanceRecord.objects.filter( Q(start_date__lt=now), Q(end_date__gt=now) | Q(end_date__isnull=True)) return records.all() @classmethod def disable_login_access(cls, request): """ Return true if any active record wants login disabled """ disable_login = False records = MaintenanceRecord.active() for record in records: if record.disable_login: disable_login = True return disable_login
apache-2.0
Python
379068d31623662c0b349f26d1cd610612963b82
add re module to be more reliable
ttlbyte/scripts,ttlbyte/scripts
joinstsfile.py
joinstsfile.py
#!/usr/bin/env python3 import os, re path=r'/home/ruan/git/stm/' namespace={} data=[] for file in os.listdir(path): if re.match('A\d{6}\.\d{6}\.L\d{4}\.VERT',file): namespace[int(file.split('.')[2][2:])]=file keys=sorted([x for x in namespace.keys()]) with open(os.path.join(path,namespace[keys[0]]),'rb') as fo: for line in fo.readlines()[526:]: data.append([line.decode('ascii').split('\t')[1],]) for i in keys: with open(os.path.join(path,namespace[i]),'rb') as fo: j=0 for line in fo.readlines()[526:]: data[j].append(line.decode('ascii').split('\t')[5]) j+=1 with open(os.path.join(path,'final.txt'),'w') as fout: for line in data: for num in line: fout.write(num+'\t') fout.write('\n')
#!/usr/bin/env python3 import os path='/home/ruan/git/stm/' #path is the directory containing the data files, e.g. 'D:\\data\\' on Windows; the source files are overwritten in place, so keep a copy of the original data for file in os.listdir(path): os.rename(os.path.join(path,file),os.path.join(path,file.split('.')[2][2:])) filenu = len(os.listdir(path)) + 1 data=[] with open(os.path.join(path,'001'),'rb') as fo: for line in fo.readlines()[526:]: data.append([line.decode('ascii').split('\t')[1],line.decode('ascii').split('\t')[5]]) j=2 while j<filenu : with open(os.path.join(path,str(j).zfill(3)),'rb') as fo: i=0 for line in fo.readlines()[526:]: data[i].append(line.decode('ascii').split('\t')[5]) i+=1 j+=1 with open(os.path.join(path,'final.txt'),'w') as fout: i=len(data) j=len(data[0]) k=0 while k<i: l=0 while l<j: fout.write(data[k][l]) fout.write('\t') l+=1 fout.write('\n') k=k+1
mit
Python
85d5712fa1dde952783cbc8d78f904e08cfc9b50
Remove duplicated dependency
trailofbits/manticore,trailofbits/manticore,trailofbits/manticore
server/setup.py
server/setup.py
from pathlib import Path from setuptools import Command, find_packages, setup class GenerateCommand(Command): description = "generates manticore_server server protobuf + grpc code from protobuf specification file" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): from grpc.tools import protoc protoc.main( [ "grpc_tools.protoc", "-I.", "--python_out=.", "--grpc_python_out=.", "--mypy_out=.", "./manticore_server/ManticoreServer.proto", ] ) setup( name="manticore_server", version="0.0.1", packages=find_packages(exclude=["tests", "tests.*"]), python_requires=">=3.7", install_requires=[ f"manticore[native] @ file://{Path(__file__).parent.resolve()}/..", "protobuf~=3.20", "grpcio~=1.46", "crytic-compile>=0.2.2", ], extras_require={ "dev": [ "grpcio-tools", "mypy-protobuf", "shiv~=1.0.1", "types-setuptools", "black~=22.0", "isort==5.10.1", "mypy==0.942", ] }, entry_points={ "console_scripts": [ "manticore_server=manticore_server.manticore_server:main", ], "distutils.commands": ["generate = GenerateCommand"], }, cmdclass={ "generate": GenerateCommand, }, )
from pathlib import Path from setuptools import Command, find_packages, setup class GenerateCommand(Command): description = "generates manticore_server server protobuf + grpc code from protobuf specification file" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): from grpc.tools import protoc protoc.main( [ "grpc_tools.protoc", "-I.", "--python_out=.", "--grpc_python_out=.", "--mypy_out=.", "./manticore_server/ManticoreServer.proto", ] ) setup( name="manticore_server", version="0.0.1", packages=find_packages(exclude=["tests", "tests.*"]), python_requires=">=3.7", install_requires=[ f"manticore[native] @ file://{Path(__file__).parent.resolve()}/..", "protobuf~=3.20", "grpcio~=1.46", "crytic-compile>=0.2.2", ], extras_require={ "dev": [ "grpcio-tools", "mypy-protobuf", "shiv~=1.0.1", "types-setuptools", "mypy-protobuf", "black~=22.0", "isort==5.10.1", "mypy==0.942", ] }, entry_points={ "console_scripts": [ "manticore_server=manticore_server.manticore_server:main", ], "distutils.commands": ["generate = GenerateCommand"], }, cmdclass={ "generate": GenerateCommand, }, )
agpl-3.0
Python
ba43de958266a2906f3ee4cad23b20361db2637a
Add arguments to job
indodutch/sim-city-client,indodutch/sim-city-client,NLeSC/sim-city-client,NLeSC/sim-city-client
scripts/submitJob.py
scripts/submitJob.py
#!/usr/bin/env python # SIM-CITY client # # Copyright 2015 Netherlands eScience Center # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Combines createTask and startJob, to create a task from a command and then start a job. ''' from __future__ import print_function import simcity import argparse import sys import json if __name__ == '__main__': parser = argparse.ArgumentParser(description="start a job") parser.add_argument('host', help="host to run pilot job on") parser.add_argument('command', help="command to run") parser.add_argument('args', nargs='*', help="command arguments") parser.add_argument( '-m', '--max', help="only run if there are less than MAX jobs running", default=2) parser.add_argument( '-c', '--config', help="configuration file", default=None) parser.add_argument( '-i', '--input', help="JSON parameter file", default=None) args = parser.parse_args() simcity.init(config=args.config) try: properties = { 'command': args.command, 'arguments': args.args, } try: with open(args.input) as f: properties['input'] = json.load(f) except TypeError: pass task, job = simcity.run_task(properties, args.host, int(args.max)) except Exception as ex: print("Task could not be added to the database: %s" % str(ex), file=sys.stderr) sys.exit(1) print("Task %s added to the database" % task.id) if job is None: print("Let task be processed by existing pilot-job scripts") else: print("Job %s (ID: %s) will process task" % (job['batch_id'], job.id))
#!/usr/bin/env python # SIM-CITY client # # Copyright 2015 Netherlands eScience Center # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Combines createTask and startJob, to create a task from a command and then start a job. ''' from __future__ import print_function import simcity import argparse import sys import json if __name__ == '__main__': parser = argparse.ArgumentParser(description="start a job") parser.add_argument('command', help="command to run") parser.add_argument('host', help="host to run pilot job on") parser.add_argument( '-m', '--max', help="only run if there are less than MAX jobs running", default=2) parser.add_argument( '-c', '--config', help="configuration file", default=None) parser.add_argument( '-i', '--input', help="JSON parameter file", default=None) args = parser.parse_args() simcity.init(config=args.config) try: properties = {'command': args.command} try: with open(args.input) as f: properties['input'] = json.load(f) except TypeError: pass task, job = simcity.run_task(properties, args.host, int(args.max)) except Exception as ex: print("Task could not be added to the database: %s" % str(ex), file=sys.stderr) sys.exit(1) print("Task %s added to the database" % task.id) if job is None: print("Let task be processed by existing pilot-job scripts") else: print("Job %s (ID: %s) will process task" % (job['batch_id'], job.id))
apache-2.0
Python
8d56a45d0b01dff3e8cd041e7ba09c882d7cbb30
add logging to file and stdout
google/llvm-premerge-checks,google/llvm-premerge-checks
phabricator-proxy/main.py
phabricator-proxy/main.py
from cmath import log from flask.logging import default_handler from urllib.parse import urlparse, parse_qs import flask import json import logging import logging.handlers import os import requests buildkite_api_token = os.getenv("BUILDKITE_API_TOKEN", "") app = flask.Flask(__name__) app.config["DEBUG"] = False formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') errHandler = logging.FileHandler('error.log', encoding='utf-8',) errHandler.setLevel(logging.ERROR) errHandler.setFormatter(formatter) app.logger.addHandler(errHandler) rotatingHandler = logging.handlers.TimedRotatingFileHandler('info.log', when='D', encoding='utf-8', backupCount=8) rotatingHandler.setFormatter(formatter) app.logger.addHandler(rotatingHandler) app.logger.setLevel(logging.INFO) stdoutLog = logging.StreamHandler() stdoutLog.setFormatter(formatter) app.logger.addHandler(stdoutLog) app.logger.removeHandler(default_handler) @app.route('/', methods=['GET']) def home(): return "Hi LLVM!" @app.route('/build', methods=['POST', 'GET']) def build(): app.logger.info('request: %s %s', flask.request, flask.request.url) app.logger.info('headers: %s', flask.request.headers) if flask.request.method == 'POST': app.logger.info('data: %s', flask.request.data) app.logger.info('form: %s', flask.request.form) url = urlparse(flask.request.url) params = parse_qs(url.query) build_env = {} for k, v in params.items(): if len(v) == 1: build_env['ph_' + k] = v[0] refspec = 'main' if 'ph_scripts_refspec' in build_env: refspec = build_env['ph_scripts_refspec'] build_request = { 'commit': 'HEAD', 'branch': refspec, 'env': build_env, 'message': f'D{build_env["ph_buildable_revision"]}', } app.logger.info('buildkite request: %s', build_request) headers = {'Authorization': f'Bearer {buildkite_api_token}'} response = requests.post( 'https://api.buildkite.com/v2/organizations/llvm-project' '/pipelines/diff-checks/builds', json=build_request, headers=headers) app.logger.info('buildkite response: %s %s', response.status_code, response.text) rjs = json.loads(response.text) return rjs['web_url'] else: return "expected POST request" if __name__ == '__main__': app.run(host='0.0.0.0:8080')
import flask import requests import os from urllib.parse import urlparse, parse_qs import json app = flask.Flask(__name__) app.config["DEBUG"] = False buildkite_api_token = os.getenv("BUILDKITE_API_TOKEN", "") @app.route('/', methods=['GET']) def home(): return "Hi LLVM!" @app.route('/build', methods=['POST', 'GET']) def build(): app.logger.info('request: %s %s', flask.request, flask.request.url) app.logger.info('headers: %s', flask.request.headers) if flask.request.method == 'POST': app.logger.info('data: %s', flask.request.data) app.logger.info('form: %s', flask.request.form) url = urlparse(flask.request.url) params = parse_qs(url.query) build_env = {} for k, v in params.items(): if len(v) == 1: build_env['ph_' + k] = v[0] refspec = 'main' if 'ph_scripts_refspec' in build_env: refspec = build_env['ph_scripts_refspec'] build_request = { 'commit': 'HEAD', 'branch': refspec, 'env': build_env, 'message': f'D{build_env["ph_buildable_revision"]}', } app.logger.info('buildkite request: %s', build_request) headers = {'Authorization': f'Bearer {buildkite_api_token}'} response = requests.post( 'https://api.buildkite.com/v2/organizations/llvm-project' '/pipelines/diff-checks/builds', json=build_request, headers=headers) app.logger.info('buildkite response: %s %s', response.status_code, response.text) rjs = json.loads(response.text) return rjs['web_url'] else: return "expected POST request" if __name__ == '__main__': app.run(host='0.0.0.0:8080')
apache-2.0
Python
c90dbc5007b5627b264493c2d16af79cff9c2af0
Add better custom has_permission check.
MJB47/Jokusoramame,MJB47/Jokusoramame,MJB47/Jokusoramame
joku/checks.py
joku/checks.py
""" Specific checks. """ from discord.ext.commands import CheckFailure, check def is_owner(ctx): if not ctx.bot.owner_id == ctx.message.author.id: raise CheckFailure(message="You are not the owner.") return True def has_permissions(**perms): def predicate(ctx): if ctx.bot.owner_id == ctx.message.author.id: return True msg = ctx.message ch = msg.channel permissions = ch.permissions_for(msg.author) if all(getattr(permissions, perm, None) == value for perm, value in perms.items()): return True # Raise a custom error message raise CheckFailure(message="You do not have any of the required permissions: {}".format( ', '.join([perm.upper() for perm in perms]) )) return check(predicate)
""" Specific checks. """ from discord.ext.commands import CheckFailure def is_owner(ctx): if not ctx.bot.owner_id == ctx.message.author.id: raise CheckFailure(message="You are not the owner.") return True
mit
Python
f9a827b41ed925e22bf1e873e5989bdd327fabbf
Add RefugeeCamp name formatting
HStg2015/backend
api/models.py
api/models.py
from django.db import models class RefugeeCamp(models.Model): # Location city = models.CharField(max_length=64) postcode = models.CharField(max_length=16) street = models.CharField(max_length=128) streetnumber = models.CharField(max_length=32) def __str__(self): return "{0} {1}: {2} {3}".format(self.postcode, self.city, self.street, self.streetnumber) class ObjectCategory(models.Model): title = models.CharField(max_length=64) def __str__(self): return self.title class ObjectSubCategory(models.Model): title = models.CharField(max_length=64) parent = models.ForeignKey(ObjectCategory) def __str__(self): return "{0}/{1}".format(self.parent, self.title) class SimpleOffer(models.Model): category = models.ForeignKey(ObjectCategory, null=True) title = models.CharField(max_length=64) description = models.CharField(max_length=4096) create_time = models.DateTimeField(auto_now_add=True) image = models.ImageField(upload_to='api.UploadedFile/bytes/filename/mimetype', blank=True, null=True) # Owner's info city = models.CharField(max_length=64) telephone = models.CharField(max_length=64) email = models.CharField(max_length=128) class HelpTimeSearch(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() camp = models.ForeignKey(RefugeeCamp) class UploadedFile(models.Model): bytes = models.TextField() filename = models.CharField(max_length=255) mimetype = models.CharField(max_length=50)
from django.db import models class RefugeeCamp(models.Model): # Location city = models.CharField(max_length=64) postcode = models.CharField(max_length=16) street = models.CharField(max_length=128) streetnumber = models.CharField(max_length=32) class ObjectCategory(models.Model): title = models.CharField(max_length=64) def __str__(self): return self.title class ObjectSubCategory(models.Model): title = models.CharField(max_length=64) parent = models.ForeignKey(ObjectCategory) def __str__(self): return "{0}/{1}".format(self.parent, self.title) class SimpleOffer(models.Model): category = models.ForeignKey(ObjectCategory, null=True) title = models.CharField(max_length=64) description = models.CharField(max_length=4096) create_time = models.DateTimeField(auto_now_add=True) image = models.ImageField(upload_to='api.UploadedFile/bytes/filename/mimetype', blank=True, null=True) # Owner's info city = models.CharField(max_length=64) telephone = models.CharField(max_length=64) email = models.CharField(max_length=128) class HelpTimeSearch(models.Model): start_time = models.DateTimeField() end_time = models.DateTimeField() camp = models.ForeignKey(RefugeeCamp) class UploadedFile(models.Model): bytes = models.TextField() filename = models.CharField(max_length=255) mimetype = models.CharField(max_length=50)
mit
Python
ee1f958cb3611ecc3af0329deda7fde5d5281c32
remove obsolete model creation
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
core/models/__init__.py
core/models/__init__.py
# -*- coding: utf-8 -*- # flake8: noqa """ Collection of models """ from core.models.allocation_strategy import Allocation, AllocationStrategy from core.models.application import Application, ApplicationMembership,\ ApplicationScore, ApplicationBookmark from core.models.application_tag import ApplicationTag from core.models.application_version import ApplicationVersion, ApplicationVersionMembership from core.models.cloud_admin import CloudAdministrator from core.models.credential import Credential, ProviderCredential from core.models.export_request import ExportRequest from core.models.group import Group, IdentityMembership,\ InstanceMembership, Leadership from core.models.identity import Identity from core.models.instance_tag import InstanceTag from core.models.profile import UserProfile from core.models.project import Project from core.models.project_instance import ProjectInstance from core.models.project_volume import ProjectVolume from core.models.provider import AccountProvider, ProviderType, PlatformType,\ Provider, ProviderInstanceAction, ProviderDNSServerIP from core.models.license import LicenseType, License, ApplicationVersionLicense from core.models.machine import ProviderMachine, ProviderMachineMembership from core.models.machine_request import MachineRequest from core.models.match import PatternMatch, MatchType from core.models.maintenance import MaintenanceRecord from core.models.instance import Instance, InstanceStatusHistory,\ InstanceStatus, InstanceAction, InstanceSource from core.models.node import NodeController from core.models.boot_script import ScriptType, BootScript, ApplicationVersionBootScript from core.models.quota import Quota from core.models.resource_request import ResourceRequest from core.models.size import Size from core.models.t import T from core.models.tag import Tag from core.models.user import AtmosphereUser from core.models.volume import Volume
from core.models.allocation_strategy import Allocation, AllocationStrategy from core.models.application import Application, ApplicationMembership,\ ApplicationScore, ApplicationBookmark from core.models.application_tag import ApplicationTag from core.models.application_version import ApplicationVersion, ApplicationVersionMembership from core.models.cloud_admin import CloudAdministrator from core.models.credential import Credential, ProviderCredential from core.models.export_request import ExportRequest from core.models.group import Group, IdentityMembership,\ InstanceMembership, Leadership from core.models.identity import Identity from core.models.instance_tag import InstanceTag from core.models.profile import UserProfile from core.models.project import Project from core.models.project_instance import ProjectInstance from core.models.project_volume import ProjectVolume from core.models.provider import AccountProvider, ProviderType, PlatformType,\ Provider, ProviderInstanceAction, ProviderDNSServerIP from core.models.license import LicenseType, License, ApplicationVersionLicense from core.models.machine import ProviderMachine, ProviderMachineMembership from core.models.machine_request import MachineRequest from core.models.match import PatternMatch, MatchType from core.models.maintenance import MaintenanceRecord from core.models.instance import Instance, InstanceStatusHistory,\ InstanceStatus, InstanceAction, InstanceSource from core.models.node import NodeController from core.models.boot_script import ScriptType, BootScript, ApplicationVersionBootScript from core.models.quota import Quota from core.models.resource_request import ResourceRequest from core.models.size import Size from core.models.t import T from core.models.tag import Tag from core.models.user import AtmosphereUser from core.models.volume import Volume def get_or_create(Model, *args, **kwargs): return Model.objects.get_or_create(*args, **kwargs)[0] def create_machine_model(name, provider, provider_alias, created_by, description): name = _get_valid_name(name, provider_alias) new_machine = get_or_create(Application, name=name, description=description, created_by=created_by) provider_machine = get_or_create(ProviderMachine, machine=new_machine, provider=provider, identifier=provider_alias) return (new_machine, provider_machine) def get_or_create_instance_model(name, provider, provider_alias, image_alias, ip_address, created_by): name = _get_valid_name(name, provider_alias) provider_machine = _get_or_create_provider_machine( provider, image_alias, created_by ) return get_or_create(Instance, name=name, provider_alias=provider_alias, provider_machine=provider_machine, ip_address=ip_address, created_by=created_by) def _get_valid_name(name, alias): """ Make sure there is a good default name if no name exists. """ if name is None or len(name) == 0: name = alias return name def _get_or_create_provider_machine(provider, image_alias, created_by): """ Get or create a ProviderMachine. If ProviderMachine does not already exist create a new Machine and related ProviderMachine. """ provider_machine = None filtered_machines = ProviderMachine.objects.filter(identifier=image_alias) if filtered_machines: provider_machine = filtered_machines[0] else: (created, provider_machine) = create_machine_model( None, provider, image_alias, created_by, "Created to support instanceModel") return provider_machine
apache-2.0
Python
1b2f0be67a8372a652b786c8b183cd5edf1807cd
Swap back to Fuzzer, no monkey patching
jmiserez/sts,jmiserez/sts,ucb-sts/sts,ucb-sts/sts
config/fuzz_pox_mesh.py
config/fuzz_pox_mesh.py
from experiment_config_lib import ControllerConfig from sts.topology import MeshTopology from sts.control_flow import Fuzzer, Interactive from sts.input_traces.input_logger import InputLogger from sts.invariant_checker import InvariantChecker from sts.simulation_state import SimulationConfig # Use POX as our controller command_line = ('''./pox.py --verbose openflow.debug ''' #sts.syncproto.pox_syncer ''' '''forwarding.l2_multi ''' #'''sts.util.socket_mux.pox_monkeypatcher ''' '''openflow.of_01 --address=__address__ --port=__port__''') controllers = [ControllerConfig(command_line, cwd="betta")] topology_class = MeshTopology topology_params = "num_switches=2" dataplane_trace = "dataplane_traces/ping_pong_same_subnet.trace" simulation_config = SimulationConfig(controller_configs=controllers, topology_class=topology_class, topology_params=topology_params, dataplane_trace=dataplane_trace, multiplex_sockets=False) control_flow = Fuzzer(simulation_config, check_interval=80, halt_on_violation=False, input_logger=InputLogger(), invariant_check=InvariantChecker.check_connectivity) #control_flow = Interactive(simulation_config, input_logger=InputLogger())
from experiment_config_lib import ControllerConfig from sts.topology import MeshTopology from sts.control_flow import Fuzzer from sts.input_traces.input_logger import InputLogger from sts.invariant_checker import InvariantChecker from sts.simulation_state import SimulationConfig # Use POX as our controller command_line = ('''./pox.py --verbose --no-cli sts.syncproto.pox_syncer ''' '''samples.topo forwarding.l2_multi ''' '''sts.util.socket_mux.pox_monkeypatcher ''' '''openflow.of_01 --address=../sts_socket_pipe''') controllers = [ControllerConfig(command_line, address="sts_socket_pipe", cwd="pox", sync="tcp:localhost:18899")] topology_class = MeshTopology topology_params = "num_switches=4" dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace" simulation_config = SimulationConfig(controller_configs=controllers, topology_class=topology_class, topology_params=topology_params, dataplane_trace=dataplane_trace, multiplex_sockets=True) control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True, input_logger=InputLogger(), invariant_check=InvariantChecker.check_liveness)
apache-2.0
Python
d0ca9aa6cf39c4743e398f65e4c7f5bbc3c03d78
Clarify API sample
nabla-c0d3/sslyze
api_sample.py
api_sample.py
# Add ./lib to the path for importing nassl import os import sys sys.path.insert(1, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) from sslyze.plugins_finder import PluginsFinder from sslyze.plugins_process_pool import PluginsProcessPool from sslyze.server_connectivity import ServerConnectivityInfo, ServerConnectivityError from sslyze.ssl_settings import TlsWrappedProtocolEnum # Setup the servers to scan and ensure they are reachable hostname = 'smtp.gmail.com' try: server_info = ServerConnectivityInfo(hostname=hostname, port=587, tls_wrapped_protocol=TlsWrappedProtocolEnum.STARTTLS_SMTP) server_info.test_connectivity_to_server() except ServerConnectivityError as e: # Could not establish an SSL connection to the server raise RuntimeError('Error when connecting to {}: {}'.format(hostname, e.error_msg)) # Get the list of available plugins sslyze_plugins = PluginsFinder() # Create a process pool to run scanning commands concurrently plugins_process_pool = PluginsProcessPool(sslyze_plugins) # Queue some scan commands; the commands are same as what is described in the SSLyze CLI --help text. print '\nQueuing some commands...' plugins_process_pool.queue_plugin_task(server_info, 'sslv3') plugins_process_pool.queue_plugin_task(server_info, 'reneg') plugins_process_pool.queue_plugin_task(server_info, 'certinfo_basic') # Process the results reneg_result = None print '\nProcessing results...' for server_info, plugin_command, plugin_result in plugins_process_pool.get_results(): # Each plugin result has attributes with the information you're looking for, specific to each plugin # All these attributes are documented within each plugin's module if plugin_result.plugin_command == 'sslv3': # Do something with the result print 'SSLV3 cipher suites' for cipher in plugin_result.accepted_cipher_list: print ' {}'.format(cipher.name) elif plugin_result.plugin_command == 'reneg': reneg_result = plugin_result print 'Client renegotiation: {}'.format(plugin_result.accepts_client_renegotiation) print 'Secure renegotiation: {}'.format(plugin_result.supports_secure_renegotiation) elif plugin_result.plugin_command == 'certinfo_basic': print 'Server Certificate CN: {}'.format(plugin_result.certificate_chain[0].as_dict['subject']['commonName']) # All plugin results also always expose two APIs: # What the SSLyze CLI would output to the console print '\nSSLyze text output' for line in reneg_result.as_text(): print line print '\nSSLyze XML node' # The XML node for the SSLyze CLI XML output print reneg_result.as_xml() # You should use the process pool to make scans quick, but you can also call plugins directly from sslyze.plugins.openssl_cipher_suites_plugin import OpenSslCipherSuitesPlugin print '\nCalling a plugin directly...' plugin = OpenSslCipherSuitesPlugin() plugin_result = plugin.process_task(server_info, 'tlsv1') for cipher in plugin_result.accepted_cipher_list: print ' {}'.format(cipher.name)
# Add ./lib to the path for importing nassl import os import sys sys.path.insert(1, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) from sslyze.plugins_finder import PluginsFinder from sslyze.plugins_process_pool import PluginsProcessPool from sslyze.server_connectivity import ServerConnectivityInfo, ServerConnectivityError from sslyze.ssl_settings import TlsWrappedProtocolEnum # Setup the servers to scan and ensure they are reachable hostname = 'smtp.gmail.com' try: server_info = ServerConnectivityInfo(hostname=hostname, port=587, tls_wrapped_protocol=TlsWrappedProtocolEnum.STARTTLS_SMTP) server_info.test_connectivity_to_server() except ServerConnectivityError as e: # Could not establish an SSL connection to the server raise RuntimeError('Error when connecting to {}: {}'.format(hostname, e.error_msg)) # Get the list of available plugins sslyze_plugins = PluginsFinder() # Create a process pool to run scanning commands concurrently plugins_process_pool = PluginsProcessPool(sslyze_plugins) # Queue some scan commands print '\nQueuing some commands...' plugins_process_pool.queue_plugin_task(server_info, 'sslv3') plugins_process_pool.queue_plugin_task(server_info, 'reneg') plugins_process_pool.queue_plugin_task(server_info, 'certinfo_basic') # Process the results reneg_result = None print '\nProcessing results...' for server_info, plugin_command, plugin_result in plugins_process_pool.get_results(): # Each plugin result has attributes with the information you're looking for, specific to each plugin # All these attributes are documented within each plugin's module if plugin_result.plugin_command == 'sslv3': # Do something with the result print 'SSLV3 cipher suites' for cipher in plugin_result.accepted_cipher_list: print ' {}'.format(cipher.name) elif plugin_result.plugin_command == 'reneg': reneg_result = plugin_result print 'Client renegotiation: {}'.format(plugin_result.accepts_client_renegotiation) print 'Secure renegotiation: {}'.format(plugin_result.supports_secure_renegotiation) elif plugin_result.plugin_command == 'certinfo_basic': print 'Server Certificate CN: {}'.format(plugin_result.certificate_chain[0].as_dict['subject']['commonName']) # All plugin results also always expose two APIs: # What the SSLyze CLI would output to the console print '\nSSLyze text output' for line in reneg_result.as_text(): print line print '\nSSLyze XML node' # The XML node for the SSLyze CLI XML output print reneg_result.as_xml() # You should use the process pool to make scans quick, but you can also call plugins directly from sslyze.plugins.openssl_cipher_suites_plugin import OpenSslCipherSuitesPlugin print '\nCalling a plugin directly...' plugin = OpenSslCipherSuitesPlugin() plugin_result = plugin.process_task(server_info, 'tlsv1') for cipher in plugin_result.accepted_cipher_list: print ' {}'.format(cipher.name)
agpl-3.0
Python
a57e38233679bf6d95dad533d87ce1c69c00cc26
Include process name
aknuds1/docker-memusage
docker-memusage.py
docker-memusage.py
#!/usr/bin/env python from collections import OrderedDict import os.path import re def parse_mem_file(filename): data = OrderedDict() with open(filename, 'rb') as f: for line in f: splittage = line.split(':') data[splittage[0]] = splittage[1].strip() return data def get_system_mem_usage(): """Return the information in /proc/meminfo as a dictionary.""" return parse_mem_file('/proc/meminfo') def get_process_mem_usage(): re_pid = re.compile(r'^\d+$') re_mem = re.compile(r'^(\d+) .+$') pid2usage = {} for pid in [d for d in os.listdir('/proc') if re_pid.match(d)]: fpath = os.path.join('/proc', pid, 'status') try: data = parse_mem_file(fpath) except IOError: continue try: name = data['Name'] pid2usage[(pid, name)] = int( re_mem.match(data['VmHWM']).group(1)) / 1024. except KeyError: continue return OrderedDict( sorted(pid2usage.iteritems(), key=lambda x: x[1], reverse=True)) pid2usage = get_process_mem_usage() total_usage = sum(pid2usage.values()) print('Total memory usage: {:.2f}'.format(total_usage)) for pid_etc, usage in pid2usage.iteritems(): [pid, name] = pid_etc print('{} ({}): {:.2f} MB'.format(name, pid, usage))
#!/usr/bin/env python from collections import OrderedDict from pprint import pprint import os.path import re import sys def parse_mem_file(filename): data = OrderedDict() with open(filename, 'rb') as f: for line in f: splittage = line.split(':') data[splittage[0]] = splittage[1].strip() return data def get_system_mem_usage(): """Return the information in /proc/meminfo as a dictionary.""" return parse_mem_file('/proc/meminfo') def get_process_mem_usage(): re_pid = re.compile(r'^\d+$') re_mem = re.compile(r'^(\d+) .+$') pid2usage = {} for pid in [d for d in os.listdir('/proc') if re_pid.match(d)]: fpath = os.path.join('/proc', pid, 'status') try: data = parse_mem_file(fpath) except IOError: continue try: pid2usage[pid] = int(re_mem.match(data['VmHWM']).group(1)) / 1024. except KeyError: continue return OrderedDict( sorted(pid2usage.iteritems(), key=lambda x: x[1], reverse=True)) pid2usage = get_process_mem_usage() total_usage = sum(pid2usage.values()) print('Total memory usage: {:.2f}'.format(total_usage)) for pid, usage in pid2usage.iteritems(): print('{}: {:.2f} MB'.format(pid, usage))
mit
Python
57b707b7f7e7076f8c1f84e57ba3a3db45135340
Fix compilation for macOS Mountain Lion
avail/protobuf-int64,JacksonTian/protobuf,JacksonTian/protobuf,avail/protobuf-int64,avail/protobuf-int64,avail/protobuf-int64,pzduniak/protobuf-int64,XadillaX/protobuf,chrisdew/protobuf,XadillaX/protobuf,chrisdew/protobuf,chrisdew/protobuf,pzduniak/protobuf-int64,pzduniak/protobuf-int64,XadillaX/protobuf,JacksonTian/protobuf,pzduniak/protobuf-int64,chrisdew/protobuf,XadillaX/protobuf,pzduniak/protobuf-int64,chrisdew/protobuf,JacksonTian/protobuf,XadillaX/protobuf,avail/protobuf-int64,JacksonTian/protobuf
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "protobuf_for_node", "include_dirs": ["protobuf/src"], "dependencies": ["protobuf/protobuf.gyp:protobuf_full_do_not_use"], "sources": [ "protobuf_for_node.cc", "addon.cc" ], 'conditions': [ [ 'OS =="mac"',{ 'xcode_settings':{ 'OTHER_CFLAGS' : [ '-mmacosx-version-min=10.7' ] } } ] ] } ] }
{ "targets": [ { "target_name": "protobuf_for_node", "include_dirs": ["protobuf/src"], "dependencies": ["protobuf/protobuf.gyp:protobuf_full_do_not_use"], "sources": [ "protobuf_for_node.cc", "addon.cc" ] } ] }
apache-2.0
Python
644fbef7030f0685be7dd056606ab23daaefdc72
Fix typo in error message variable
pipex/gitbot,pipex/gitbot,pipex/gitbot
app/gitlab.py
app/gitlab.py
from __future__ import absolute_import from __future__ import unicode_literals from .webhooks import WebHook from werkzeug.exceptions import BadRequest, NotImplemented EVENTS = { 'Push Hook': 'push', 'Tag Push Hook': 'tag_push', 'Issue Hook': 'issue', 'Note Hook': 'note', 'Merge Request Hook': 'merge_request' } class GitlabWebHook(WebHook): def event(self, request): gitlab_header = request.headers.get('X-Gitlab-Event', None) if not gitlab_header: raise BadRequest('Gitlab requests must provide a X-Gitlab-Event header') event = EVENTS.get(gitlab_header, None) if not event: raise NotImplemented('Header not understood %s' % gitlab_header) if event == 'note': if 'commit' in request.json: event = 'commit_comment' elif 'merge_request' in request.json: event = 'merge_request_comment' elif 'issue' in request.json: event = 'issue_comment' elif 'snippet' in request.json: event = 'snippet_comment' return event
from __future__ import absolute_import from __future__ import unicode_literals from .webhooks import WebHook from werkzeug.exceptions import BadRequest, NotImplemented EVENTS = { 'Push Hook': 'push', 'Tag Push Hook': 'tag_push', 'Issue Hook': 'issue', 'Note Hook': 'note', 'Merge Request Hook': 'merge_request' } class GitlabWebHook(WebHook): def event(self, request): gitlab_header = request.headers.get('X-Gitlab-Event', None) if not gitlab_header: raise BadRequest('Gitlab requests must provide a X-Gitlab-Event header') event = EVENTS.get(gitlab_header, None) if not event: raise NotImplemented('Header not understood %s' % githab_header) if event == 'note': if 'commit' in request.json: event = 'commit_comment' elif 'merge_request' in request.json: event = 'merge_request_comment' elif 'issue' in request.json: event = 'issue_comment' elif 'snippet' in request.json: event = 'snippet_comment' return event
apache-2.0
Python
893b9947ef8d884ff67c84a60ea2c251b408a6d0
update build_db.py script
Mause/TransperthCached,Mause/TransperthCached,Mause/TransperthCached
build_db.py
build_db.py
import json import os import sqlite3 WEEKDAYS = 0x1 SATURDAY = 0x2 SUNDAY = 0x3 def setup(conn): cursor = conn.cursor() cursor.execute( ''' CREATE TABLE IF NOT EXISTS visit ( stop_num text, visit_day_type integer, route_num integer, hour integer, minute integer ) ''' ) cursor.execute('CREATE INDEX visit_stop_num_idx ON visit (stop_num);') cursor.execute( 'CREATE TABLE "android_metadata" ("locale" TEXT DEFAULT "en_US");' ) cursor.execute( 'INSERT INTO "android_metadata" VALUES ("en_US")' ) def dump_data(data, conn): cursor = conn.cursor() for stop_num, day_types in data.items(): types = zip([WEEKDAYS, SATURDAY, SUNDAY], day_types) for day_type_num, day_type in types: for visit in day_type: hour, minute = map(int, visit[1].split(':')) cursor.execute( 'INSERT INTO visit VALUES (?, ?, ?, ?, ?)', ( str(stop_num), day_type_num, visit[0], hour, minute ) ) def main(): db = 'assets/transperthcache.db' if os.path.exists(db): os.unlink(db) conn = sqlite3.connect(db) setup(conn) with open('transperthcache.json') as fh: dump_data( json.load(fh), conn ) conn.commit() conn.close() if __name__ == '__main__': main()
import json import os import sqlite3 WEEKDAYS = 0x1 SATURDAY = 0x2 SUNDAY = 0x3 def setup(conn): cursor = conn.cursor() cursor.execute( ''' CREATE TABLE IF NOT EXISTS visit ( stop_num text, visit_day_type integer, route_num integer, hour integer, minute integer ) ''' ) cursor.execute('CREATE INDEX visit_stop_num_idx ON visit (stop_num);') def dump_data(data, conn): cursor = conn.cursor() for stop_num, day_types in data.items(): types = zip([WEEKDAYS, SATURDAY, SUNDAY], day_types) for day_type_num, day_type in types: for visit in day_type: hour, minute = map(int, visit[1].split(':')) cursor.execute( 'INSERT INTO visit VALUES (?, ?, ?, ?, ?)', ( str(stop_num), day_type_num, visit[0], hour, minute ) ) def main(): db = 'Assets/transperthcache.db' if os.path.exists(db): os.unlink(db) conn = sqlite3.connect(db) setup(conn) with open('transperthcache.json') as fh: dump_data( json.load(fh), conn ) conn.commit() if __name__ == '__main__': main()
apache-2.0
Python
7f01aa6deaa9a13ca388fb4c84849bce53d34d5f
Make sure C++11 is used under Mac OS
ozra/mmap-io,ozra/mmap-io,ozra/mmap-io,ozra/mmap-io,ozra/mmap-io
binding.gyp
binding.gyp
{ "targets": [{ "target_name": "mmap-io", "sources": [ "src/mmap-io.cc" ], "include_dirs": [ "<!(node -e \"require('nan')\")" ], "cflags_cc": [ "-std=c++11" ], "conditions": [ [ 'OS=="mac"', { "xcode_settings": { 'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11','-stdlib=libc++'], 'OTHER_LDFLAGS': ['-stdlib=libc++'], 'MACOSX_DEPLOYMENT_TARGET': '10.7' }} ] ] }] }
{ "targets": [{ "target_name": "mmap-io", "sources": [ "src/mmap-io.cc" ], "include_dirs": [ "<!(node -e \"require('nan')\")" ], "cflags_cc": [ "-std=c++11" ] }] }
mit
Python
6b6948b4dcf7400eefcfb2a499c0180d03052550
Remove unnecessary string formatting
sampadsaha5/sympy,jaimahajan1997/sympy,sampadsaha5/sympy,skidzo/sympy,jaimahajan1997/sympy,jaimahajan1997/sympy,souravsingh/sympy,sampadsaha5/sympy,souravsingh/sympy,chaffra/sympy,skidzo/sympy,aktech/sympy,drufat/sympy,postvakje/sympy,yashsharan/sympy,yashsharan/sympy,aktech/sympy,madan96/sympy,drufat/sympy,madan96/sympy,madan96/sympy,aktech/sympy,kaushik94/sympy,chaffra/sympy,souravsingh/sympy,postvakje/sympy,drufat/sympy,chaffra/sympy,rahuldan/sympy,skidzo/sympy,postvakje/sympy,kaushik94/sympy,rahuldan/sympy,yashsharan/sympy,rahuldan/sympy,kaushik94/sympy
sympy/matrices/expressions/dotproduct.py
sympy/matrices/expressions/dotproduct.py
from __future__ import print_function, division from sympy.core import Basic from sympy.core.sympify import _sympify from sympy.matrices.expressions.transpose import transpose from sympy.matrices.expressions.matexpr import MatrixExpr class DotProduct(MatrixExpr): """ Dot Product of vector matrices """ def __new__(cls, arg1, arg2): arg1, arg2 = _sympify((arg1, arg2)) if not arg1.is_Matrix: raise TypeError("Argument 1 of DotProduct is not a matrix") if not arg2.is_Matrix: raise TypeError("Argument 2 of DotProduct is not a matrix") if not (1 in arg1.shape): raise TypeError("Argument 1 of DotProduct is not a vector") if not (1 in arg2.shape): raise TypeError("Argument 2 of DotProduct is not a vector") if arg1.shape != arg2.shape: raise TypeError("Input to Dot Product, %s and %s, are not of same dimensions" % (str(arg1), str(arg2))) return Basic.__new__(cls, arg1, arg2) def doit(self, expand=False): try: if self.args[0].shape[0] == 1: return (self.args[0]*transpose(self.args[1])).doit()[0] else: return (transpose(self.args[0])*self.args[1]).doit()[0] except (AttributeError, NotImplementedError): return self
from __future__ import print_function, division

from sympy.core import Basic
from sympy.core.sympify import _sympify
from sympy.matrices.expressions.transpose import transpose
from sympy.matrices.expressions.matexpr import MatrixExpr


class DotProduct(MatrixExpr):
    """
    Dot Product of vector matrices
    """

    def __new__(cls, arg1, arg2):
        arg1, arg2 = _sympify((arg1, arg2))

        if not arg1.is_Matrix:
            raise TypeError("Argument 1 of DotProduct is not a matrix" % str(arg1))
        if not arg2.is_Matrix:
            raise TypeError("Argument 2 of DotProduct is not a matrix" % str(arg2))
        if not (1 in arg1.shape):
            raise TypeError("Argument 1 of DotProduct is not a vector" % str(arg1))
        if not (1 in arg2.shape):
            raise TypeError("Argument 2 of DotProduct is not a vector" % str(arg1))
        if arg1.shape != arg2.shape:
            raise TypeError("Input to Dot Product, %s and %s, are not of same dimensions" % (str(arg1), str(arg2)))

        return Basic.__new__(cls, arg1, arg2)

    def doit(self, expand=False):
        try:
            if self.args[0].shape[0] == 1:
                return (self.args[0]*transpose(self.args[1])).doit()[0]
            else:
                return (transpose(self.args[0])*self.args[1]).doit()[0]
        except (AttributeError, NotImplementedError):
            return self
bsd-3-clause
Python
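The removed "% str(arg1)" calls applied the modulo operator to format strings that contain no placeholders, which itself raises a TypeError the moment the error message is built. A minimal reproduction of that bug class:

try:
    "Argument 1 of DotProduct is not a matrix" % "x"
except TypeError as err:
    print(err)   # not all arguments converted during string formatting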
5e2ef9885a65d61edcdffaef9e4f8a960bef567e
Refactor CAS tests.
jgosmann/fridge,jgosmann/fridge
fridge/test/test_cas.py
fridge/test/test_cas.py
import pytest

from fridge.cas import ContentAddressableStorage
from fridge.fstest import (
    assert_file_content_equal, assert_open_raises, write_file)
from fridge.memoryfs import MemoryFS


@pytest.fixture
def fs():
    return MemoryFS()


@pytest.fixture
def cas(fs):
    return ContentAddressableStorage('cas', fs)


class TestContentAddressableStorage(object):
    def has_root_property(self, fs):
        cas = ContentAddressableStorage(root='cas_root', fs=fs)
        assert cas.root == 'cas_root'

    def test_allows_to_store_and_retrieve_files(self, fs):
        write_file(fs, 'testfile', u'dummy content')
        cas = ContentAddressableStorage('cas', fs=fs)
        key = cas.store('testfile')

        # Close and reopen
        del cas
        cas = ContentAddressableStorage('cas', fs=fs)

        with fs.open(cas.get_path(key), 'r') as f:
            content = f.read()
        assert content == u'dummy content'

    def test_file_removed_after_store(self, fs, cas):
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        cas.store('testfile')
        assert not fs.exists('testfile')

    def test_writing_original_files_keeps_stored_file_unchanged(self, fs):
        write_file(fs, 'testfile', u'dummy content')
        cas = ContentAddressableStorage('cas', fs=fs)
        key = cas.store('testfile')
        del cas  # Close
        write_file(fs, 'testfile', u'replaced content')
        cas = ContentAddressableStorage('cas', fs=fs)
        assert_file_content_equal(fs, cas.get_path(key), u'dummy content')
import pytest

from fridge.cas import ContentAddressableStorage
from fridge.memoryfs import MemoryFS


class TestContentAddressableStorage(object):
    def create_cas(self, fs=None, path='cas'):
        if fs is None:
            fs = MemoryFS()
        return ContentAddressableStorage(path, fs)

    def has_root_property(self):
        cas = self.create_cas(path='cas_root')
        assert cas.root == 'cas_root'

    def test_allows_to_store_and_retrieve_files(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')

        # Close and reopen
        del cas
        cas = self.create_cas(fs)

        with fs.open(cas.get_path(key), 'r') as f:
            content = f.read()
        assert content == u'dummy content'

    def test_file_removed_after_store(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        cas.store('testfile')
        assert not fs.exists('testfile')

    def test_writing_original_files_keeps_stored_file_unchanged(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')
        del cas  # Close
        with fs.open('testfile', 'w') as f:
            f.write(u'replaced content')
        cas = self.create_cas(fs)
        with fs.open(cas.get_path(key), 'r') as f:
            content = f.read()
        assert content == u'dummy content'

    def test_stores_blobs_write_protected(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')
        with pytest.raises(OSError):
            with fs.open(cas.get_path(key), 'w'):
                pass
mit
Python
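The refactor replaces a create_cas helper with pytest fixtures; fixtures can depend on one another, so every test receives the same wired-up objects without setup boilerplate. A minimal runnable sketch of that pattern (the names here are illustrative stand-ins, not the project's classes):

import pytest

@pytest.fixture
def fs():
    return {}          # stand-in for an in-memory filesystem

@pytest.fixture
def cas(fs):           # this fixture depends on the fs fixture above
    fs['root'] = 'cas'
    return fs

def test_cas_shares_the_filesystem(cas, fs):
    assert cas is fs and fs['root'] == 'cas'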
167101baa4d57d22bc6a40d7ff8afd3688e23580
fix ControlText focusout bug
UmSenhorQualquer/pyforms
pyforms/gui/Controls/ControlText.py
pyforms/gui/Controls/ControlText.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

'''
@author: Ricardo Ribeiro
@credits: Ricardo Ribeiro
@license: MIT
@version: 0.0
@maintainer: Ricardo Ribeiro
@email: [email protected]
@status: Development
@lastEditedBy: Carlos Mão de Ferro ([email protected])
'''

from pyforms.gui.Controls.ControlBase import ControlBase
import pyforms.Utils.tools as tools
from PyQt4 import uic


class ControlText(ControlBase):

    def initForm(self):
        control_path = tools.getFileInSameDirectory(__file__, "textInput.ui")
        self._form = uic.loadUi(control_path)
        self.form.label.setText(self._label)
        self.form.lineEdit.setText(self._value)
        self.form.setToolTip(self.help)
        super(ControlText, self).initForm()
        self.form.lineEdit.editingFinished.connect(self.finishEditing)

    def finishEditing(self):
        """Function called when the lineEdit widget is edited"""
        self.changed()

    ###################################################################
    ############ Properties ###########################################
    ###################################################################

    @property
    def value(self):
        self._value = str(self._form.lineEdit.text())
        return self._value

    @value.setter
    def value(self, value):
        self._form.lineEdit.setText(value)
        ControlBase.value.fset(self, value)

    @property
    def label(self):
        return self.form.label.text()

    @label.setter
    def label(self, value):
        self.form.label.setText(value)
        ControlBase.label.fset(self, value)
#!/usr/bin/python
# -*- coding: utf-8 -*-

'''
@author: Ricardo Ribeiro
@credits: Ricardo Ribeiro
@license: MIT
@version: 0.0
@maintainer: Ricardo Ribeiro
@email: [email protected]
@status: Development
@lastEditedBy: Carlos Mão de Ferro ([email protected])
'''

from pyforms.gui.Controls.ControlBase import ControlBase
import pyforms.Utils.tools as tools
from PyQt4 import uic


class ControlText(ControlBase):

    def initForm(self):
        control_path = tools.getFileInSameDirectory(__file__, "textInput.ui")
        self._form = uic.loadUi(control_path)
        self.form.label.setText(self._label)
        self.form.lineEdit.setText(self._value)
        self.form.setToolTip(self.help)
        super(ControlText, self).initForm()
        self.form.lineEdit.editingFinished.connect(self.finishEditing)

    def finishEditing(self):
        """Function called when the lineEdit widget is edited"""
        self.changed()
        self.form.lineEdit.focusNextChild()

    ###################################################################
    ############ Properties ###########################################
    ###################################################################

    @property
    def value(self):
        self._value = str(self._form.lineEdit.text())
        return self._value

    @value.setter
    def value(self, value):
        self._form.lineEdit.setText(value)
        ControlBase.value.fset(self, value)

    @property
    def label(self):
        return self.form.label.text()

    @label.setter
    def label(self, value):
        self.form.label.setText(value)
        ControlBase.label.fset(self, value)
mit
Python
22cf663731bc556ef625695ab3213e87432ed4f9
fix docs link
angr/pyvex,angr/pyvex
pyvex/__init__.py
pyvex/__init__.py
""" PyVEX provides an interface that translates binary code into the VEX intermediate represenation (IR). For an introduction to VEX, take a look here: https://docs.angr.io/advanced-topics/ir """ __version__ = (8, 19, 4, 5) if bytes is str: raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.") import os import sys import cffi import pkg_resources from .vex_ffi import ffi_str as _ffi_str ffi = cffi.FFI() import logging logging.getLogger("pyvex").addHandler(logging.NullHandler()) def _find_c_lib(): # Load the c library for calling into VEX if sys.platform in ('win32', 'cygwin'): library_file = 'pyvex.dll' elif sys.platform == 'darwin': library_file = "libpyvex.dylib" else: library_file = "libpyvex.so" pyvex_path = pkg_resources.resource_filename(__name__, os.path.join('lib', library_file)) ffi.cdef(_ffi_str) # RTLD_GLOBAL used for sim_unicorn.so lib = ffi.dlopen(pyvex_path) lib.vex_init() # this looks up all the definitions (wtf) dir(lib) return lib pvc = _find_c_lib() # pylint: disable=wildcard-import from .enums import * from . import stmt, expr, const from .block import IRSB, IRTypeEnv from .expr import get_op_retty from .const import tag_to_const_class, get_type_size, get_type_spec_size from .lifting import lift, lifters from .errors import PyVEXError # aliases.... IRStmt = stmt IRExpr = expr IRConst = const
""" PyVEX provides an interface that translates binary code into the VEX intermediate represenation (IR). For an introduction to VEX, take a look here: https://docs.angr.io/docs/ir.html """ __version__ = (8, 19, 4, 5) if bytes is str: raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.") import os import sys import cffi import pkg_resources from .vex_ffi import ffi_str as _ffi_str ffi = cffi.FFI() import logging logging.getLogger("pyvex").addHandler(logging.NullHandler()) def _find_c_lib(): # Load the c library for calling into VEX if sys.platform in ('win32', 'cygwin'): library_file = 'pyvex.dll' elif sys.platform == 'darwin': library_file = "libpyvex.dylib" else: library_file = "libpyvex.so" pyvex_path = pkg_resources.resource_filename(__name__, os.path.join('lib', library_file)) ffi.cdef(_ffi_str) # RTLD_GLOBAL used for sim_unicorn.so lib = ffi.dlopen(pyvex_path) lib.vex_init() # this looks up all the definitions (wtf) dir(lib) return lib pvc = _find_c_lib() # pylint: disable=wildcard-import from .enums import * from . import stmt, expr, const from .block import IRSB, IRTypeEnv from .expr import get_op_retty from .const import tag_to_const_class, get_type_size, get_type_spec_size from .lifting import lift, lifters from .errors import PyVEXError # aliases.... IRStmt = stmt IRExpr = expr IRConst = const
bsd-2-clause
Python
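The module above loads its compiled VEX library through cffi: declare the C signatures with cdef, then dlopen the shared object and call into it. A minimal POSIX-only sketch of the same two steps against the C runtime (not pyvex's own library):

import cffi

ffi = cffi.FFI()
ffi.cdef("int printf(const char *format, ...);")  # declare before calling
libc = ffi.dlopen(None)   # None loads the process's C library on POSIX
libc.printf(b"hello from cffi\n")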
73b66a32763b7efe36612db7f3a3b4566d8e44a2
set uid=197610(OIdiot) gid=197610 groups=197610 as primary_key instead of
OIdiotLin/ClassManager-backends
app/models.py
app/models.py
from django.db import models

# Create your models here.


class Person(models.Model):
    id = models.AutoField(verbose_name = '索引', primary_key = True, unique = True)
    student_number = models.CharField(verbose_name = '学号', max_length = 12, unique = True)
    name = models.CharField(verbose_name = '姓名', max_length = 10)
    pinyin = models.CharField(verbose_name = '拼音', max_length = 25)
    gender = models.CharField(verbose_name = '性别', choices = (('F', 'Female'), ('M', 'Male')), max_length = 2)
    native_province = models.CharField(verbose_name = '籍贯', max_length = 10, blank = True)
    dormitory = models.CharField(verbose_name = '寝室', blank = True, max_length = 7)
    birthday = models.DateField(verbose_name = '生日', blank = True)
    phone_number = models.CharField(verbose_name = '手机号码', max_length = 11, blank = True)
    position = models.CharField(verbose_name = '职务', max_length = 20, blank = True)
    participation = models.PositiveSmallIntegerField(verbose_name = '活动参与分', default = 0)

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name


class Activity(models.Model):
    id = models.AutoField(verbose_name = '索引', primary_key = True, unique = True)
    name = models.CharField(verbose_name = '活动名称', max_length = 15)
    date = models.DateField(verbose_name = '日期', blank = True)
    time = models.TimeField(verbose_name = '开始时间', blank = True)
    place = models.CharField(verbose_name = '地点', max_length = 15, blank = True)
    content = models.TextField(verbose_name = '内容', blank = True)
    participation = models.SmallIntegerField(verbose_name = '参与得分', default = 0)
    participator = models.TextField(verbose_name = '参与者学号', blank = True)
    images = models.TextField(verbose_name = '相关图片urls', blank = True)

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name
from django.db import models

# Create your models here.


class Person(models.Model):
    student_number = models.CharField(verbose_name = '学号', max_length = 12, unique = True, primary_key = True)
    name = models.CharField(verbose_name = '姓名', max_length = 10)
    pinyin = models.CharField(verbose_name = '拼音', max_length = 25)
    gender = models.CharField(verbose_name = '性别', choices = (('F', 'Female'), ('M', 'Male')), max_length = 2)
    native_province = models.CharField(verbose_name = '籍贯', max_length = 10, blank = True)
    dormitory = models.CharField(verbose_name = '寝室', blank = True, max_length = 7)
    birthday = models.DateField(verbose_name = '生日', blank = True)
    phone_number = models.CharField(verbose_name = '手机号码', max_length = 11, blank = True)
    position = models.CharField(verbose_name = '职务', max_length = 20, blank = True)
    participation = models.PositiveSmallIntegerField(verbose_name = '活动参与分', default = 0)

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name


class Activity(models.Model):
    id = models.AutoField(verbose_name = '索引', primary_key = True, unique = True)
    name = models.CharField(verbose_name = '活动名称', max_length = 15)
    date = models.DateField(verbose_name = '日期', blank = True)
    time = models.TimeField(verbose_name = '开始时间', blank = True)
    place = models.CharField(verbose_name = '地点', max_length = 15, blank = True)
    content = models.TextField(verbose_name = '内容', blank = True)
    participation = models.SmallIntegerField(verbose_name = '参与得分', default = 0)
    participator = models.TextField(verbose_name = '参与者学号', blank = True)
    images = models.TextField(verbose_name = '相关图片urls', blank = True)

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name
mit
Python
c908db488f3e1d7aab0993780b38baaf4c995eb1
add docstrings
googlefonts/fontelemetry,googlefonts/fontelemetry
Lib/fontelemetry/datastructures/source.py
Lib/fontelemetry/datastructures/source.py
# Copyright 2019 Fontelemetry Authors and Contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Library version
from fontelemetry import __version__


# -----------------------
# Base classes
# -----------------------
class Source(object):
    """A source specification specific object that holds source data.

    The Source object is instantiated with an external library object that is
    instantiated on source read and used to manipulate source file data along
    with object attributes that maintain the original source file path and
    define a retrievable calling code defined unique ID field.

    Attributes:
        obj: (instance-specific) A source file object that is instantiated with an external library
        path: (string) source file or directory path
        id: (string) unique ID for an instantiated Source object

    For glyphs source, the object is a glyphsLib.GSFont object.
    For UFO source, the object is a fontTools.ufoLib.glifLib.GlyphSet object
    """
    def __init__(self, source_object, path=None, source_id=None):
        """Inits Source object with source file read data from external libraries.

        Args:
            source_object: (instance-specific) A source file object that is instantiated with an external library
            path: (string) path to file or directory used to instantiate source_object
            source_id: (string) unique ID value for this object
        """
        self.obj = source_object
        self.path = path
        self.id = source_id

    def __repr__(self):
        return "({} v{} is defined as: {})".format(self.__class__, __version__, self.__dict__)

    def __str__(self):
        return "{}".format(self.__dict__)

    def get_source_path(self):
        """Returns source path attribute string."""
        return self.path

    def yield_ordered_glyphobj(self):
        """Generator that yields ordered external library glyph-level objects"""
        raise NotImplementedError


# ------------------------------------
# Inherited classes
# ------------------------------------
class GlyphsSource(Source):
    """See base class."""
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        for glyph in self.obj.glyphs:
            yield glyph


class UFOSource(Source):
    """See base class."""
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        # TODO
        pass
# Copyright 2019 Fontelemetry Authors and Contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Library version
from fontelemetry import __version__


# -----------------------
# Base classes
# -----------------------
class Source(object):
    def __init__(self, source_object, path=None, source_id=None):
        self.obj = source_object
        self.path = path
        self.id = source_id

    def __repr__(self):
        return "({} v{} is defined as: {})".format(self.__class__, __version__, self.__dict__)

    def __str__(self):
        return "{}".format(self.__dict__)

    def get_source_path(self):
        return self.path

    def yield_ordered_glyphobj(self):
        raise NotImplementedError


# ------------------------------------
# Inherited classes
# ------------------------------------
class GlyphsSource(Source):
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        for glyph in self.obj.glyphs:
            yield glyph


class UFOSource(Source):
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        # TODO
        pass
apache-2.0
Python
0e913b3fc20e69a6ff77bafcc144e00175f8ed83
Put new classes to submodule level import
bgyori/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/indra,johnbachman/indra,johnbachman/belpy,johnbachman/indra,sorgerlab/indra
indra/assemblers/english/__init__.py
indra/assemblers/english/__init__.py
from .assembler import EnglishAssembler, AgentWithCoordinates, SentenceBuilder
from .assembler import EnglishAssembler
bsd-2-clause
Python
f6f3073322684beaf49b6fd88f766502b98ee889
reduce unused imports
PookMook/Scripts,PookMook/Scripts
bibparse.py
bibparse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, sys, bibtexparser
from bibtexparser.bwriter import BibTexWriter

folder=sys.argv[1] if len(sys.argv) > 1 else "bib"

if os.path.exists(folder+'-clean'):
    print 'cleaning '+folder+'-clean/'
    for file in os.listdir(folder+'-clean'):
        try:
            if os.path.isfile(folder+'-clean/'+file):
                os.unlink(folder+'-clean/'+file)
        except Exception as e:
            print(e)
else:
    os.makedirs(folder+'-clean')

#Writer customization
writer = BibTexWriter()
writer.contents = ['entries']
writer.indent = '    '
writer.order_entries_by = ('ENTRYTYPE', 'author', 'year')

print 'Parsing files in '+folder+'/'
for file in os.listdir(folder):
    if file.endswith(".bib"):
        print(os.path.join(folder, file))
        with open(os.path.join(folder, file)) as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
            for entry in bib_database.entries:
                entry['keywords'] = entry.get('keywords', '')
                if(entry['keywords'] != ''):
                    entry['keywords'] = 'cleBib/' + entry['ID'] + ', article/' + os.path.splitext(file)[0] + ', ' + entry['keywords']
                else:
                    entry['keywords'] = 'cleBib/' + entry['ID'] + ', article/' + os.path.splitext(file)[0]
            with open(os.path.join(folder+'-clean', file), 'w') as bibtex_export:
                bibtex_export_str = bibtexparser.dumps(bib_database, writer)
                bibtex_export.write(bibtex_export_str.encode('utf8'))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, sys, bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter

folder=sys.argv[1] if len(sys.argv) > 1 else "bib"

if os.path.exists(folder+'-clean'):
    print 'cleaning '+folder+'-clean/'
    for file in os.listdir(folder+'-clean'):
        try:
            if os.path.isfile(folder+'-clean/'+file):
                os.unlink(folder+'-clean/'+file)
        except Exception as e:
            print(e)
else:
    os.makedirs(folder+'-clean')

#Writer customization
writer = BibTexWriter()
writer.contents = ['entries']
writer.indent = '    '
writer.order_entries_by = ('ENTRYTYPE', 'author', 'year')

print 'Parsing files in '+folder+'/'
for file in os.listdir(folder):
    if file.endswith(".bib"):
        print(os.path.join(folder, file))
        with open(os.path.join(folder, file)) as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
            for entry in bib_database.entries:
                entry['keywords'] = entry.get('keywords', '')
                if(entry['keywords'] != ''):
                    entry['keywords'] = 'cleBib/' + entry['ID'] + ', article/' + os.path.splitext(file)[0] + ', ' + entry['keywords']
                else:
                    entry['keywords'] = 'cleBib/' + entry['ID'] + ', article/' + os.path.splitext(file)[0]
            with open(os.path.join(folder+'-clean', file), 'w') as bibtex_export:
                bibtex_export_str = bibtexparser.dumps(bib_database, writer)
                bibtex_export.write(bibtex_export_str.encode('utf8'))
mit
Python
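The same load/tag/dump round trip works on an in-memory entry; the dict keys mirror bibtexparser's interface used above (entries are dicts with uppercase 'ID' and 'ENTRYTYPE' keys). A small sketch, assuming bibtexparser v1 is installed:

import bibtexparser

db = bibtexparser.loads("@article{smith2016, title={T}, year={2016}}")
for entry in db.entries:
    extra = entry.get('keywords', '')
    entry['keywords'] = ', '.join(k for k in ('cleBib/' + entry['ID'], extra) if k)
print(bibtexparser.dumps(db))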
36608c6bd0035e4a78da2cd30d9fcca2c660ec3a
Add prepare in rpc client
redhat-cip/numeter,enovance/numeter,enovance/numeter,redhat-cip/numeter,redhat-cip/numeter,redhat-cip/numeter,enovance/numeter,enovance/numeter
common/numeter/queue/client.py
common/numeter/queue/client.py
from oslo import messaging
from oslo.config import cfg

import logging

LOG = logging.getLogger(__name__)


class BaseAPIClient(messaging.RPCClient):
    def __init__(self, transport):
        target = messaging.Target(topic='default_topic')
        super(BaseAPIClient, self).__init__(transport, target)

    def ping(self, context, topic, args=None):
        print 'Launch ping topic=%s' % topic
        cctxt = self.prepare(topic=topic)
        #return cctxt.call(context,'ping', args=args)
        return cctxt.cast(context,'ping', args=args)

    def poller_msg(self, context, topic, args=None):
        LOG.info('Send message %s context %s' % (topic, context))
        cctxt = self.prepare(topic=topic)
        return cctxt.cast(context,'poller_msg', args=args)


def get_rpc_client(hosts=[]):
    conf = cfg.CONF
    conf.transport_url = 'rabbit://'
    conf.rabbit_max_retries = 1
    conf.rabbit_hosts = hosts
    transport = messaging.get_transport(conf)
    return BaseAPIClient(transport)
from oslo import messaging
from oslo.config import cfg

import logging

LOG = logging.getLogger(__name__)


class BaseAPIClient(messaging.RPCClient):
    def __init__(self, transport):
        target = messaging.Target(topic='default_topic')
        super(BaseAPIClient, self).__init__(transport, target)

    def ping(self, context, topic, args=None):
        print 'Launch ping topic=%s' % topic
        cctxt = self.prepare(topic=topic)
        #return cctxt.call(context,'ping', args=args)
        return cctxt.cast(context,'ping', args=args)

    def poller_msg(self, context, topic, args=None):
        LOG.info('Send message %s context %s' % (topic, context))
        args['topic'] = topic
        return self.cast(context,'poller_msg', args=args)


def get_rpc_client(hosts=[]):
    conf = cfg.CONF
    conf.transport_url = 'rabbit://'
    conf.rabbit_max_retries = 1
    conf.rabbit_hosts = hosts
    transport = messaging.get_transport(conf)
    return BaseAPIClient(transport)
agpl-3.0
Python
34a811429e2025f396f8997aeb628253487537fb
Change Sparser call pattern along with actual exec
pvtodorov/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,sorgerlab/belpy,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,johnbachman/indra,bgyori/indra,johnbachman/indra,bgyori/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra
indra/sources/sparser/sparser_api.py
indra/sources/sparser/sparser_api.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import json
import logging
import subprocess
import xml.etree.ElementTree as ET
from indra.util import UnicodeXMLTreeBuilder as UTB
from .processor import SparserXMLProcessor, SparserJSONProcessor

logger = logging.getLogger('sparser')

sparser_path_var = 'SPARSERPATH'
sparser_path = os.environ.get(sparser_path_var)


def process_xml(xml_str):
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    sp = _process_elementtree(tree)
    return sp


def process_nxml(fname, output_format='json'):
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_format == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_format == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_format)
    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    subprocess.call([sparser_exec_path, format_flag, fname])
    output_fname = fname.split('.')[0] + '-semantics' + suffix
    with open(output_fname, 'rt') as fh:
        json_dict = json.load(fh)
    return process_json_dict(json_dict)


def process_json_dict(json_dict):
    sp = SparserJSONProcessor(json_dict)
    sp.get_statements()
    return sp


def _process_elementtree(tree):
    sp = SparserXMLProcessor(tree)
    sp.get_modifications()
    sp.get_activations()
    return sp
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import logging
import subprocess
import xml.etree.ElementTree as ET
from indra.util import UnicodeXMLTreeBuilder as UTB
from .processor import SparserProcessor

logger = logging.getLogger('sparser')

sparser_path_var = 'SPARSERPATH'
sparser_path = os.environ.get(sparser_path_var)


def process_xml(xml_str):
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    sp = _process_elementtree(tree)
    return sp


def process_nxml(fname, output_format='json'):
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_format == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_format == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_format)
    subprocess.call([sparser_path, format_flag, fname])
    output_fname = fname.split('.')[0] + '-semantics' + suffix
    with open(output_fname, 'rb') as fh:
        json_dict = json.load(fh)
    return process_json_dict(json_dict)


def process_json_dict(json_dict):
    sp = SparserJSONProcessor(json_dict)
    sp.get_statements()
    return sp


def _process_elementtree(tree):
    sp = SparserXMLProcessor(tree)
    sp.get_modifications()
    sp.get_activations()
    return sp
bsd-2-clause
Python
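The fix builds the executable path with os.path.join and runs it through subprocess.call, which blocks until the child exits and returns its status; passing arguments as a list avoids shell quoting entirely. A tiny sketch (assumes a POSIX system where echo is an executable):

import subprocess

status = subprocess.call(['echo', 'saving semantics'])  # argv list, no shell
assert status == 0   # a non-zero status would mean the tool failed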
a9365aa4a32fbe358a6f74b5730a7a3a0a8b3cda
Convert journal to pickled extra
pianohacker/qualia
qualia/journal.py
qualia/journal.py
import base64
import datetime
import pickle
import sqlite3


class Journal:
    def __init__(self, filename):
        self.db = sqlite3.connect(
            filename,
            detect_types = sqlite3.PARSE_DECLTYPES
        )
        self.upgrade_if_needed()
        self.f = open(filename, 'ab')

    def upgrade_if_needed(self):
        version = self.db.execute('PRAGMA user_version').fetchone()[0]

        if version < 1:
            self.db.executescript("""
                CREATE TABLE journal (
                    serial INTEGER PRIMARY KEY,
                    timestamp TIMESTAMP,
                    source TEXT,
                    file TEXT,
                    op TEXT,
                    extra BLOB
                );
            """)

        self.db.execute("PRAGMA user_version = 1")

    def append(self, source, file, op, *args, time = None):
        cur = self.db.cursor()
        cur.execute('''
            INSERT INTO journal(timestamp, source, file, op, extra)
            VALUES(?, ?, ?, ?, ?)
            ''',
            (time or datetime.datetime.now(), source, file, op, pickle.dumps(args))
        )
        self.db.commit()
import datetime
import sqlite3


class Journal:
    def __init__(self, filename):
        self.db = sqlite3.connect(
            filename,
            detect_types = sqlite3.PARSE_DECLTYPES
        )
        self.upgrade_if_needed()
        self.f = open(filename, 'ab')

    def upgrade_if_needed(self):
        version = self.db.execute('PRAGMA user_version').fetchone()[0]

        if version < 1:
            self.db.executescript("""
                CREATE TABLE journal (
                    serial INTEGER PRIMARY KEY,
                    timestamp TIMESTAMP,
                    source TEXT,
                    file TEXT,
                    op TEXT,
                    extra BLOB
                );
            """)

        self.db.execute("PRAGMA user_version = 1")

    def append(self, source, file, op, *args, time = None):
        cur = self.db.cursor()
        cur.execute('''
            INSERT INTO journal(timestamp, source, file, op, extra)
            VALUES(?, ?, ?, ?, ?)
            ''',
            (time or datetime.datetime.now(), source, file, op, (b'\t'.join(str(arg).encode('unicode-escape') for arg in args)))
        )
        self.db.commit()
mpl-2.0
Python
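Pickling preserves the argument tuple exactly, where the old tab-joined str() encoding lost the original types. A round trip showing why:

import pickle

args = ('set-metadata', 'file-hash', 42)
blob = pickle.dumps(args)            # bytes, safe to store in a BLOB column
assert pickle.loads(blob) == args    # ints stay ints, unlike str()-joining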
ce6e67890b5860d89e9c3ea6628a7a94ad9e10b3
Update Default_Settings.py
jossthomas/Enigma-Machine
components/Default_Settings.py
components/Default_Settings.py
#Sequences of actual rotors used in WWII, format is name, sequences, turnover notch(es)
rotor_sequences = {
    'I': ('EKMFLGDQVZNTOWYHXUSPAIBRCJ', ('Q')),
    'II': ('AJDKSIRUXBLHWTMCQGZNPYFVOE', ('E')),
    'III': ('BDFHJLCPRTXVZNYEIWGAKMUSQO', ('V')),
    'IV': ('ESOVPZJAYQUIRHXLNFTGKDCMWB', ('J')),
    'V': ('VZBRGITYUPSDNHLXAWMJQOFECK', ('Z')),
    'VI': ('JPGVOUMFYQBENHZRDKASXLICTW', ('Z', 'M')),
    'VII': ('NZJHGRCXMYSWBOUFAIVLPEKQDT', ('Z', 'M')),
    'VIII': ('FKQHTLXOCBJSPDZRAMEWNIUYGV', ('Z', 'M')),
    'IC': ('DMTWSILRUYQNKFEJCAZBPGXOHV', ('Q')), #civilian
    'IIC': ('HQZGPJTMOBLNCIFDYAWVEUSRKX', ('Q')), #civilian
    'IIIC': ('UQNTLSZFMREHDPXKIBVYGJCWOA', ('Q')), #civilian
    'BETA': ('LEYJVCNIXWPBQMDRTAKZGFUHOS', None), #Position 4 Only
    'GAMMA': ('FSOKANUERHMBTIYCWLQPZXVGJD', None) #Position 4 Only
}

#Simple letter substitutions before the sequence is sent back through the rotors. Notably a letter cannot be encoded as itself here.
reflector_sequences = {
    'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',
    'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',
    'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',
    'B Thin': 'ENKQAUYWJICOPBLMDXZVFTHRGS',
    'C Thin': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ',
    'None': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' #Early models had no reflector
}

#Entry wheel for Enigma I
ETW = {
    'Standard': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    'Navy': 'QWERTZUIOPASDFGHJKLYXCVBNM'
}
#Sequences of actual rotors used in WWII, format is name, sequences, turnover notch(es)
rotor_sequences = {
    'I': ('EKMFLGDQVZNTOWYHXUSPAIBRCJ', ('Q')),
    'II': ('AJDKSIRUXBLHWTMCQGZNPYFVOE', ('E')),
    'III': ('BDFHJLCPRTXVZNYEIWGAKMUSQO', ('V')),
    'IV': ('ESOVPZJAYQUIRHXLNFTGKDCMWB', ('J')),
    'V': ('VZBRGITYUPSDNHLXAWMJQOFECK', ('Z')),
    'VI': ('JPGVOUMFYQBENHZRDKASXLICTW', ('Z', 'M')),
    'VII': ('NZJHGRCXMYSWBOUFAIVLPEKQDT', ('Z', 'M')),
    'VIII': ('FKQHTLXOCBJSPDZRAMEWNIUYGV', ('Z', 'M')),
    'IC': ('DMTWSILRUYQNKFEJCAZBPGXOHV', ('Q')), #civilian
    'IIC': ('HQZGPJTMOBLNCIFDYAWVEUSRKX', ('Q')), #civilian
    'IIIC': ('UQNTLSZFMREHDPXKIBVYGJCWOA', ('Q')), #civilian
    'BETA': ('LEYJVCNIXWPBQMDRTAKZGFUHOS', None), #Position 4 Only
    'GAMMA': ('FSOKANUERHMBTIYCWLQPZXVGJD', None) #Position 4 Only
}

#Simple letter substitutions before the sequence is sent back through the rotors. Notably a letter cannot be encoded as itself here.
reflector_sequences = {
    'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',
    'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',
    'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',
    'B Thin': 'ENKQAUYWJICOPBLMDXZVFTHRGS',
    'C Thin': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ',
    'None': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' #Early models had no reflector
}

#Entry wheel for Enigma I
ETW = {
    'Standard': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    'Navy': 'QWERTZIOPASDFGHJKLYXCVBNM'
}
mit
Python
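The bug here was a missing 'U' in the Navy entry wheel, leaving only 25 letters. Every wiring string must be a 26-letter permutation of A-Z, which a one-line invariant check catches immediately:

import string

def is_valid_wiring(seq):
    # a rotor/entry-wheel wiring must permute the full alphabet
    return sorted(seq) == list(string.ascii_uppercase)

assert is_valid_wiring('QWERTZUIOPASDFGHJKLYXCVBNM')       # fixed Navy ETW
assert not is_valid_wiring('QWERTZIOPASDFGHJKLYXCVBNM')    # old one dropped 'U'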
64bd44d4338d57a68ff07527d1d2c3b37960c63b
call parent filter, cleanup
masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
# MIT License
# Copyright (c) 2019 MassChallenge, Inc.

from django.db.models import Value as V
from django.db.models.functions import Concat

from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
    MentorProgramOfficeHourHelper,
)

ID_FIELDS = ['mentor_id', 'finalist_id']
NAME_FIELDS = ['mentor_name', 'finalist_name']


class MentorProgramOfficeHourListView(BaseListView):
    view_name = "office_hour"
    helper_class = MentorProgramOfficeHourHelper

    def filter(self, qs):
        qs = super().filter(qs)
        if not self.request.query_params.keys():
            return qs
        if self._has_participant_filter(NAME_FIELDS):
            return self._filter_by_participant_name(qs)
        if self._has_participant_filter(ID_FIELDS):
            param_items = self.request.query_params.dict().items()
            return self._filter_by_participant_id(qs, param_items)

    def _filter_by_participant_name(self, qs):
        params = self.request.query_params
        mentor_name = params.get('mentor_name', None)
        finalist_name = params.get('finalist_name', None)
        if mentor_name:
            return self._generate_name_query(qs, 'mentor', mentor_name)
        if finalist_name:
            return self._generate_name_query(qs, 'finalist', finalist_name)
        return qs.none()

    def _generate_name_query(self, qs, user, name_value):
        first_name_field = '{}__first_name'.format(user)
        last_name_field = '{}__last_name'.format(user)
        result = qs.annotate(
            full_name=Concat(
                first_name_field, V(' '), last_name_field)).filter(
                    full_name__icontains=name_value)
        return result

    def _filter_by_participant_id(self, qs, param_items):
        filter_values = {
            key: value for key, value in param_items
            if key in ID_FIELDS and value.isdigit()}
        if filter_values:
            return qs.filter(**filter_values)
        return qs.none()

    def _has_participant_filter(self, fields):
        return any(
            field in self.request.query_params.keys() for field in fields)
# MIT License
# Copyright (c) 2019 MassChallenge, Inc.

from django.db.models import Value as V
from django.db.models.functions import Concat

from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
    MentorProgramOfficeHourHelper,
)

ID_FIELDS = ['mentor_id', 'finalist_id']
NAME_FIELDS = ['mentor_name', 'finalist_name']


class MentorProgramOfficeHourListView(BaseListView):
    view_name = "office_hour"
    helper_class = MentorProgramOfficeHourHelper

    def filter(self, qs):
        if not self.request.query_params.keys():
            return qs
        if self._has_participant_filter(NAME_FIELDS):
            return self._filter_by_participant_name(qs)
        if self._has_participant_filter(ID_FIELDS):
            param_items = self.request.query_params.dict().items()
            return self._filter_by_participant_id(qs, param_items)

    def _filter_by_participant_name(self, qs):
        params = self.request.query_params
        mentor_name = params.get('mentor_name', None)
        finalist_name = params.get('finalist_name', None)
        if mentor_name:
            return self._generate_name_query(qs, 'mentor', mentor_name)
        if finalist_name:
            return self._generate_name_query(qs, 'finalist', finalist_name)
        return qs.none()

    def _generate_name_query(self, qs, user, name_value):
        first_name_field = '{}__first_name'.format(user)
        last_name_field = '{}__last_name'.format(user)
        result = qs.annotate(
            full_name=Concat(
                first_name_field, V(' '), last_name_field)).filter(
                    full_name__icontains=name_value)
        return result

    def _filter_by_participant_id(self, qs, param_items):
        filter_values = {
            key: value for key, value in param_items
            if key in ID_FIELDS and value.isdigit()}
        if filter_values:
            return qs.filter(**filter_values)
        return qs.none()

    def _has_participant_filter(self, fields):
        return any(
            key in self.request.query_params.keys() for key in fields)
mit
Python
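Calling super().filter(qs) first lets the parent view's generic filtering run before the subclass narrows the queryset further; skipping it silently drops base behaviour. The shape of the pattern, reduced to plain lists:

class BaseListView:
    def filter(self, items):
        return [i for i in items if i is not None]

class OfficeHourListView(BaseListView):
    def filter(self, items):
        items = super().filter(items)   # parent filtering first, as in the fix
        return [i for i in items if i > 0]

assert OfficeHourListView().filter([3, None, -1]) == [3]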
abb00ac993154071776488b5dcaef32cc2982f4c
Fix broken functional tests on windows
josephharrington/ClusterRunner,box/ClusterRunner,josephharrington/ClusterRunner,nickzuber/ClusterRunner,Medium/ClusterRunner,nickzuber/ClusterRunner,box/ClusterRunner,Medium/ClusterRunner
test/functional/master/test_endpoints.py
test/functional/master/test_endpoints.py
import os
import tempfile

import yaml

from test.framework.functional.base_functional_test_case import BaseFunctionalTestCase
from test.functional.job_configs import BASIC_JOB


class TestMasterEndpoints(BaseFunctionalTestCase):

    def setUp(self):
        super().setUp()
        self._project_dir = tempfile.TemporaryDirectory()

    def _start_master_only_and_post_a_new_job(self):
        master = self.cluster.start_master()
        build_resp = master.post_new_build({
            'type': 'directory',
            'config': yaml.safe_load(BASIC_JOB.config[os.name])['BasicJob'],
            'project_directory': self._project_dir.name,
        })
        build_id = build_resp['build_id']
        return master, build_id

    def test_cancel_build(self):
        master, build_id = self._start_master_only_and_post_a_new_job()
        master.cancel_build(build_id)
        master.block_until_build_finished(build_id)
        self.assert_build_has_canceled_status(build_id=build_id)

    def test_get_artifact_before_it_is_ready(self):
        master, build_id = self._start_master_only_and_post_a_new_job()

        # Since we didn't start any slaves so the artifacts is actually not ready.
        _, status_code = master.get_build_artifacts(build_id)
        self.assertEqual(status_code, 202)

        # Cancel the started build just to speed up teardown (avoid teardown timeout waiting for empty queue)
        master.cancel_build(build_id)
import os

import yaml

from test.framework.functional.base_functional_test_case import BaseFunctionalTestCase
from test.functional.job_configs import BASIC_JOB


class TestMasterEndpoints(BaseFunctionalTestCase):

    def _start_master_only_and_post_a_new_job(self):
        master = self.cluster.start_master()
        build_resp = master.post_new_build({
            'type': 'directory',
            'config': yaml.safe_load(BASIC_JOB.config[os.name])['BasicJob'],
            'project_directory': '/tmp',
        })
        build_id = build_resp['build_id']
        return master, build_id

    def test_cancel_build(self):
        master, build_id = self._start_master_only_and_post_a_new_job()
        master.cancel_build(build_id)
        master.block_until_build_finished(build_id)
        self.assert_build_has_canceled_status(build_id=build_id)

    def test_get_artifact_before_it_is_ready(self):
        master, build_id = self._start_master_only_and_post_a_new_job()

        # Since we didn't start any slaves so the artifacts is actually not ready.
        _, status_code = master.get_build_artifacts(build_id)
        self.assertEqual(status_code, 202)

        # Cancel the started build just to speed up teardown (avoid teardown timeout waiting for empty queue)
        master.cancel_build(build_id)
apache-2.0
Python
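Hard-coding '/tmp' breaks on Windows; tempfile picks a platform-appropriate directory and cleans up after itself, which is exactly what the fix leans on:

import os
import tempfile

with tempfile.TemporaryDirectory() as project_dir:   # valid on Windows too
    path = os.path.join(project_dir, 'scratch.txt')
    open(path, 'w').close()
    assert os.path.exists(path)
assert not os.path.exists(project_dir)               # removed on exit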
8d4c7c94dba6708758732d74228e1337bd9f0b83
raise version number
trichter/yam
yam/__init__.py
yam/__init__.py
__version__ = '0.2.2-dev'

from yam.main import run
from yam.commands import read_dicts
__version__ = '0.2.1'

from yam.main import run
from yam.commands import read_dicts
mit
Python
71d0f02e1274829a302cdd6f716f2fc0680cce49
Update fab.py
ArabellaTech/ydcommon,ArabellaTech/ydcommon,ArabellaTech/ydcommon
ydcommon/fab.py
ydcommon/fab.py
from fabric.api import local, sudo, run
from fabric.operations import prompt
from fabric.colors import red
from fabric.contrib.console import confirm


def get_branch_name(on_local=True):
    cmd = "git branch --no-color 2> /dev/null | sed -e '/^[^*]/d'"
    if on_local:
        name = local(cmd, capture=True).replace("* ", "")
    else:
        name = run(cmd)
    return name.replace("* ", "").strip()


def switch(stage):
    """
    Switch to given stage (dev/qa/production) + pull
    """
    stage = stage.lower()
    local("git pull")
    if stage in ['dev', 'devel', 'develop']:
        branch_name = 'develop'
    elif stage in ['qa', 'release']:
        branches = local('git branch -r', capture=True)
        possible_branches = []
        for b in branches.split("\n"):
            b_parts = b.split('/')
            if b_parts[1] == 'release':
                possible_branches.append(b_parts[2])
        if len(possible_branches) == 0:
            raise Exception('No release branches found. Please create a new release first.')
        possible_branches = sorted(possible_branches, reverse=True)
        branch_name = 'release/%s' % possible_branches[0]
    elif stage in ['production', 'master']:
        branch_name = 'master'
    else:
        raise NotImplemented
    local("git checkout %s" % branch_name)
    local("git pull")


def release_qa():
    """
    Release code to QA server
    """
    name = prompt(red('Sprint name?'), default='Sprint 1').lower().replace(' ', "_")
    date = prompt(red('Sprint start date (Y-m-d)?'), default='2013-01-20').replace('-', '')
    release_name = '%s_%s' % (date, name)
    local('git flow release start %s' % release_name)
    local('git flow release publish %s' % release_name)
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def update_qa():
    """
    Merge code from develop to qa
    """
    switch('dev')
    switch('qa')
    local('git merge --no-edit develop')
    local('git push')
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def check_branch(environment, user):
    if environment == 'qa':
        local_branch = get_branch_name()
        remote_branch = get_branch_name(False)
        if local_branch != remote_branch:
            change = confirm(red('Branch on server is different, do you want to checkout %s ?' % local_branch), default=True)
            if change:
                sudo('git checkout %s' % local_branch, user=user)
from fabric.api import local, sudo, run
from fabric.operations import prompt
from fabric.colors import red
from fabric.contrib.console import confirm


def get_branch_name(on_local=True):
    cmd = "git branch --no-color 2> /dev/null | sed -e '/^[^*]/d'"
    if on_local:
        name = local(cmd, capture=True).replace("* ", "")
    else:
        name = run(cmd)
    return name.replace("* ", "").strip()


def switch(stage):
    """
    Switch to given stage (dev/qa/production) + pull
    """
    stage = stage.lower()
    local("git pull")
    if stage in ['dev', 'devel', 'develop']:
        branch_name = 'develop'
    elif stage in ['qa', 'release']:
        branches = local('git branch -r', capture=True)
        possible_branches = []
        for b in branches.split("\n"):
            b_parts = b.split('/')
            if b_parts[1] == 'release':
                possible_branches.append(b_parts[2])
        if len(possible_branches) == 0:
            raise Exception('No release branches found. Please create a new release first.')
        possible_branches = sorted(possible_branches, reverse=True)
        branch_name = 'release/%s' % possible_branches[0]
    elif stage in ['production', 'master']:
        branch_name = 'master'
    else:
        raise NotImplemented
    local("git checkout %s" % branch_name)
    local("git pull")


def release_qa():
    """
    Release code to QA server
    """
    name = prompt(red('Sprint name?'), default='Sprint 1').lower().replace(' ', "_")
    date = prompt(red('Sprint start date (Y-m-d)?'), default='2013-01-20').replace('-', '')
    release_name = '%s_%s' % (date, name)
    local('git flow release start %s' % release_name)
    local('git flow release publish %s' % release_name)
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def update_qa():
    """
    Merge code from develop to qa
    """
    switch('dev')
    switch('qa')
    local('git merge --no-edit develop')
    local('git push')
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def check_branch(environment, user):
    if environment == 'qa':
        local_branch = get_branch_name()
        remote_branch = get_branch_name(False)
        if local_branch != remote_branch:
            change = confirm(red('Branch on server is different, do you want to replace your local branch with server version?'), default=True)
            if change:
                sudo('git checkout %s' % local_branch, user=user)
mit
Python
95aa4c210c735bd9ac74a65cdbef418d99beb319
Bump to v0.2.0
gisce/sii
sii/__init__.py
sii/__init__.py
# -*- coding: utf-8 -*-
__LIBRARY_VERSION__ = '0.2.0'
__SII_VERSION__ = '0.7'
# -*- coding: utf-8 -*-
__LIBRARY_VERSION__ = '0.1.0alpha'
__SII_VERSION__ = '0.7'
mit
Python
510e04dfd68eeca2e940487eeca9e7474e7f2383
Fix methodcheck.py for the new API documentation style (split into subsections)
tjfontaine/linode-python,ryanshawty/linode-python
linode/methodcheck.py
linode/methodcheck.py
#!/usr/bin/python
"""
A quick script to verify that api.py is in sync with Linode's published
list of methods.

Copyright (c) 2010 Josh Wright <[email protected]>
Copyright (c) 2009 Ryan Tucker <[email protected]>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

#The list of subsections found in the API documentation. This should
#probably be discovered automatically in the future
api_subsections = ('linode', 'nodebalancer', 'stackscript', 'dns', 'utility')

import api
import re
import itertools
from HTMLParser import HTMLParser
from urllib import unquote
from urllib2 import urlopen


class SubsectionParser(HTMLParser):
    base_url = 'http://www.linode.com/api/'

    def __init__(self, subsection):
        HTMLParser.__init__(self)
        self.subsection_re = re.compile('/api/%s/(.*)$' % subsection)
        self.methods = []
        url = self.base_url + subsection
        req = urlopen(url)
        self.feed(req.read())

    def handle_starttag(self, tag, attrs):
        if tag == 'a' and attrs:
            attr_dict = dict(attrs)
            match = self.subsection_re.match(attr_dict.get('href', ''))
            if match:
                self.methods.append(unquote(match.group(1)).replace('.','_'))

local_methods = api.Api.valid_commands()
remote_methods = list(itertools.chain(*[SubsectionParser(subsection).methods for subsection in api_subsections]))

# Cross-check!
for i in local_methods:
    if i not in remote_methods:
        print('REMOTE Missing: ' + i)
for i in remote_methods:
    if i not in local_methods:
        print('LOCAL Missing: ' + i)
#!/usr/bin/python
"""
A quick script to verify that api.py is in sync with Linode's published
list of methods.

Copyright (c) 2010 Josh Wright <[email protected]>
Copyright (c) 2009 Ryan Tucker <[email protected]>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

# URL of API documentation
apidocurl = 'http://www.linode.com/api/autodoc.cfm'

import api
import re
import urllib

tmpfile, httpheaders = urllib.urlretrieve(apidocurl)
tmpfd = open(tmpfile)

local_methods = api.Api.valid_commands()
remote_methods = []

# Read in the list of methods Linode has
rg = re.compile('.*?\\?method=((?:[a-z][a-z\\.\\d\\-]+)\\.(?:[a-z][a-z\\-]+))(?![\\w\\.])')
for i in tmpfd.readlines():
    m = rg.search(i)
    if m:
        remote_methods.append(m.group(1).replace('.','_'))

# Cross-check!
for i in local_methods:
    if i not in remote_methods:
        print('REMOTE Missing: ' + i)
for i in remote_methods:
    if i not in local_methods:
        print('LOCAL Missing: ' + i)
mit
Python
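The rewrite scrapes method names by subclassing HTMLParser and matching each anchor's href. The same shape on Python 3, where the module moved to html.parser (names here are illustrative):

from html.parser import HTMLParser   # Python 3 home of py2's HTMLParser

class LinkParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.methods = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            href = dict(attrs).get('href', '')
            if href.startswith('/api/linode/'):
                self.methods.append(href.rsplit('/', 1)[1].replace('.', '_'))

p = LinkParser()
p.feed('<a href="/api/linode/linode.list">linode.list</a>')
assert p.methods == ['linode_list']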
e0521c1f9a12819fd89f12aed01c623628dc4c4d
Build options added.
JDevlieghere/InTeXration,JDevlieghere/InTeXration
intexration/propertyhandler.py
intexration/propertyhandler.py
import configparser
import os


class Build:
    def __init__(self, name, idx, bib):
        self._name = name
        self._idx = idx
        self._bib = bib

    def get_name(self):
        return self._name

    def get_idx(self):
        return self._idx

    def get_bib(self):
        return self._bib

    def get_tex(self):
        return self._name + '.tex'

    def get_pdf(self):
        return self._name + '.pdf'

    def get_log(self):
        return self._name + '.log'


class PropertyHandler:
    def __init__(self, path):
        self._path = path

    def get_builds(self):
        builds = []
        if os.path.exists(self._path):
            parser = configparser.ConfigParser()
            parser.read(self._path)
            for build_name in parser.sections():
                if parser.has_option(build_name, 'idx'):
                    idx = parser[build_name]['idx']
                else:
                    idx = build_name + '.idx'
                if parser.has_option(build_name, 'bib'):
                    bib = parser[build_name]['bib']
                else:
                    bib = build_name
                builds.append(Build(build_name, idx, bib))
        return builds
import configparser
import os


class Build:
    def __init__(self, name, idx, bib):
        self._name = name
        self._idx = idx
        self._bib = bib

    def get_name(self):
        return self._name

    def get_idx(self):
        return self._idx

    def get_bib(self):
        return self._bib

    def get_tex(self):
        return self._name + '.tex'

    def get_pdf(self):
        return self._name + '.pdf'

    def get_log(self):
        return self._name + '.log'


class PropertyHandler:
    def __init__(self, path):
        self._path = path

    def get_builds(self):
        builds = []
        if os.path.exists(self._path):
            parser = configparser.ConfigParser()
            parser.read(self._path)
            for build_name in parser.sections():
                idx = build_name + '.idx'
                bib = build_name
                builds.append(Build(build_name, idx, bib))
        return builds
apache-2.0
Python
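The new build options read per-section keys with defaults; configparser's fallback argument expresses the same idea without has_option branching. A standalone sketch (the section and keys are demo values, not the project's):

import configparser

parser = configparser.ConfigParser()
parser.read_string("[paper]\nidx = custom.idx\n")
for name in parser.sections():
    idx = parser.get(name, 'idx', fallback=name + '.idx')  # -> custom.idx
    bib = parser.get(name, 'bib', fallback=name)           # falls back: paper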
f27dc9d2793bb555d80a5c8e6635ba246278d017
Add DES support
boppreh/simplecrypto
simplecrypto.py
simplecrypto.py
import hashlib
import math
from base64 import b64encode, b64decode

from Crypto.Cipher import DES, AES
from Crypto import Random

random_instance = Random.new()
algorithms = {'aes': AES, 'des': DES}


def sha1(message):
    return hashlib.sha1(message).hexdigest()


def md5(message):
    return hashlib.md5(message).hexdigest()


def sha256(message):
    return hashlib.sha256(message).hexdigest()


def sha512(message):
    return hashlib.sha152(message).hexdigest()


def str_to_base64(message):
    return b64encode(message)


def base64_to_str(message):
    return b64decode(message)

base64 = str_to_base64


def pad(message, length, padding=' '):
    return message + (length - len(message)) * padding


def pad_multiple(message, len_multiple, padding=' '):
    next_length = math.ceil(len(message) / float(len_multiple)) * len_multiple
    return pad(message, int(next_length), padding)


def random(n_bytes):
    return random_instance.read(n_bytes)


def encrypt(message, password, algorithm='aes'):
    cls = algorithms[algorithm]
    iv = random(cls.block_size)
    instance = cls.new(pad_multiple(password, 16), cls.MODE_CFB, iv)
    return str_to_base64(iv + instance.encrypt(message))


def decrypt(message, password, algorithm='aes'):
    message = base64_to_str(message)
    iv, message = message[:AES.block_size], message[AES.block_size:]
    instance = AES.new(pad_multiple(password, 16), AES.MODE_CFB, iv)
    return instance.decrypt(message)


def encrypt_aes(message, password):
    return encrypt(message, password, 'aes')


def decrypt_aes(message, password):
    return decrypt(message, password, 'aes')


def encrypt_des(message, password):
    return encrypt(message, password, 'des')


def decrypt_des(message, password):
    return decrypt(message, password, 'des')
import hashlib
import math
import base64

from Crypto.Cipher import DES, AES
from Crypto import Random

random_instance = Random.new()
algorithms = {'aes': AES, 'des': DES}


def sha1(message):
    return hashlib.sha1(message).hexdigest()


def md5(message):
    return hashlib.md5(message).hexdigest()


def sha256(message):
    return hashlib.sha256(message).hexdigest()


def sha512(message):
    return hashlib.sha152(message).hexdigest()


def str_to_base64(message):
    return base64.b64encode(message)


def base64_to_str(message):
    return base64.b64decode(message)


def pad(message, length, padding=' '):
    return message + (length - len(message)) * padding


def pad_multiple(message, len_multiple, padding=' '):
    next_length = math.ceil(len(message) / float(len_multiple)) * len_multiple
    return pad(message, int(next_length), padding)


def random(n_bytes):
    return random_instance.read(n_bytes)


def encrypt(message, password, algorithm='aes'):
    cls = algorithms[algorithm]
    iv = random(cls.block_size)
    instance = cls.new(pad_multiple(password, 16), cls.MODE_CFB, iv)
    return str_to_base64(iv + instance.encrypt(message))


def decrypt(message, password, algorithm='aes'):
    message = base64_to_str(message)
    iv, message = message[:AES.block_size], message[AES.block_size:]
    instance = AES.new(pad_multiple(password, 16), AES.MODE_CFB, iv)
    return instance.decrypt(message)


def encrypt_aes(message, password):
    return encrypt(message, password, 'aes')


def decrypt_aes(message, password):
    return decrypt(message, password, 'aes')
mit
Python
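Both helpers above follow the same recipe: random IV of the cipher's block size, IV prepended to the ciphertext, IV sliced back off before decrypting. A round trip with AES alone, assuming PyCrypto or PyCryptodome is installed (the key is a 16-byte demo value):

from Crypto import Random
from Crypto.Cipher import AES

key = b'sixteen byte key'                 # AES accepts 16/24/32-byte keys
iv = Random.new().read(AES.block_size)    # fresh IV per message
ct = iv + AES.new(key, AES.MODE_CFB, iv).encrypt(b'attack at dawn')
pt = AES.new(key, AES.MODE_CFB, ct[:AES.block_size]).decrypt(ct[AES.block_size:])
assert pt == b'attack at dawn'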
1bd814df2c5175ac7745b2d58fbe6b82c5a941ae
add 'debug' hack
jmiserez/sts,jmiserez/sts,ucb-sts/sts,ucb-sts/sts
sts/util/console.py
sts/util/console.py
BEGIN = '\033[1;'
END = '\033[1;m'


class color(object):
    GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num : BEGIN + str(num) + "m", range(30, 39))
    B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: BEGIN + str(num) + "m", range(40, 49))
    NORMAL = END


class msg():
    global_io_master = None
    BEGIN = '\033[1;'
    END = '\033[1;m'
    GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num: str(num) + "m", range(30, 39))
    B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: str(num) + "m", range(40, 49))

    @staticmethod
    def interactive(message):
        # todo: would be nice to simply give logger a color arg, but that doesn't exist...
        print msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def event(message):
        print msg.BEGIN + msg.CYAN + message + msg.END

    @staticmethod
    def raw_input(message):
        prompt = msg.BEGIN + msg.WHITE + message + msg.END
        if msg.global_io_master:
            s = msg.global_io_master.raw_input(prompt)
        else:
            s = raw_input(prompt)
        if s == "debug":
            import pdb
            pdb.set_trace()
        return s

    @staticmethod
    def success(message):
        print msg.BEGIN + msg.B_GREEN + msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def fail(message):
        print msg.BEGIN + msg.B_RED + msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def set_io_master(io_master):
        msg.global_io_master = io_master

    @staticmethod
    def unset_io_master():
        msg.global_io_master = None
BEGIN = '\033[1;'
END = '\033[1;m'


class color(object):
    GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num : BEGIN + str(num) + "m", range(30, 39))
    B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: BEGIN + str(num) + "m", range(40, 49))
    NORMAL = END


class msg():
    global_io_master = None
    BEGIN = '\033[1;'
    END = '\033[1;m'
    GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num: str(num) + "m", range(30, 39))
    B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: str(num) + "m", range(40, 49))

    @staticmethod
    def interactive(message):
        # todo: would be nice to simply give logger a color arg, but that doesn't exist...
        print msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def event(message):
        print msg.BEGIN + msg.CYAN + message + msg.END

    @staticmethod
    def raw_input(message):
        prompt = msg.BEGIN + msg.WHITE + message + msg.END
        if msg.global_io_master:
            return msg.global_io_master.raw_input(prompt)
        else:
            return raw_input(prompt)

    @staticmethod
    def success(message):
        print msg.BEGIN + msg.B_GREEN + msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def fail(message):
        print msg.BEGIN + msg.B_RED + msg.BEGIN + msg.WHITE + message + msg.END

    @staticmethod
    def set_io_master(io_master):
        msg.global_io_master = io_master

    @staticmethod
    def unset_io_master():
        msg.global_io_master = None
apache-2.0
Python
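The 'debug' hack drops into pdb whenever that word is typed at the prompt; pdb.set_trace() starts the interactive debugger in the calling frame. The pattern in isolation:

import pdb

def handle_input(s):
    if s == "debug":
        pdb.set_trace()   # the debugger opens right here, with local state
    return s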
4987412578744db64984cb40841994b3852287f7
update evalrunner
selentd/pythontools
pytools/src/IndexEval/evalrunner.py
pytools/src/IndexEval/evalrunner.py
'''
Created on 04.11.2015

@author: selen00r
'''
import datetime

from pymongo.mongo_client import MongoClient

import evalresult


class EvalRunner(object):
    '''
    Base class to run an evaluation of an index.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.dbName = "indexdb"
        self.idxATX = "atx"
        self.idxCAC = "cac"
        self.idxDax = "dax"
        self.idxDowJones = "dowjones"
        self.idxEStoxx50 = "estoxx50"
        self.idxFTS100 = "ftse100"
        self.idxFtseMib = "ftsemib"
        self.idxHangSeng = "hangseng"
        self.idxIbex = "ibex"
        self.idxMDax = "mdax"
        self.idxNasdaq100 = "nasdaq100"
        self.idxNikkei = "nikkei"
        self.idxSMI = "smi"
        self.idxSP500 = "sp500"
        self.idxTecDax = "tecdax"

        self.allIndices = [self.idxATX, self.idxCAC, self.idxDax,
                           self.idxDowJones, self.idxEStoxx50, self.idxFTS100,
                           self.idxFtseMib, self.idxHangSeng, self.idxIbex,
                           self.idxMDax, self.idxNasdaq100, self.idxNikkei,
                           self.idxSMI, self.idxTecDax]

    def setUp(self):
        self.mongoClient = MongoClient()
        self.database = self.mongoClient[self.dbName]

        self.startDate = datetime.datetime( 2000, 1, 1 )
        self.endDate = datetime.datetime( 2015, 12, 1 )

        self.startInvest = 1000.0
        self.fixedInvest = True

        self.excludeChecker = evalresult.ExcludeTransaction()
        self.resultCalculator = evalresult.ResultCalculator()
        self.resultCalculatorEuro = evalresult.ResultCalculatorEuro(self.startInvest, self.fixedInvest)

    def tearDown(self):
        pass
''' Created on 04.11.2015 @author: selen00r ''' import datetime from pymongo.mongo_client import MongoClient import evalresult class EvalRunner(object): ''' Base class to run an evaluation of an index. ''' def __init__(self): ''' Constructor ''' self.dbName = "indexdb" self.idxDax = "dax" self.idxMDax = "mdax" self.idxTecDax = "tecdax" self.idxSP500 = "sp500" self.idxNasdaq100 = "nasdaq100" self.idxEStoxx50 = "estoxx50" self.idxNikkei = "nikkei" self.idxSMI = "smi" self.idxATX = "atx" self.idxCAC = "cac" self.idxDowJones = "dowjones" self.idxFTS100 = "fts100" self.idxFtseMib = "ftsemib" self.idxHangSeng = "hangseng" self.idxIbex = "ibex" self.allIndices = [self.idxATX, self.idxCAC, self.idxDax, self.idxDowJones, self.idxEStoxx50, self.idxFTS100, self.idxFtseMib, self.idxHangSeng, self.idxIbex, self.idxMDax, self.idxNasdaq100, self.idxNikkei, self.idxSMI, self.idxTecDax] def setUp(self): self.mongoClient = MongoClient() self.database = self.mongoClient[self.dbName] self.startDate = datetime.datetime( 2000, 1, 1 ) self.endDate = datetime.datetime( 2015, 10, 1 ) self.startInvest = 1000.0 self.fixedInvest = True self.excludeChecker = evalresult.ExcludeTransaction() self.resultCalculator = evalresult.ResultCalculator() self.resultCalculatorEuro = evalresult.ResultCalculatorEuro(self.startInvest, self.fixedInvest) def tearDown(self): pass
apache-2.0
Python
5ba73b9dd92b55b3f02f76ae981e53744abac750
Add an option to time SQL statements
jeffweeksio/sir
sir/__main__.py
sir/__main__.py
# Copyright (c) 2014 Wieland Hoffmann # License: MIT, see LICENSE for details import argparse import logging import multiprocessing from . import config from .indexing import reindex from .schema import SCHEMA from sqlalchemy import exc as sa_exc logger = logging.getLogger("sir") def watch(args): raise NotImplementedError def main(): loghandler = logging.StreamHandler() formatter = logging.Formatter(fmt="%(processName)s %(asctime)s %(levelname)s: %(message)s") loghandler.setFormatter(formatter) logger.addHandler(loghandler) mplogger = multiprocessing.get_logger() mplogger.setLevel(logging.ERROR) mplogger.addHandler(loghandler) parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true") parser.add_argument("--sqltimings", action="store_true") subparsers = parser.add_subparsers() reindex_parser = subparsers.add_parser("reindex", help="Reindexes all or a single entity type") reindex_parser.set_defaults(func=reindex) reindex_parser.add_argument('--entities', action='append', help="""Which entity types to index. Available are: %s""" % (", ".join(SCHEMA.keys()))) watch_parser = subparsers.add_parser("watch", help="Watches for incoming messages on an AMQP queue") watch_parser.set_defaults(func=watch) args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) if args.sqltimings: from sqlalchemy import event from sqlalchemy.engine import Engine import time sqltimelogger = logging.getLogger("sqltimer") sqltimelogger.setLevel(logging.DEBUG) sqltimelogger.addHandler(loghandler) @event.listens_for(Engine, "before_cursor_execute") def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) sqltimelogger.debug("Start Query: %s" % statement) @event.listens_for(Engine, "after_cursor_execute") def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = time.time() - conn.info['query_start_time'].pop(-1) sqltimelogger.debug("Query Complete!") sqltimelogger.debug("Total Time: %f" % total) config.read_config() func = args.func args = vars(args) func(args["entities"]) if __name__ == '__main__': main()
# Copyright (c) 2014 Wieland Hoffmann # License: MIT, see LICENSE for details import argparse import logging import multiprocessing from . import config from .indexing import reindex from .schema import SCHEMA from sqlalchemy import exc as sa_exc logger = logging.getLogger("sir") def watch(args): raise NotImplementedError def main(): loghandler = logging.StreamHandler() formatter = logging.Formatter(fmt="%(processName)s %(asctime)s %(levelname)s: %(message)s") loghandler.setFormatter(formatter) logger.addHandler(loghandler) mplogger = multiprocessing.get_logger() mplogger.setLevel(logging.ERROR) mplogger.addHandler(loghandler) parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true") subparsers = parser.add_subparsers() reindex_parser = subparsers.add_parser("reindex", help="Reindexes all or a single entity type") reindex_parser.set_defaults(func=reindex) reindex_parser.add_argument('--entities', action='append', help="""Which entity types to index. Available are: %s""" % (", ".join(SCHEMA.keys()))) watch_parser = subparsers.add_parser("watch", help="Watches for incoming messages on an AMQP queue") watch_parser.set_defaults(func=watch) args = parser.parse_args() if args.debug: logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) config.read_config() func = args.func args = vars(args) func(args["entities"]) if __name__ == '__main__': main()
mit
Python
dde133a9ae751ce3caab8e8896c1e04e48c0cc1e
fix typo
adamrp/qiita,adamrp/qiita,RNAer/qiita,josenavas/QiiTa,biocore/qiita,ElDeveloper/qiita,adamrp/qiita,squirrelo/qiita,adamrp/qiita,antgonza/qiita,ElDeveloper/qiita,RNAer/qiita,antgonza/qiita,josenavas/QiiTa,josenavas/QiiTa,biocore/qiita,RNAer/qiita,josenavas/QiiTa,squirrelo/qiita,squirrelo/qiita,wasade/qiita,biocore/qiita,RNAer/qiita,antgonza/qiita,wasade/qiita,ElDeveloper/qiita,ElDeveloper/qiita,antgonza/qiita,biocore/qiita,wasade/qiita,squirrelo/qiita
qiita_pet/handlers/base_handlers.py
qiita_pet/handlers/base_handlers.py
from tornado.web import RequestHandler class BaseHandler(RequestHandler): def get_current_user(self): '''Overrides default method of returning user curently connected''' user = self.get_secure_cookie("user") if user is None: self.clear_cookie("user") return None else: return user.strip('" ') def write_error(self, status_code, **kwargs): '''Overrides the error page created by Tornado''' from traceback import format_exception if self.settings.get("debug") and "exc_info" in kwargs: exc_info = kwargs["exc_info"] trace_info = ''.join(["%s<br />" % line for line in format_exception(*exc_info)]) request_info = ''.join(["<strong>%s</strong>: %s<br />" % (k, self.request.__dict__[k]) for k in self.request.__dict__.keys()]) error = exc_info[1] self.render('error.html', error=error, trace_info=trace_info, request_info=request_info, user=self.current_user) def head(self): """Adds proper response for head requests""" self.finish() class MainHandler(BaseHandler): '''Index page''' def get(self): username = self.current_user completedanalyses = [] self.render("index.html", user=username, analyses=completedanalyses) class MockupHandler(BaseHandler): def get(self): self.render("mockup.html", user=self.current_user) class NoPageHandler(BaseHandler): def get(self): self.render("404.html", user=self.current_user)
from tornado.web import RequestHandler class BaseHandler(RequestHandler): def get_current_user(self): '''Overrides default method of returning user curently connected''' user = self.get_secure_cookie("user") if user is None: self.clear_cookie("user") return None else: return user.strip('" ') def write_error(self, status_code, **kwargs): '''Overrides the error page created by Tornado''' from traceback import format_exception if self.settings.get("debug") and "exc_info" in kwargs: exc_info = kwargs["exc_info"] trace_info = ''.join(["%s<br />" % line for line in format_exception(*exc_info)]) request_info = ''.join(["<strong>%s</strong>: %s<br />" % (k, self.request.__dict__[k]) for k in self.request.__dict__.keys()]) error = exc_info[1] self.render('error.html', error=error, trace_info=trace_info, request_info=request_info, user=self.current_user) def head(self): """Adds proper resonse for head requests""" self.finish() class MainHandler(BaseHandler): '''Index page''' def get(self): username = self.current_user completedanalyses = [] self.render("index.html", user=username, analyses=completedanalyses) class MockupHandler(BaseHandler): def get(self): self.render("mockup.html", user=self.current_user) class NoPageHandler(BaseHandler): def get(self): self.render("404.html", user=self.current_user)
bsd-3-clause
Python
f0f3a7ab0b285f447f0573ff537e6252a8752528
Use pkg-build in binding.gyp
blackbeam/tiff-multipage,blackbeam/tiff-multipage,blackbeam/tiff-multipage,blackbeam/tiff-multipage
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "tiff-multipage", "sources": [ "src/module.cc", "src/sync.cc", "src/async.cc", "src/tiff_multipage.cc" ], "include_dirs": [ "<!(node -e \"require('nan')\")", "<!@(pkg-config libtiff-4 --cflags-only-I | sed s/-I//g)" ], "libraries": [ "-ltiff", "<!@(pkg-config --libs libtiff-4)" ] } ] }
{ "targets": [ { "target_name": "tiff-multipage", "sources": [ "src/module.cc", "src/sync.cc", "src/async.cc", "src/tiff_multipage.cc" ], "include_dirs": ["<!(node -e \"require('nan')\")"], "libraries": [ "-ltiff" ] } ] }
mit
Python
c34f040ba19c27277d6cc9a1ad46e4c8d668e77b
Apply -DNDEBUG globally on release builds
ranisalt/node-argon2,ranisalt/node-argon2,ranisalt/node-argon2
binding.gyp
binding.gyp
{ "target_defaults": { "target_conditions": [ ["OS != 'win'", { "cflags": ["-fdata-sections", "-ffunction-sections", "-fvisibility=hidden"], "ldflags": ["-Wl,--gc-sections"] }], ["OS == 'mac'", { "xcode_settings": { "MACOSX_DEPLOYMENT_TARGET": "10.9", } }] ], "target_configurations": [ [{ "Release": { "defines+": ["NDEBUG"] } }] ] }, "targets": [ { "target_name": "libargon2", "sources": [ "argon2/src/argon2.c", "argon2/src/core.c", "argon2/src/blake2/blake2b.c", "argon2/src/thread.c", "argon2/src/encoding.c", ], "include_dirs": ["argon2/include"], "cflags": ["-march=native", "-Wno-type-limits"], "conditions": [ ["target_arch == 'ia32' or target_arch == 'x64'", { "cflags+": ["-msse", "-msse2"], "sources+": ["argon2/src/opt.c"] }, { "sources+": ["argon2/src/ref.c"] }] ], "type": "static_library" }, { "target_name": "argon2", "sources": [ "src/argon2_node.cpp" ], "include_dirs": [ "<!(node -e \"require('nan')\")", "argon2/include" ], "dependencies": ["libargon2"], "configurations": { "Debug": { "conditions": [ ["OS == 'linux'", { "cflags": ["--coverage", "-Wall", "-Wextra"], "ldflags": ["-fprofile-arcs", "-ftest-coverage"], }] ] } } } ] }
{ "target_defaults": { "target_conditions": [ ["OS != 'win'", { "cflags": ["-fdata-sections", "-ffunction-sections", "-fvisibility=hidden"], "ldflags": ["-Wl,--gc-sections"] }], ["OS == 'mac'", { "xcode_settings": { "MACOSX_DEPLOYMENT_TARGET": "10.9", } }] ] }, "targets": [ { "target_name": "libargon2", "sources": [ "argon2/src/argon2.c", "argon2/src/core.c", "argon2/src/blake2/blake2b.c", "argon2/src/thread.c", "argon2/src/encoding.c", ], "include_dirs": ["argon2/include"], "cflags": ["-march=native", "-Wno-type-limits"], "conditions": [ ["target_arch == 'ia32' or target_arch == 'x64'", { "cflags+": ["-msse", "-msse2"], "sources+": ["argon2/src/opt.c"] }, { "sources+": ["argon2/src/ref.c"] }] ], "type": "static_library" }, { "target_name": "argon2", "sources": [ "src/argon2_node.cpp" ], "include_dirs": [ "<!(node -e \"require('nan')\")", "argon2/include" ], "dependencies": ["libargon2"], "configurations": { "Debug": { "conditions": [ ["OS == 'linux'", { "cflags": ["--coverage", "-Wall", "-Wextra"], "ldflags": ["-fprofile-arcs", "-ftest-coverage"], }] ] }, "Release": { "defines+": ["NDEBUG"] } } } ] }
mit
Python
b0dea361dfb27e537c0165dac69e71c20f33e883
Add helpers to bindings.gyp
musocrat/jsaudio,JsAudioOrg/jsaudio,musocrat/jsaudio,JsAudioOrg/jsaudio,JsAudioOrg/jsaudio,musocrat/jsaudio,JsAudioOrg/jsaudio
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'jsaudio', 'sources': ['src/jsaudio.cc', 'src/helpers.cc'], 'include_dirs': [ '<!(node -e "require(\'nan\')")', '<(module_root_dir)/vendor/' ], "conditions": [ [ 'OS=="win"', { "conditions": [ [ 'target_arch=="ia32"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x86.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x86.dll', '<(module_root_dir)/vendor/portaudio_x86.lib', ] }] } ], [ 'target_arch=="x64"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x64.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x64.dll', '<(module_root_dir)/vendor/portaudio_x64.lib', ] }] } ] ], "include_dirs": ["gyp/include"] } ] ] }] }
{ 'targets': [ { 'target_name': 'jsaudio', 'sources': ['src/jsaudio.cc'], 'include_dirs': [ '<!(node -e "require(\'nan\')")', '<(module_root_dir)/vendor/' ], "conditions": [ [ 'OS=="win"', { "conditions": [ [ 'target_arch=="ia32"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x86.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x86.dll', '<(module_root_dir)/vendor/portaudio_x86.lib', ] }] } ], [ 'target_arch=="x64"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x64.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x64.dll', '<(module_root_dir)/vendor/portaudio_x64.lib', ] }] } ] ], "include_dirs": ["gyp/include"] } ] ] }] }
mit
Python
7e5cafed3908f829bb8ff334a7d8f6ebb939a7cc
fix test import for python3
Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService
d4s2_api/dukeds_auth.py
d4s2_api/dukeds_auth.py
from gcb_web_auth.dukeds_auth import DukeDSTokenAuthentication from gcb_web_auth.backends.dukeds import DukeDSAuthBackend from gcb_web_auth.backends.base import BaseBackend from .models import DukeDSUser class D4S2DukeDSTokenAuthentication(DukeDSTokenAuthentication): """ Extends authorization to save users to DukeDSUser """ def __init__(self): self.backend = DukeDSAuthBackend() class D4S2DukeDSAuthBackend(DukeDSAuthBackend): """ Backend for DukeDS Auth that save users to DukeDSUser Conveniently, the keys used by DukeDS user objects are a superset of the django ones, so we rely on the filtering in the base class """ def __init__(self, save_tokens=True, save_dukeds_users=True): super(D4S2DukeDSAuthBackend, self).__init__(save_tokens, save_dukeds_users) self.save_tokens = save_tokens self.save_dukeds_users = save_dukeds_users self.failure_reason = None def save_dukeds_user(self, user, raw_user_dict): user_dict = DukeDSAuthBackend.harmonize_dukeds_user_details(raw_user_dict) dukeds_user, created = DukeDSUser.objects.get_or_create(user=user, dds_id=raw_user_dict.get('id')) if created: BaseBackend.update_model(dukeds_user, user_dict)
from gcb_web_auth.dukeds_auth import DukeDSTokenAuthentication from gcb_web_auth.backends.dukeds import DukeDSAuthBackend from gcb_web_auth.backends.base import BaseBackend from models import DukeDSUser class D4S2DukeDSTokenAuthentication(DukeDSTokenAuthentication): """ Extends authorization to save users to DukeDSUser """ def __init__(self): self.backend = DukeDSAuthBackend() class D4S2DukeDSAuthBackend(DukeDSAuthBackend): """ Backend for DukeDS Auth that save users to DukeDSUser Conveniently, the keys used by DukeDS user objects are a superset of the django ones, so we rely on the filtering in the base class """ def __init__(self, save_tokens=True, save_dukeds_users=True): super(D4S2DukeDSAuthBackend, self).__init__(save_tokens, save_dukeds_users) self.save_tokens = save_tokens self.save_dukeds_users = save_dukeds_users self.failure_reason = None def save_dukeds_user(self, user, raw_user_dict): user_dict = DukeDSAuthBackend.harmonize_dukeds_user_details(raw_user_dict) dukeds_user, created = DukeDSUser.objects.get_or_create(user=user, dds_id=raw_user_dict.get('id')) if created: BaseBackend.update_model(dukeds_user, user_dict)
mit
Python
7c788c868323aa8c6237caab208d726c5cce24ac
address first time new user condition where user_id may be none
mozilla-iam/cis,mozilla-iam/cis
cis/user.py
cis/user.py
"""First class object to represent a user and data about that user.""" import logging from cis.settings import get_config logger = logging.getLogger(__name__) class Profile(object): def __init__(self, boto_session=None, profile_data=None): """ :param boto_session: The boto session object from the constructor. :param profile_data: The decrypted user profile JSON. """ self.boto_session = boto_session self.config = get_config() self.profile_data = profile_data self.dynamodb_table = None @property def exists(self): if self._retrieve_from_vault() is not None: return True else: return False def retrieve_from_vault(self): logger.info( 'Attempting to retrieve the following from the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() user_key = {'user_id': self.profile_data.get('user_id')} if user_key is not None: response = self.dynamodb_table.get_item(Key=user_key) else: response = None self.profile_data = response return response def store_in_vault(self): logger.info( 'Attempting storage of the following user to the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() response = self.dynamodb_table.put_item( Item=self.profile_data ) return (response['ResponseMetadata']['HTTPStatusCode'] is 200) def _connect_dynamo_db(self): """New up a dynamodb resource from boto session.""" dynamodb = self.boto_session.resource('dynamodb') dynamodb_table = self.config('dynamodb_table', namespace='cis') self.dynamodb_table = dynamodb.Table(dynamodb_table)
"""First class object to represent a user and data about that user.""" import logging from cis.settings import get_config logger = logging.getLogger(__name__) class Profile(object): def __init__(self, boto_session=None, profile_data=None): """ :param boto_session: The boto session object from the constructor. :param profile_data: The decrypted user profile JSON. """ self.boto_session = boto_session self.config = get_config() self.profile_data = profile_data self.dynamodb_table = None @property def exists(self): if self._retrieve_from_vault() is not None: return True else: return False def retrieve_from_vault(self): logger.info( 'Attempting to retrieve the following from the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() user_key = {'user_id': self.profile_data.get('user_id')} response = self.dynamodb_table.get_item(Key=user_key) self.profile_data = response return response def store_in_vault(self): logger.info( 'Attempting storage of the following user to the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() response = self.dynamodb_table.put_item( Item=self.profile_data ) return (response['ResponseMetadata']['HTTPStatusCode'] is 200) def _connect_dynamo_db(self): """New up a dynamodb resource from boto session.""" dynamodb = self.boto_session.resource('dynamodb') dynamodb_table = self.config('dynamodb_table', namespace='cis') self.dynamodb_table = dynamodb.Table(dynamodb_table)
mpl-2.0
Python
81833470d1eb831e27e9e34712b983efbc38a735
Convert entire table to cartesian
mikeireland/chronostar,mikeireland/chronostar,mikeireland/chronostar,mikeireland/chronostar
solar_neighbourhood/prepare_data_add_kinematics.py
solar_neighbourhood/prepare_data_add_kinematics.py
""" Add very large RV errors for stars with no known RVs. Convert to cartesian. """ import numpy as np import sys sys.path.insert(0, '..') from chronostar import tabletool from astropy.table import Table datafile = Table.read('../data/ScoCen_box_result.fits') d = Table.read(datafile) # Set missing radial velocities (nan) to 0 d['radial_velocity'] = np.nan_to_num(d['radial_velocity']) # Set missing radial velocity errors (nan) to 1e+10 d['radial_velocity_error'][np.isnan(d['radial_velocity_error'])] = 1e+4 print('Convert to cartesian') tabletool.convert_table_astro2cart(table=d, return_table=True) d.write('../data/ScoCen_box_result_15M_ready_for_bg_ols.fits') print('Cartesian written.', len(d))
""" Add very large RV errors for stars with no known RVs. Convert to cartesian. """ import numpy as np import sys sys.path.insert(0, '..') from chronostar import tabletool from astropy.table import Table datafile = Table.read('../data/ScoCen_box_result.fits') d = tabletool.read(datafile) # Set missing radial velocities (nan) to 0 d['radial_velocity'] = np.nan_to_num(d['radial_velocity']) # Set missing radial velocity errors (nan) to 1e+10 d['radial_velocity_error'][np.isnan(d['radial_velocity_error'])] = 1e+4 print('Convert to cartesian') tabletool.convert_table_astro2cart(table=d, return_table=True) d.write('../data/ScoCen_box_result_15M_ready_for_bg_ols.fits') print('Cartesian written.', len(d))
mit
Python
0700e25b4dce989fcfc6ee367c7516578c8aaf5b
Update heartbeat in idle times
Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server
lava_scheduler_daemon/service.py
lava_scheduler_daemon/service.py
# Copyright (C) 2013 Linaro Limited # # Author: Senthil Kumaran <[email protected]> # # This file is part of LAVA Scheduler. # # LAVA Scheduler is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License version 3 as # published by the Free Software Foundation # # LAVA Scheduler is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>. import logging import xmlrpclib from twisted.application.service import Service from twisted.internet import defer from twisted.internet.task import LoopingCall from lava_scheduler_app import utils from lava_scheduler_daemon.job import JobRunner, catchall_errback from lava_scheduler_daemon.worker import WorkerData class JobQueue(Service): def __init__(self, source, dispatcher, reactor, daemon_options): self.logger = logging.getLogger(__name__ + '.JobQueue') self.source = source self.dispatcher = dispatcher self.reactor = reactor self.daemon_options = daemon_options self._check_job_call = LoopingCall(self._checkJobs) self._check_job_call.clock = reactor def _checkJobs(self): # Update Worker Heartbeat # # NOTE: This will recide here till we finalize scheduler refactoring # and a separte module for worker specific daemon gets created. self.logger.debug("Worker heartbeat") worker = WorkerData() # Record the scheduler tick (timestamp). worker.record_master_scheduler_tick() try: worker.put_heartbeat_data() except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as err: worker.logger.error("Heartbeat update failed!") self.logger.debug("Refreshing jobs") return self.source.getJobList().addCallback( self._startJobs).addErrback(catchall_errback(self.logger)) def _startJobs(self, jobs): for job in jobs: new_job = JobRunner(self.source, job, self.dispatcher, self.reactor, self.daemon_options) self.logger.info("Starting Job: %d " % job.id) new_job.start() def startService(self): self.logger.info("\n\nLAVA Scheduler starting\n\n") self._check_job_call.start(20) def stopService(self): self._check_job_call.stop() return None
# Copyright (C) 2013 Linaro Limited # # Author: Senthil Kumaran <[email protected]> # # This file is part of LAVA Scheduler. # # LAVA Scheduler is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License version 3 as # published by the Free Software Foundation # # LAVA Scheduler is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>. import logging import xmlrpclib from twisted.application.service import Service from twisted.internet import defer from twisted.internet.task import LoopingCall from lava_scheduler_app import utils from lava_scheduler_daemon.job import JobRunner, catchall_errback from lava_scheduler_daemon.worker import WorkerData class JobQueue(Service): def __init__(self, source, dispatcher, reactor, daemon_options): self.logger = logging.getLogger(__name__ + '.JobQueue') self.source = source self.dispatcher = dispatcher self.reactor = reactor self.daemon_options = daemon_options self._check_job_call = LoopingCall(self._checkJobs) self._check_job_call.clock = reactor def _checkJobs(self): self.logger.debug("Refreshing jobs") return self.source.getJobList().addCallback( self._startJobs).addErrback(catchall_errback(self.logger)) def _startJobs(self, jobs): # Update Worker Heartbeat # # NOTE: This will recide here till we finalize scheduler refactoring # and a separte module for worker specific daemon gets created. worker = WorkerData() # Record the scheduler tick (timestamp). worker.record_master_scheduler_tick() try: worker.put_heartbeat_data() except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as err: worker.logger.error("Heartbeat update failed!") for job in jobs: new_job = JobRunner(self.source, job, self.dispatcher, self.reactor, self.daemon_options) self.logger.info("Starting Job: %d " % job.id) new_job.start() def startService(self): self.logger.info("\n\nLAVA Scheduler starting\n\n") self._check_job_call.start(20) def stopService(self): self._check_job_call.stop() return None
agpl-3.0
Python
fedb3768539259568555d5a62d503c7995f4b9a2
Handle orgs that you don’t own personally.
istresearch/readthedocs.org,CedarLogic/readthedocs.org,KamranMackey/readthedocs.org,SteveViss/readthedocs.org,stevepiercy/readthedocs.org,soulshake/readthedocs.org,kenshinthebattosai/readthedocs.org,takluyver/readthedocs.org,cgourlay/readthedocs.org,soulshake/readthedocs.org,attakei/readthedocs-oauth,hach-que/readthedocs.org,CedarLogic/readthedocs.org,sid-kap/readthedocs.org,davidfischer/readthedocs.org,VishvajitP/readthedocs.org,singingwolfboy/readthedocs.org,davidfischer/readthedocs.org,singingwolfboy/readthedocs.org,kenshinthebattosai/readthedocs.org,rtfd/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,raven47git/readthedocs.org,davidfischer/readthedocs.org,clarkperkins/readthedocs.org,mrshoki/readthedocs.org,cgourlay/readthedocs.org,soulshake/readthedocs.org,attakei/readthedocs-oauth,cgourlay/readthedocs.org,soulshake/readthedocs.org,kenshinthebattosai/readthedocs.org,rtfd/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,raven47git/readthedocs.org,kdkeyser/readthedocs.org,dirn/readthedocs.org,techtonik/readthedocs.org,mhils/readthedocs.org,agjohnson/readthedocs.org,asampat3090/readthedocs.org,espdev/readthedocs.org,Carreau/readthedocs.org,wanghaven/readthedocs.org,mrshoki/readthedocs.org,agjohnson/readthedocs.org,kenshinthebattosai/readthedocs.org,attakei/readthedocs-oauth,VishvajitP/readthedocs.org,mrshoki/readthedocs.org,dirn/readthedocs.org,LukasBoersma/readthedocs.org,sunnyzwh/readthedocs.org,titiushko/readthedocs.org,nikolas/readthedocs.org,CedarLogic/readthedocs.org,kenwang76/readthedocs.org,mhils/readthedocs.org,Tazer/readthedocs.org,hach-que/readthedocs.org,jerel/readthedocs.org,safwanrahman/readthedocs.org,takluyver/readthedocs.org,GovReady/readthedocs.org,safwanrahman/readthedocs.org,kenwang76/readthedocs.org,KamranMackey/readthedocs.org,fujita-shintaro/readthedocs.org,Carreau/readthedocs.org,wanghaven/readthedocs.org,asampat3090/readthedocs.org,techtonik/readthedocs.org,istresearch/readthedocs.org,sunnyzwh/readthedocs.org,tddv/readthedocs.org,laplaceliu/readthedocs.org,nikolas/readthedocs.org,kenwang76/readthedocs.org,sid-kap/readthedocs.org,espdev/readthedocs.org,KamranMackey/readthedocs.org,michaelmcandrew/readthedocs.org,rtfd/readthedocs.org,singingwolfboy/readthedocs.org,mrshoki/readthedocs.org,emawind84/readthedocs.org,pombredanne/readthedocs.org,espdev/readthedocs.org,espdev/readthedocs.org,KamranMackey/readthedocs.org,sid-kap/readthedocs.org,kdkeyser/readthedocs.org,atsuyim/readthedocs.org,agjohnson/readthedocs.org,kenshinthebattosai/readthedocs.org,wijerasa/readthedocs.org,jerel/readthedocs.org,cgourlay/readthedocs.org,fujita-shintaro/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,agjohnson/readthedocs.org,royalwang/readthedocs.org,atsuyim/readthedocs.org,Tazer/readthedocs.org,safwanrahman/readthedocs.org,gjtorikian/readthedocs.org,mhils/readthedocs.org,gjtorikian/readthedocs.org,wijerasa/readthedocs.org,SteveViss/readthedocs.org,singingwolfboy/readthedocs.org,pombredanne/readthedocs.org,royalwang/readthedocs.org,techtonik/readthedocs.org,titiushko/readthedocs.org,LukasBoersma/readthedocs.org,sunnyzwh/readthedocs.org,wanghaven/readthedocs.org,VishvajitP/readthedocs.org,takluyver/readthedocs.org,davidfischer/readthedocs.org,jerel/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,SteveViss/readthedocs.org,asampat3090/readthedocs.org,pombredanne/readthedocs.org
readthedocs/oauth/utils.py
readthedocs/oauth/utils.py
import logging from .models import GithubProject, GithubOrganization log = logging.getLogger(__name__) def make_github_project(user, org, privacy, repo_json): if (repo_json['private'] is True and privacy == 'private' or repo_json['private'] is False and privacy == 'public'): project, created = GithubProject.objects.get_or_create( full_name=repo_json['full_name'], ) if project.user != user: log.debug('Not importing %s because mismatched user' % repo_json['name']) return None if project.organization and project.organization != org: log.debug('Not importing %s because mismatched orgs' % repo_json['name']) return None project.organization=org project.name = repo_json['name'] project.description = repo_json['description'] project.git_url = repo_json['git_url'] project.ssh_url = repo_json['ssh_url'] project.html_url = repo_json['html_url'] project.json = repo_json project.save() return project else: log.debug('Not importing %s because mismatched type' % repo_json['name']) def make_github_organization(user, org_json): org, created = GithubOrganization.objects.get_or_create( login=org_json.get('login'), html_url=org_json.get('html_url'), name=org_json.get('name'), email=org_json.get('email'), json=org_json, ) org.users.add(user) return org
import logging from .models import GithubProject, GithubOrganization log = logging.getLogger(__name__) def make_github_project(user, org, privacy, repo_json): if (repo_json['private'] is True and privacy == 'private' or repo_json['private'] is False and privacy == 'public'): project, created = GithubProject.objects.get_or_create( user=user, organization=org, full_name=repo_json['full_name'], ) project.name = repo_json['name'] project.description = repo_json['description'] project.git_url = repo_json['git_url'] project.ssh_url = repo_json['ssh_url'] project.html_url = repo_json['html_url'] project.json = repo_json project.save() return project else: log.debug('Not importing %s because mismatched type' % repo_json['name']) def make_github_organization(user, org_json): org, created = GithubOrganization.objects.get_or_create( login=org_json.get('login'), html_url=org_json.get('html_url'), name=org_json.get('name'), email=org_json.get('email'), json=org_json, ) org.users.add(user) return org
mit
Python
bc196c74b3959577f7254d1d5434aeb23d284eea
Convert tabs to spaces
WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder
RecordingApp/app/src/scripts/getChunks.py
RecordingApp/app/src/scripts/getChunks.py
#Script to generate a json file containing book name, number of chapters, number of chunks import json import urllib.request import re result_json_name = "chunks.json" with open("catalog.json") as file: data = json.load(file) output = [] #skip obs for now, loop over all books for x in range(1, 67): #gives book name and order (the books are stored out of order in the json) slug = data[x]["slug"] sort = data[x]["sort"] #Get languages.json url_lang_cat = data[x]["lang_catalog"] response_lang_cat = urllib.request.urlopen(url_lang_cat) lang_catalog = json.loads(response_lang_cat.read().decode('utf-8')) name = lang_catalog[0]["project"]["name"] #Get resources.json #0 is for udb, are chunks the same for both? url_res = lang_catalog[0]["res_catalog"] response_res = urllib.request.urlopen(url_res) res_cat = json.loads(response_res.read().decode('utf-8')) #Get the usfm file url_usfm = res_cat[0]["usfm"] response_usfm = urllib.request.urlopen(url_usfm) usfm_data = response_usfm.read().decode('utf-8') lines = usfm_data.splitlines() #keep a count of \c and \s5 tags (chapter and chunk respectively) chapter = 0 num_chunks = 0 chunk_list = [] for line in lines: chunk_match = re.search(r'\\s5', line) #add to the number of chunks seen so far if chunk_match: num_chunks += 1 #on a new chapter, append the number of chunks tallied and reset the count chapter_match = re.search(r'\\c', line) if chapter_match: chunk_list.append(num_chunks) num_chunks = 0 chapter += 1 #append the last chapter chunk_list.append(num_chunks+1) #Account for the off by one introduced from chunks coming before chapters chunk_list_fixed = [] length = len(chunk_list)-1 #eliminate chapter "0" for i in range(length): chunk_list_fixed.append(chunk_list[i+1]) #create a dictionary to store the book's data book = {} book['slug'] = slug book['name'] = name book['sort'] = sort book['chapters'] = len(chunk_list_fixed) book['chunks'] = chunk_list_fixed #add to the list of books output.append(book) #output all book data to a json file with open(result_json_name, 'w') as outfile: json.dump(output, outfile)
#Script to generate a json file containing book name, number of chapters, number of chunks import json import urllib.request import re result_json_name = "chunks.json" with open("catalog.json") as file: data = json.load(file) output = [] #skip obs for now, loop over all books for x in range(1, 67): #gives book name and order (the books are stored out of order in the json) slug = data[x]["slug"] sort = data[x]["sort"] #Get languages.json url_lang_cat = data[x]["lang_catalog"] response_lang_cat = urllib.request.urlopen(url_lang_cat) lang_catalog = json.loads(response_lang_cat.read().decode('utf-8')) name = lang_catalog[0]["project"]["name"] #Get resources.json #0 is for udb, are chunks the same for both? url_res = lang_catalog[0]["res_catalog"] response_res = urllib.request.urlopen(url_res) res_cat = json.loads(response_res.read().decode('utf-8')) #Get the usfm file url_usfm = res_cat[0]["usfm"] response_usfm = urllib.request.urlopen(url_usfm) usfm_data = response_usfm.read().decode('utf-8') lines = usfm_data.splitlines() #keep a count of \c and \s5 tags (chapter and chunk respectively) chapter = 0 num_chunks = 0 chunk_list = [] for line in lines: chunk_match = re.search(r'\\s5', line) #add to the number of chunks seen so far if chunk_match: num_chunks += 1 #on a new chapter, append the number of chunks tallied and reset the count chapter_match = re.search(r'\\c', line) if chapter_match: chunk_list.append(num_chunks) num_chunks = 0 chapter += 1 #append the last chapter chunk_list.append(num_chunks+1) #Account for the off by one introduced from chunks coming before chapters chunk_list_fixed = [] length = len(chunk_list)-1 #eliminate chapter "0" for i in range(length): chunk_list_fixed.append(chunk_list[i+1]) #create a dictionary to store the book's data book = {} book['slug'] = slug book['name'] = name book['sort'] = sort book['chapters'] = len(chunk_list_fixed) book['chunks'] = chunk_list_fixed #add to the list of books output.append(book) #output all book data to a json file with open(result_json_name, 'w') as outfile: json.dump(output, outfile)
mit
Python
c96cccbe7afc282aedbb316a2e9e41e47e68bcb6
fix efs lvm create (#610)
intel-hpdd/intel-manager-for-lustre,intel-hpdd/intel-manager-for-lustre,intel-hpdd/intel-manager-for-lustre
chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py
chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py
# Copyright (c) 2017 Intel Corporation. All rights reserved. # Use of this source code is governed by a MIT-style # license that can be found in the LICENSE file. import re from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice class TestBlockDeviceLvm(TestBlockDevice): _supported_device_types = ['lvm'] def __init__(self, device_type, device_path): super(TestBlockDeviceLvm, self).__init__(device_type, device_path) @property def preferred_fstype(self): return 'ldiskfs' # Create a lvm on the device. @property def prepare_device_commands(self): # FIXME: the use of --yes in the {vg,lv}create commands is a work-around for #500 # and should be reverted when #500 is fixed return [ "wipefs -a {}".format(self._device_path), "vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s" % (self.vg_name, self._device_path, self.lv_name, self.vg_name) ] @property def vg_name(self): return "vg_%s" % "".join( [c for c in self._device_path if re.match(r'\w', c)]) @property def lv_name(self): return "lv_%s" % "".join( [c for c in self._device_path if re.match(r'\w', c)]) @property def device_path(self): return "/dev/%s/%s" % (self.vg_name, self.lv_name) @classmethod def clear_device_commands(cls, device_paths): lv_destroy = [ "if lvdisplay /dev/{0}/{1}; then lvchange -an /dev/{0}/{1} && lvremove /dev/{0}/{1}; else exit 0; fi". format( TestBlockDeviceLvm('lvm', device_path).vg_name, TestBlockDeviceLvm('lvm', device_path).lv_name) for device_path in device_paths ] vg_destroy = [ "if vgdisplay {0}; then vgremove {0}; else exit 0; fi".format( TestBlockDeviceLvm('lvm', device_path).vg_name) for device_path in device_paths ] return lv_destroy + vg_destroy @property def install_packages_commands(self): return []
# Copyright (c) 2017 Intel Corporation. All rights reserved. # Use of this source code is governed by a MIT-style # license that can be found in the LICENSE file. import re from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice class TestBlockDeviceLvm(TestBlockDevice): _supported_device_types = ['lvm'] def __init__(self, device_type, device_path): super(TestBlockDeviceLvm, self).__init__(device_type, device_path) @property def preferred_fstype(self): return 'ldiskfs' # Create a lvm on the device. @property def prepare_device_commands(self): # FIXME: the use of --yes in the {vg,lv}create commands is a work-around for #500 # and should be reverted when #500 is fixed return [ "vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s" % (self.vg_name, self._device_path, self.lv_name, self.vg_name) ] @property def vg_name(self): return "vg_%s" % "".join( [c for c in self._device_path if re.match(r'\w', c)]) @property def lv_name(self): return "lv_%s" % "".join( [c for c in self._device_path if re.match(r'\w', c)]) @property def device_path(self): return "/dev/%s/%s" % (self.vg_name, self.lv_name) @classmethod def clear_device_commands(cls, device_paths): lv_destroy = [ "if lvdisplay /dev/{0}/{1}; then lvchange -an /dev/{0}/{1} && lvremove /dev/{0}/{1}; else exit 0; fi". format( TestBlockDeviceLvm('lvm', device_path).vg_name, TestBlockDeviceLvm('lvm', device_path).lv_name) for device_path in device_paths ] vg_destroy = [ "if vgdisplay {0}; then vgremove {0}; else exit 0; fi".format( TestBlockDeviceLvm('lvm', device_path).vg_name) for device_path in device_paths ] return lv_destroy + vg_destroy @property def install_packages_commands(self): return []
mit
Python
d0e75c65505713a5f044d67a08e6697c4e332611
Add djangobower and update static settings
be-ndee/darts,be-ndee/darts
darts/darts/settings.py
darts/darts/settings.py
""" Django settings for darts project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'yg&r*1k#$nak&g*9ay6zh!+@*=f=ids5u10a!!r^yjvltw0&8=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'djangobower' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'darts.urls' WSGI_APPLICATION = 'darts.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_DIRS = ( os.path.join(BASE_DIR, 'assets'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'djangobower.finders.BowerFinder', ) TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates'), ) # Djangobower BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components') BOWER_INSTALLED_APPS = ( 'jquery', 'bootstrap', )
""" Django settings for darts project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'yg&r*1k#$nak&g*9ay6zh!+@*=f=ids5u10a!!r^yjvltw0&8=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'darts.urls' WSGI_APPLICATION = 'darts.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
mit
Python
5273e0fcdf2b7f1b03301cb0834b07da82064b98
Remove trailing /n
rmed/zoe-vidmaster,rmed/zoe-vidmaster
mailproc/vidmaster.py
mailproc/vidmaster.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Zoe vidmaster - https://github.com/rmed/zoe-vidmaster # # Copyright (c) 2015 Rafael Medina García <[email protected]> # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import argparse import sys parser = argparse.ArgumentParser() parser.add_argument('--mail-subject', dest='subject') parser.add_argument('--msg-sender-alias', dest='sender') parser.add_argument('--application/octet-stream', dest='script') if __name__ == '__main__': args, unknown = parser.parse_known_args() if args.subject != "vidmaster": sys.exit(0) print("message dst=vidmaster&tag=compose&script=%s&sender=%s" % ( args.script, args.sender))
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Zoe vidmaster - https://github.com/rmed/zoe-vidmaster # # Copyright (c) 2015 Rafael Medina García <[email protected]> # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import argparse import sys parser = argparse.ArgumentParser() parser.add_argument('--mail-subject', dest='subject') parser.add_argument('--msg-sender-alias', dest='sender') parser.add_argument('--application/octet-stream', dest='script') if __name__ == '__main__': args, unknown = parser.parse_known_args() if args.subject != "vidmaster": sys.exit(0) print("message dst=vidmaster&tag=compose&script=%s&sender=%s\n" % ( args.script, args.sender))
mit
Python
95d9c3ecd9a8c2aa73fd91ffdf40a55fee541dd3
Enable flatpages without middleware.
jambonrose/DjangoUnleashed-1.8,jambonrose/DjangoUnleashed-1.8
suorganizer/urls.py
suorganizer/urls.py
"""suorganizer URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from django.contrib.flatpages import \ urls as flatpage_urls from blog import urls as blog_urls from contact import urls as contact_urls from organizer.urls import ( newslink as newslink_urls, startup as startup_urls, tag as tag_urls) from .views import redirect_root urlpatterns = [ url(r'^$', redirect_root), url(r'^admin/', include(admin.site.urls)), url(r'^blog/', include(blog_urls)), url(r'^contact/', include(contact_urls)), url(r'^newslink/', include(newslink_urls)), url(r'^startup/', include(startup_urls)), url(r'^tag/', include(tag_urls)), url(r'^', include(flatpage_urls)), ]
"""suorganizer URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from blog import urls as blog_urls from contact import urls as contact_urls from organizer.urls import ( newslink as newslink_urls, startup as startup_urls, tag as tag_urls) from .views import redirect_root urlpatterns = [ url(r'^$', redirect_root), url(r'^admin/', include(admin.site.urls)), url(r'^blog/', include(blog_urls)), url(r'^contact/', include(contact_urls)), url(r'^newslink/', include(newslink_urls)), url(r'^startup/', include(startup_urls)), url(r'^tag/', include(tag_urls)), ]
bsd-2-clause
Python