max_stars_repo_path (string, length 4 to 197) | max_stars_repo_name (string, length 6 to 120) | max_stars_count (int64, 0 to 191k) | id (string, length 1 to 8) | content (string, length 6 to 964k) | score (float64, -0.88 to 3.95) | int_score (int64, 0 to 4)
---|---|---|---|---|---|---
rplugin/python3/denite/ui/default.py | timgates42/denite.nvim | 0 | 4 | <gh_stars>0
# ============================================================================
# FILE: default.py
# AUTHOR: <NAME> <<EMAIL>>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
return self._is_async
def __init__(self, vim: Nvim) -> None:
self._vim = vim
self._denite: typing.Optional[SyncParent] = None
self._selected_candidates: typing.List[int] = []
self._candidates: Candidates = []
self._cursor = 0
self._entire_len = 0
self._result: typing.List[typing.Any] = []
self._context: UserContext = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._is_multi = False
self._is_async = False
self._matched_pattern = ''
self._displayed_texts: typing.List[str] = []
self._statusline_sources = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status: typing.Dict[str, typing.Any] = {}
self._prev_curpos: typing.List[typing.Any] = []
self._save_window_options: typing.Dict[str, typing.Any] = {}
self._sources_history: typing.List[typing.Any] = []
self._previous_text = ''
self._floating = False
self._filter_floating = False
self._updated = False
self._timers: typing.Dict[str, int] = {}
self._matched_range_id = -1
self._matched_char_id = -1
self._check_matchdelete = bool(self._vim.call(
'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
context: UserContext) -> typing.List[typing.Any]:
if not self._denite:
# if hasattr(self._vim, 'run_coroutine'):
# self._denite = ASyncParent(self._vim)
# else:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._start_sources_queue(context)
return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
# Re-open denite buffer
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
# Restore the cursor
self._move_to_pos(prev_cursor)
# Disable quit flag
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
# Ignore command line window.
return
resume = self._initialized and context['resume']
if resume:
# Skip the initialization
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
# Ignore empty sources.
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = self._context['prev_winid']
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
        # Note: Have to use setlocal instead of "current.window.options"
        # because "current.window.options" changes the global value instead
        # of the local one in Neovim.
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
if self._context['prompt']:
self._vim.command('setlocal signcolumn=yes')
else:
self._vim.command('setlocal signcolumn=auto')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
# Disable ruler
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
            # In Vim8, the FileType autocmd is not fired after setting the
            # filetype option.
self._vim.command('silent doautocmd FileType denite')
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
# Move the window to bottom
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in [
'floating',
'floating_relative_cursor',
'floating_relative_window',
]
self._filter_floating = False
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
self._split_floating(split)
elif self._context['filter_split_direction'] == 'floating':
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating',
'floating_relative_window',
'floating_relative_cursor'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
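        # 'dynamictop' and 'dynamicbottom' pick the split direction based on
        # whether every displayed candidate line fits in the current window
        # width: fitting text opens a local split, overflowing text opens a
        # full-width one.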
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = self._context['winminheight']
max_height = min(self._context['winheight'],
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
        is_changed = (self._context['reversed'] or
                      (is_current_buffer and
                       self._previous_text != self._context['input']))
        self._previous_text = self._context['input']
        self._resize_buffer(is_current_buffer)
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
# Extra
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
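        # Vim expression that renders a right-aligned "current/total" line
        # indicator, padded to the width of the last line number plus two.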
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (str(self._context['selected_icon'])
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = self._context['winrow']
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += self._context['winheight']
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': self._context['wincol'],
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = self._context['wincol']
else:
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self._vim.command('wincmd p')
    def _do_command(self, command: str) -> None:
        self._init_cursor()
        cursor = 1
        while cursor < len(self._candidates):
            self.do_action('default', command)
            self._move_to_next_line()
            cursor += 1
        self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
# Note: Close filter window before preview window
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
# Quit filter buffer
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
# Move to denite window
self._vim.call('win_gotoid', self._winid)
# Restore the window
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
# Note: execute restcmd twice to restore layout properly
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = self._context['winheight']
self._winwidth = self._context['winwidth']
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
if self._context['reversed']:
self._vim.command('normal! zb')
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
        # Note: after timer_stop is called, the key may already have been
        # removed from self._timers
if key in self._timers:
self._timers.pop(key)
def _split_floating(self, split: str) -> None:
# Use floating window
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': self._context['winrow'],
'col': self._context['wincol'],
'width': self._context['winwidth'],
'height': self._context['winheight'],
})
elif split == 'floating_relative_cursor':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = self._context['winwidth']
height = self._context['winheight']
if opened_pos + height + 3 > self._vim.options['lines']:
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif split == 'floating_relative_window':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'win',
'row': self._context['winrow'],
'col': self._context['wincol'],
'width': self._context['winwidth'],
'height': self._context['winheight'],
})
| 1.453125 | 1 |
examples/first_char_last_column.py | clarkfitzg/sta141c | 24 | 132 | <filename>examples/first_char_last_column.py
#!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
reader = csv.reader(stdin)
writer = csv.writer(stdout)
for row in reader:
try:
row[-1] = row[-1][0]
except IndexError:
# Python: Better to ask forgiveness than permission
# Alternative: Look before you leap
pass
writer.writerow(row)
if __name__ == "__main__":
main()
| 1.789063 | 2 |
twitoff/predict.py | dscohen75/twitoff | 0 | 260 | import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likely to say a given Tweet.
Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
Returns 1 corresponding to 1st user passed in, or 0 for second.
"""
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_vect = np.array([tweet.vect for tweet in user1.tweets])
user2_vect = np.array([tweet.vect for tweet in user2.tweets])
vects = np.vstack([user1_vect, user2_vect])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(vects, labels)
# We've done the model fitting, now to predict...
hypo_tweet_vect = vectorize_tweet(tweet_text)
return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
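# Usage sketch based on the docstring example above (kept as a comment so
# importing this module stays side-effect free):
# prediction = predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
# prediction[0] is 1.0 if the first user is the more likely author, else 0.0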
| 2.671875 | 3 |
tensorflow/python/ops/standard_ops.py | ashutom/tensorflow-upstream | 8 | 388 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| 1.367188 | 1 |
Models.py | jmj23/Kaggle-Pneumothorax | 0 | 516 | import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
    """Creates a Block CED model for segmentation problems
    Args:
        input_shape: a list or tuple of [rows,cols,channels] of input images
        filt_num: the number of filters in the first and last layers.
            This number is linearly increased and decreased throughout the model
        numBlocks: number of processing blocks. The larger the number the deeper the model
        output_chan: number of output channels. Set if doing multi-class segmentation
        regression: Whether to have a continuous output with linear activation
    Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
    Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
        the rows/cols must be divisible by 2^numBlocks for skip connections
        to match up properly
    """
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
    assert minsize > 4, "Input is too small for this many blocks. Use fewer blocks or a larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
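# A minimal usage sketch for the model factory above; the shape and
# hyperparameters are illustrative assumptions, not values from this repo:
# seg_model = BlockModel2D((256, 256, 1), filt_num=8, numBlocks=3)
# seg_model.compile(optimizer='adam', loss='binary_crossentropy')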
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
    """Creates a Block model for pretraining on a classification task
    Args:
        input_shape: a list or tuple of [rows,cols,channels] of input images
        filt_num: the number of filters in the first and last layers.
            This number is linearly increased and decreased throughout the model
        numBlocks: number of processing blocks. The larger the number the deeper the model
        output_chan: number of output channels. Set if doing multi-class segmentation
        regression: Whether to have a continuous output with linear activation
    Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
    Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
        the rows/cols must be divisible by 2^numBlocks for skip connections
        to match up properly
    """
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
    assert minsize > 4, "Input is too small for this many blocks. Use fewer blocks or a larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def ConvertEncoderToCED(model):
# Returns a model with frozen encoder layers
# and complimentary, unfrozen decoder layers
# get input layer
# model must be compiled again after using this function
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
x = model.layers[-3].output
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
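# A hedged sketch of the pretraining workflow implied by the comments above
# (training details are assumptions, not part of this file):
# clf = BlockModel_Classifier((256, 256, 1), filt_num=8, numBlocks=3)
# clf.compile(optimizer='adam', loss='binary_crossentropy')
# ... fit clf on the classification task, then convert it ...
# seg = ConvertEncoderToCED(clf)  # frozen encoder, fresh trainable decoder
# seg.compile(optimizer='adam', loss='binary_crossentropy')  # compile again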
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
| 2.609375 | 3 |
vize/170401038.py | omuryorulmaz/kriptografi | 8 | 644 | # <NAME> 170401038
import math
import random
r = 3271
def egcd(a,b):
if(a == 0):
return(b,0,1)
else:
c,d,e = egcd(b % a, a)
return(c, e - (b // a) * d, d)
def modInvert(a,b):
c,d,e = egcd(a,b)
if c != 1:
        raise Exception('modular inverse not found')
else:
return d % b
def randomInteger(n):
return random.randrange(2 ** (n-1), 2 ** n) | 1
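# e.g. randomInteger(8) returns an odd integer in [129, 255]: an n-bit value
# whose low bit is forced to 1 so candidate primes are never even.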
def RabinMiller(f):
s = 5
if(f == 2):
return 1
if not (f & 1):
return 0
p = f-1
u = 0
r = f-1
while (r%2 == 0):
r >>= 1
u+=1
def Control(a):
z = pow(a, r, f)
if z == 1:
return 0
for i in range(u):
            z = pow(a, (2**i) * r, f)
if z == p:
return 0
return 1
for i in range(s):
a = random.randrange(2, p-2)
if Control(a):
return 0
return 1
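# Illustrative checks (assumed inputs, not part of the assignment):
# RabinMiller(97) -> 1 (97 is prime); RabinMiller(91) -> 0 (91 = 7 * 13)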
def Keygen(n):
while True:
p = randomInteger(n//2)
        if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, (p - 1) // r) == 1:
break
while True:
q = randomInteger(n//2)
if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1:
break
N = p * q
phi = (p - 1) * (q - 1)
while True:
y = random.randrange(1, N)
if math.gcd(y, N) == 1:
x = pow(y, phi * modInvert(r, N) % N, N)
if x != 1:
break
publicKeyFile = open("publickey.txt", "w+")
publicKeyFile.write(str(N) + "\n" + str(y))
publicKeyFile.close()
privateKeyFile = open("privatekey.txt", "w+")
privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N))
privateKeyFile.close()
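# Note: Keygen stores the Benaloh-style key material as plain text: the
# public key (N, y) in publickey.txt and the private key (phi, x, N) in
# privatekey.txt; encrypt() and decrypt() below read these files back.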
def encrypt(plaintext, publickeytxt):
try:
open(publickeytxt, "r")
except FileNotFoundError:
        print("Encryption cannot be performed before the key pairs are generated. Please run the Keygen function first.")
else:
publicKeyFile = open(publickeytxt, "r")
N, y = publicKeyFile.read().split("\n")
N = int(N)
y = int(y)
publicKeyFile.close()
plainTextFile = open(plaintext, "r")
plainCopy = int(plainTextFile.read().split("\n")[0])
plainTextFile.close()
while True:
            u = random.randrange(1, int(N))
            if math.gcd(u, N) == 1:
                break
cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N
cipherTextFile = open("ciphertext.txt", "w+")
cipherTextFile.write(str(cipherText))
cipherTextFile.close()
def decrypt(ciphertext, privatekeytxt):
try:
open(privatekeytxt, "r")
except FileNotFoundError:
        print("Decryption cannot be performed before the key pairs are generated. Please run the Keygen function first.")
else:
privateKeyFile = open(privatekeytxt, "r")
phi, x, N = privateKeyFile.read().split("\n")
phi, x, N = int(phi), int(x), int(N)
privateKeyFile.close()
cipherTextFile = open(ciphertext, "r")
cipherCopy = int(cipherTextFile.read())
a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N)
for i in range(r -1):
if(pow(x, i, N) == a):
break
plainText2File = open("plaintext2.txt", "w+")
plainText2File.write(str(i))
plainText2File.close()
plain2File = open("plaintext2.txt", "r")
plain1File = open("plaintext.txt", "r")
plain1 = plain1File.read().split("\n")[0]
plain2 = plain2File.read().split("\n")[0]
        if plain1 == plain2:
            print("The files are identical.")
        else:
            print("The files are not identical.")
n = int(input("Enter the bit length of the key pairs to generate: "))
Keygen(n)
encrypt("plaintext.txt","publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
| 2.03125 | 2 |
python3_module_template/subproject/myexample.py | sdpython/python_project_template | 0 | 772 | <filename>python3_module_template/subproject/myexample.py
# -*- coding: utf-8 -*-
"""
@file
@brief This the documentation of this module (myexampleb).
"""
class myclass:
"""
This is the documentation for this class.
**example with a sphinx directives**
It works everywhere in the documentation.
.. exref::
:title: an example of use
Just for documentation purpose.
::
m = myclass(0)
The old way:
@example(an old example of use)
This only works from the code,
    not when inserted in an RST file. The source
documentation is parsed and every such example is
collected and placed in a page ``all_examples.rst``
(look at the source).
@code
m = myclass(0)
@endcode
@endexample
**FAQ**
.. faqref::
:title: How to add a question ?
Just look a this section.
Look also :ref:`l-FAQ2`.
.. faqref::
:title: Add a label
:lid: label1
Look also :ref:`l-FAQ2`.
**BLOC**
.. blocref::
:title: How to add a bloc
:tag: aaaa
Just look a this bloc.
Look also :ref:`l-FAQ2`.
An accent, é, to check it is working.
A link to github source: :githublink:`source|py`.
"""
def __init__(self, pa):
"""
documentation for the constructor
@param pa first parameter
"""
self.pa = pa
def get_value(self, mul):
"""
returns the parameter multiplied by a value
@param mul a float
@return a float
"""
return self.pa * mul
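# Hypothetical usage sketch (values are illustrative only):
# m = myclass(2)
# m.get_value(3.5)  # returns 7.0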
| 1.953125 | 2 |
test/crossrunner/compat.py | BluechipSystems/thrift | 0 | 900 | import os
import sys
if sys.version_info[0] == 2:
_ENCODE = sys.getfilesystemencoding()
def path_join(*args):
bin_args = map(lambda a: a.decode(_ENCODE), args)
return os.path.join(*bin_args).encode(_ENCODE)
def str_join(s, l):
bin_args = map(lambda a: a.decode(_ENCODE), l)
b = s.decode(_ENCODE)
return b.join(bin_args).encode(_ENCODE)
logfile_open = open
else:
path_join = os.path.join
str_join = str.join
def logfile_open(*args):
return open(*args, errors='replace')
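# Illustrative usage (byte strings on Python 2, text strings on Python 3;
# the arguments are assumptions, not values from the test suite):
# full_path = path_join('test', 'crossrunner')
# joined = str_join('-', ['a', 'b', 'c'])  # 'a-b-c'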
| 1.101563 | 1 |
cinder/tests/unit/fake_group_snapshot.py | lightsey/cinder | 571 | 1028 | <filename>cinder/tests/unit/fake_group_snapshot.py
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from cinder import objects
from cinder.tests.unit import fake_constants as fake
def fake_db_group_snapshot(**updates):
db_group_snapshot = {
'id': fake.GROUP_SNAPSHOT_ID,
'name': 'group-1',
'status': 'available',
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'group_type_id': fake.GROUP_TYPE_ID,
'group_id': fake.GROUP_ID,
}
for name, field in objects.GroupSnapshot.fields.items():
if name in db_group_snapshot:
continue
if field.nullable:
db_group_snapshot[name] = None
elif field.default != fields.UnspecifiedDefault:
db_group_snapshot[name] = field.default
else:
raise Exception('fake_db_group_snapshot needs help with %s.'
% name)
if updates:
db_group_snapshot.update(updates)
return db_group_snapshot
def fake_group_snapshot_obj(context, **updates):
return objects.GroupSnapshot._from_db_object(
context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates))
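# A minimal usage sketch (the status override is an assumed example):
# db_row = fake_db_group_snapshot(status='creating')
# snap_obj = fake_group_snapshot_obj(context, status='creating')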
| 1.445313 | 1 |
darknet2ncnn.py | nihui/gen-ncnn-models | 4 | 1156 | <reponame>nihui/gen-ncnn-models
#! /usr/bin/env python
# coding: utf-8
import configparser
import numpy as np
import re,sys,os
from graph import MyGraph
from collections import OrderedDict
def unique_config_sections(config_file):
    """Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compatibility with configparser.
    """
from collections import defaultdict
import io
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream
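# For example, a Darknet config with two bare [convolutional] sections is
# rewritten so configparser sees unique names: the first becomes
# [convolutional_0] and the second [convolutional_1].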
def getFilters(mydict, name):
#print('find filters for ', name)
if hasattr(mydict[name], 'filters'):
return mydict[name].filters
else:
assert len(mydict[name].input) >= 1
return getFilters(mydict, mydict[name].input[0])
def readfile(f, len, msg):
print(" %s read %d bytes" % (msg, len))
return f.read(len)
def buildGraph(config_path, weights_path):
unique_config_file = unique_config_sections(config_path)
cfg_parser = configparser.ConfigParser()
cfg_parser.read_file(unique_config_file)
weights_file = open(weights_path, 'rb')
# read out major, minor, revision, net.seen
readfile(weights_file, (4*4), 'head')
mydict = OrderedDict()
# record the output of the original layer
mylist = []
count = 4
import queue
for _section in cfg_parser.sections():
sec_q = queue.Queue(0)
sec_q.put(cfg_parser[_section])
while not sec_q.empty():
sec = sec_q.get()
section = sec.name
print('Parsing section {}'.format(section))
            # this section may be a subsection
if section.startswith('activation') or section.endswith('activation'):
activation = sec.get('activation', fallback = 'logistic')
if activation == 'linear':
pass
elif activation == 'linear' or activation == 'leaky' or activation == 'relu':
node = MyGraph.MyNode()
node.name = section
node.op = 'Leaky'
if activation == 'linear':
node.slope = 1
elif activation == 'leaky':
node.slope = 0.1
elif activation == 'relu':
node.slope = 0
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
mydict[node.name] = node
prev_output = node.name
# prev_layer_filters no change
else:
raise ValueError(
'Unknown activation function `{}` in section {}'.format(
activation, section))
if section.startswith('activation'):
mylist.append(section)
elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section):
if section.startswith('convolutional'):
conv = 'conv'
filters = sec.getint('filters', fallback = 1)
groups = 1
op = 'Conv2D'
elif section.startswith('depthwise'):
conv = 'dconv'
filters = prev_layer_filters
multiplier = sec.getint('multiplier', fallback = 1)
assert multiplier == 1
groups = filters
op = 'DepthwiseConv2dNative'
elif section.startswith('groupwise'):
conv = 'gconv'
filters = sec.getint('filters', fallback=1)
groups = sec.getint('groups', fallback = 1)
op = 'DepthwiseConv2dNative'
size = sec.getint('size', fallback = 1)
stride = sec.getint('stride', fallback = 1)
pad = sec.getint('pad', fallback = 0)
padding = sec.getint('padding', fallback = 0)
activation = sec.get('activation', fallback = 'logistic')
batch_normalize = sec.getint('batch_normalize', 0)
# padding='same' is equivalent to Darknet pad=1
# padding = 'same' if pad == 1 else 'valid'
if pad:
padding = size//2
# Setting weights.
# Darknet serializes convolutional weights as:
# [bias/beta, [gamma, mean, variance], conv_weights]
#prev_layer_shape = prev_layer.shape
# TODO: This assumes channel last dim_ordering.
if conv == 'conv':
weights_shape = (size, size, prev_layer_filters, filters)
idx_tf2darknet = [0, 1, 2, 3]
elif conv == 'dconv':
weights_shape = (size, size, filters)
idx_tf2darknet = [0, 1, 2]
elif conv == 'gconv':
weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups)
idx_tf2darknet = [0, 1, 2, 3, 4]
idxmap = {x: i for i, x in enumerate(idx_tf2darknet)}
idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))]
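                # idx_dartnet2tf is the inverse permutation of idx_tf2darknet;
                # transposing the loaded weights with it converts Darknet
                # ordering to the TensorFlow ordering noted below.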
weights_size = np.product(weights_shape)
print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape)
conv_bias = np.ndarray(
shape=(filters, ),
dtype=np.float32,
buffer=readfile(weights_file, (filters * 4), section+'-bias'))
count += filters
if batch_normalize:
bn_weights = np.ndarray(
shape=(3, filters),
dtype=np.float32,
buffer=readfile(weights_file, (filters * 12), section+'-batchnorm'))
count += 3 * filters
# TODO: Keras BatchNormalization mistakenly refers to var
# as std.
bn_weight_list = [
bn_weights[0], # scale gamma
conv_bias, # shift beta
bn_weights[1], # running mean
bn_weights[2] # running var
]
conv_weights = np.ndarray(
shape=[weights_shape[i] for i in idx_tf2darknet],
dtype=np.float32,
buffer=readfile(weights_file, (weights_size * 4), section+'-weights'))
count += weights_size
# DarkNet conv_weights are serialized Caffe-style:
# (out_dim, in_dim, height, width)
# We would like to set these to Tensorflow order:
# (height, width, in_dim, out_dim)
# TODO: Add check for Theano dim ordering.
#print("the darknet shape is ", conv_weights.shape)
conv_weights = np.transpose(conv_weights, idx_dartnet2tf)
#print("the tf shape is ", conv_weights.shape)
conv_weights = [conv_weights] if batch_normalize else [
conv_weights, conv_bias
]
# Create nodes
#conv_layer = np.zeros([1, 1, filters], dtype = np.float32)
node = MyGraph.MyNode()
node.name = section
node.op = op
node.input = [prev_output]
node.input_norm = node.input
node.kernel = conv_weights[0]
node.padding = padding
node.strides = [1,stride,stride,1]
node.groups = groups
node.filters = filters
mydict[node.name] = node
prev_output = node.name
prev_layer_filters = filters
if batch_normalize:
node = MyGraph.MyNode()
node.name = section + '_batch_normalize'
node.op = 'FusedBatchNorm'
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
node.gamma = bn_weights[0]
node.beta = conv_bias
node.mean = bn_weights[1]
node.variance = bn_weights[2]
mydict[node.name] = node
prev_output = node.name
# prev_layer_filters no change
else:
node = MyGraph.MyNode()
node.name = section + '_bias'
node.op = 'BiasAdd'
node.input = [prev_output]
node.input_norm = node.input
#node.attr = []
node.bias = conv_bias
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
mylist.append(name)
elif section.startswith('shuffle'):
node = MyGraph.MyNode()
node.name = section
node.op = 'Shuffle'
node.input = [prev_output]
node.input_norm = node.input
node.groups = int(cfg_parser[section]['groups'])
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section):
node = MyGraph.MyNode()
node.stride = sec.getint('stride', fallback = 1)
node.size = sec.getint('size', node.stride)
node.padding = sec.getint('padding', fallback = (node.size-1)//2)
if section.startswith('pooling'):
node.mode = str(cfg_parser[section]['mode'])
node.global_pooling = 0
elif section.startswith('maxpool'):
node.mode = 'max'
node.global_pooling = 0
elif section.startswith('avgpool'):
node.mode = 'avg'
node.global_pooling = 1
node.name = section
node.op = 'Pooling'
node.input = [prev_output]
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
#print('pooling ', vars(node))
mylist.append(section)
elif section.startswith('route'):
ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
node = MyGraph.MyNode()
node.name = section
node.op = 'NCNNConcat'
node.input = [mylist[i] for i in ids]
#print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input)
node.input_norm = node.input
node.axis = 0
node.filters = sum([getFilters(mydict, mylist[i]) for i in ids])
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
prev_layer_filters = node.filters
elif section.startswith('reorg'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetReorg'
node.input = [prev_output]
node.stride = sec.getint('stride', fallback = 1)
node.input_norm = node.input
node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
prev_layer_filters = node.filters
elif re.match(r'^(shortcut)_\d+$', section):
activation = sec.get('activation', fallback = 'logistic')
from_ = sec.getint('from')
node = MyGraph.MyNode()
node.name = section
node.op = 'BinaryOp'
node.op_type = 0
node.input = [prev_output, mylist[from_]]
#print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input)
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
# NOTE: this section has relative reference
mylist.append(name)
elif section.startswith('connected'):
activation = sec.get('activation', fallback='linear')
filters = sec.getint('output', 2)
bias_data = np.ndarray(
shape=[filters],
dtype=np.float32,
buffer=readfile(weights_file, (filters * 4), section+'-bias'))
fc_data = np.ndarray(
shape=[prev_layer_filters, filters],
dtype=np.float32,
buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight'))
node = MyGraph.MyNode()
node.name = section
node.op = 'MatMul'
node.input = [prev_output]
node.input_norm = node.input
node.multiplier = fc_data
mydict[node.name] = node
prev_output = node.name
prev_layer_filters = filters
node = MyGraph.MyNode()
node.name = section + '_bias'
node.op = 'BiasAdd'
node.input = [prev_output]
node.input_norm = node.input
# node.attr = []
node.bias = bias_data
mydict[node.name] = node
prev_output = node.name
if activation == 'linear':
mylist.append(prev_output)
else:
tmp_parser = configparser.ConfigParser()
name = section + '_activation'
tmp_parser.add_section(name)
tmp_parser.set(name, 'activation', activation)
sec_q.put(tmp_parser[name])
mylist.append(name)
elif section.startswith('net'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetNet'
node.input = []
node.input_norm = []
node.width = int(cfg_parser['net_0']['width'])
node.height = int(cfg_parser['net_0']['height'])
node.channels = int(cfg_parser['net_0']['channels'])
node.filters = node.channels
# print(vars(node))
# node.attr = []
mydict[node.name] = node
# start here
prev_output = node.name
prev_layer_filters = node.channels
mylist.append(section)
elif section.startswith('region'):
node = MyGraph.MyNode()
node.name = section
node.op = 'DarknetRegion'
node.input = [prev_output]
node.input_norm = node.input
node.classes = int(cfg_parser[section]['classes'])
node.num = int(cfg_parser[section]['num'])
node.softmax = int(cfg_parser[section]['softmax'])
node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])]
#print(vars(node))
#node.attr = []
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
elif section.startswith('softmax'):
node = MyGraph.MyNode()
node.name = section
node.op = 'Softmax'
node.input = [prev_output]
node.input_norm = node.input
mydict[node.name] = node
prev_output = node.name
mylist.append(section)
elif section.startswith('cost'):
pass # Configs not currently handled during model definition.
else:
raise ValueError(
'Unsupported section header type: {}'.format(section))
print(' out filters ', prev_layer_filters)
print('loaded {} bytes in weights file'.format(count*4))
mygraph = MyGraph(mydict)
mygraph.type = 'darknet'
return mygraph
if __name__ == '__main__':
config_path = sys.argv[1]
weights_path = sys.argv[2]
mygraph = buildGraph(config_path, weights_path)
    # Define the output, input and stop nodes needed for the subgraph
outputNodes = ['region_0', 'softmax_0']
stopNodes = []
inputNodes = ['darknet_0']
mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
mygraph.generateDot('YoloV2.dot')
    # Generate the source code for the subgraph
mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
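# Example invocation (hypothetical file names; argv[1] is the Darknet .cfg,
# argv[2] the matching .weights file):
#   python this_converter.py yolov2.cfg yolov2.weights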
| 1.8125 | 2 |
osrsapi/__init__.py | XaKingas/osrsapi | 0 | 1284 | from .grandexchange import GrandExchange, GameItemNotFound, GameItemParseError
from .item import Item
from .priceinfo import PriceInfo
from .pricetrend import PriceTrend
| 0.283203 | 0 |
tests/components/mysensors/conftest.py | liangleslie/core | 30,023 | 1412 | <filename>tests/components/mysensors/conftest.py
"""Provide common mysensors fixtures."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Callable, Generator
import json
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
from mysensors import BaseSyncGateway
from mysensors.persistence import MySensorsJSONDecoder
from mysensors.sensor import Sensor
import pytest
from homeassistant.components.device_tracker.legacy import Device
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture(autouse=True)
def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]:
"""Mock out device tracker known devices storage."""
devices = mock_device_tracker_conf
return devices
@pytest.fixture(name="mqtt")
def mock_mqtt_fixture(hass: HomeAssistant) -> None:
"""Mock the MQTT integration."""
hass.config.components.add(MQTT_DOMAIN)
@pytest.fixture(name="is_serial_port")
def is_serial_port_fixture() -> Generator[MagicMock, None, None]:
"""Patch the serial port check."""
with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device:
is_device.side_effect = lambda device: device
yield is_device
@pytest.fixture(name="gateway_nodes")
def gateway_nodes_fixture() -> dict[int, Sensor]:
"""Return the gateway nodes dict."""
return {}
@pytest.fixture(name="serial_transport")
async def serial_transport_fixture(
gateway_nodes: dict[int, Sensor],
is_serial_port: MagicMock,
) -> AsyncGenerator[dict[int, Sensor], None]:
"""Mock a serial transport."""
with patch(
"mysensors.gateway_serial.AsyncTransport", autospec=True
) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch(
"mysensors.task.load_fw", autospec=True
), patch(
"mysensors.task.Persistence", autospec=True
) as persistence_class:
persistence = persistence_class.return_value
mock_gateway_features(persistence, transport_class, gateway_nodes)
yield transport_class
def mock_gateway_features(
persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
"""Mock the gateway features."""
async def mock_schedule_save_sensors() -> None:
"""Load nodes from via persistence."""
gateway = transport_class.call_args[0][0]
gateway.sensors.update(nodes)
persistence.schedule_save_sensors = AsyncMock(
side_effect=mock_schedule_save_sensors
)
# For some reason autospeccing does not recognize these methods.
persistence.safe_load_sensors = MagicMock()
persistence.save_sensors = MagicMock()
async def mock_connect() -> None:
"""Mock the start method."""
transport.connect_task = MagicMock()
gateway = transport_class.call_args[0][0]
gateway.on_conn_made(gateway)
transport = transport_class.return_value
transport.connect_task = None
transport.connect.side_effect = mock_connect
@pytest.fixture(name="transport")
def transport_fixture(serial_transport: MagicMock) -> MagicMock:
"""Return the default mocked transport."""
return serial_transport
@pytest.fixture
def transport_write(transport: MagicMock) -> MagicMock:
"""Return the transport mock that accepts string messages."""
return transport.return_value.send
@pytest.fixture(name="serial_entry")
async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry:
"""Create a config entry for a serial gateway."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION: "2.3",
CONF_DEVICE: "/test/device",
CONF_BAUD_RATE: DEFAULT_BAUD_RATE,
},
)
return entry
@pytest.fixture(name="config_entry")
def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry:
"""Provide the config entry used for integration set up."""
return serial_entry
@pytest.fixture(name="integration")
async def integration_fixture(
hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry
) -> AsyncGenerator[MockConfigEntry, None]:
"""Set up the mysensors integration with a config entry."""
config: dict[str, Any] = {}
config_entry.add_to_hass(hass)
with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0):
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
yield config_entry
@pytest.fixture
def receive_message(
transport: MagicMock, integration: MockConfigEntry
) -> Callable[[str], None]:
"""Receive a message for the gateway."""
def receive_message_callback(message_string: str) -> None:
"""Receive a message with the transport.
The message_string parameter is a string in the MySensors message format.
"""
gateway = transport.call_args[0][0]
# node_id;child_id;command;ack;type;payload\n
gateway.logic(message_string)
return receive_message_callback
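# Example use inside a test (hypothetical payload; the fields are
# node_id;child_id;command;ack;type;payload):
#   receive_message("1;1;1;0;23;43\n")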
@pytest.fixture(name="gateway")
def gateway_fixture(
transport: MagicMock, integration: MockConfigEntry
) -> BaseSyncGateway:
"""Return a setup gateway."""
return transport.call_args[0][0]
def load_nodes_state(fixture_path: str) -> dict:
"""Load mysensors nodes fixture."""
return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)
def update_gateway_nodes(
gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]
) -> dict:
"""Update the gateway nodes."""
gateway_nodes.update(nodes)
return nodes
@pytest.fixture(name="gps_sensor_state", scope="session")
def gps_sensor_state_fixture() -> dict:
"""Load the gps sensor state."""
return load_nodes_state("mysensors/gps_sensor_state.json")
@pytest.fixture
def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor:
"""Load the gps sensor."""
nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="power_sensor_state", scope="session")
def power_sensor_state_fixture() -> dict:
"""Load the power sensor state."""
return load_nodes_state("mysensors/power_sensor_state.json")
@pytest.fixture
def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor:
"""Load the power sensor."""
nodes = update_gateway_nodes(gateway_nodes, power_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="energy_sensor_state", scope="session")
def energy_sensor_state_fixture() -> dict:
"""Load the energy sensor state."""
return load_nodes_state("mysensors/energy_sensor_state.json")
@pytest.fixture
def energy_sensor(
gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
"""Load the energy sensor."""
nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="sound_sensor_state", scope="session")
def sound_sensor_state_fixture() -> dict:
"""Load the sound sensor state."""
return load_nodes_state("mysensors/sound_sensor_state.json")
@pytest.fixture
def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor:
"""Load the sound sensor."""
nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="distance_sensor_state", scope="session")
def distance_sensor_state_fixture() -> dict:
"""Load the distance sensor state."""
return load_nodes_state("mysensors/distance_sensor_state.json")
@pytest.fixture
def distance_sensor(
gateway_nodes: dict[int, Sensor], distance_sensor_state: dict
) -> Sensor:
"""Load the distance sensor."""
nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="temperature_sensor_state", scope="session")
def temperature_sensor_state_fixture() -> dict:
"""Load the temperature sensor state."""
return load_nodes_state("mysensors/temperature_sensor_state.json")
@pytest.fixture
def temperature_sensor(
gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict
) -> Sensor:
"""Load the temperature sensor."""
nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="text_node_state", scope="session")
def text_node_state_fixture() -> dict:
"""Load the text node state."""
return load_nodes_state("mysensors/text_node_state.json")
@pytest.fixture
def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor:
"""Load the text child node."""
nodes = update_gateway_nodes(gateway_nodes, text_node_state)
node = nodes[1]
return node
| 1.484375 | 1 |
app/__init__.py | Jotasenpai/DigitalMediaStoreRESTfull | 0 | 1540 | import logging
import os
from flask import Flask
from flask_cors import CORS
from app.extensions import api
from app.extensions.database import db
from app.extensions.schema import ma
from app.views import albums, artists, hello, tracks
def create_app(config, **kwargs):
logging.basicConfig(level=logging.INFO)
app = Flask(__name__, **kwargs)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config)
# app.url_map.strict_slashes = False
with app.app_context():
api.init_app(app)
db.init_app(app)
db.create_all()
ma.init_app(app)
api.register_blueprint(hello.blp)
api.register_blueprint(artists.blp)
api.register_blueprint(albums.blp)
api.register_blueprint(tracks.blp)
try:
os.makedirs(app.instance_path)
except OSError:
pass
return app
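# Minimal usage sketch (assumes a config object or dotted path accepted by
# Flask's config.from_object, e.g. a hypothetical "config.DevConfig"):
#   app = create_app("config.DevConfig")
#   app.run(debug=True)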
| 1.4375 | 1 |
src/find_genes_by_location/find_genes_by_location.py | NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome | 3 | 1668 | import argparse
from collections import defaultdict
import csv
from dataclasses import dataclass
import os
import sys
import tempfile
import zipfile
import gffutils
from google.protobuf import json_format
from ncbi.datasets.v1alpha1 import dataset_catalog_pb2
from ncbi.datasets.v1alpha1.reports import assembly_pb2
from ncbi.datasets.reports.report_reader import DatasetsReportReader
def retrieve_assembly_report(zip_in, catalog, assm_acc: str) -> assembly_pb2.AssemblyDataReport:
report_files = get_catalog_files_for_assembly(catalog, dataset_catalog_pb2.File.FileType.DATA_REPORT, assm_acc)
for path in report_files:
yaml = zip_in.read(path)
rpt_rdr = DatasetsReportReader()
return rpt_rdr.assembly_report(yaml)
def retrieve_data_catalog(zip_in) -> dataset_catalog_pb2.Catalog:
catalog_json = zip_in.read('ncbi_dataset/data/dataset_catalog.json')
return json_format.Parse(catalog_json, dataset_catalog_pb2.Catalog())
def get_catalog_files_for_assembly(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str):
report_files = get_catalog_files(catalog, desired_filetype, assm_acc)
filepaths = []
for assm_acc, paths in report_files.items():
filepaths.extend(paths)
return filepaths
def get_catalog_files(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str = None):
files = defaultdict(list)
for assm in catalog.assemblies:
acc = assm.accession
if assm_acc and assm_acc != acc:
continue
for f in assm.files:
filepath = os.path.join('ncbi_dataset', 'data', f.file_path)
if f.file_type == desired_filetype:
files[acc].append(filepath)
return files
def get_zip_file_for_acc(acc, path):
fname = os.path.join(path, f'{acc}.zip')
if os.path.isfile(fname):
return fname
return None
@dataclass
class Gene:
id: str
feat_type: str
name: str
chrom: str
strand: str
range_start: int
range_stop: int
protein_accession: str = ""
def get_fields(self):
return [self.feat_type, self.name, self.range_start, self.range_stop, self.protein_accession]
def name_val(self):
return self.protein_accession if self.protein_accession else self.name
def find_genes_by_loc(gff3_db, csvout, assm_acc, seq_acc, start, stop, extra_fields):
found_genes = []
feat_types = ('gene', 'pseudogene')
for gene in gff3_db.region(seqid=seq_acc, start=start, end=stop, featuretype=feat_types, completely_within=False):
        gene_name = gene.attributes.get('Name', [None])[0]
prot_acc = ""
        if gene.attributes['gene_biotype'][0] == 'protein_coding':
            cds = list(gff3_db.children(gene, featuretype='CDS'))
            if cds:
                prot_acc = cds[0].attributes.get('protein_id', [''])[0]
geneobj = Gene(
gene.id,
gene.featuretype,
gene_name,
gene.chrom,
gene.strand,
gene.start,
gene.stop,
prot_acc,
)
csvout.writerow([assm_acc, seq_acc, start, stop, *extra_fields, *geneobj.get_fields()])
found_genes.append(geneobj)
return found_genes
class FindGenesByLoc:
default_packages_dir = os.path.join('var', 'data', 'packages')
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--packages-dir', type=str, default=self.default_packages_dir,
help=f'root of input data directory [{self.default_packages_dir}]')
parser.add_argument('--locs', type=str, help='file containing genomic locations')
self.args = parser.parse_args()
self.writer = csv.writer(sys.stdout, dialect='excel-tab')
def read_data(self):
for row in csv.reader(iter(sys.stdin.readline, ''), dialect='excel-tab'):
yield row
def run(self):
for assm_acc, seq_acc, start, stop, *extra in self.read_data():
self.find_all_for_location(assm_acc, seq_acc, start, stop, extra)
def process_loc_for_gff(self, zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(zin.read(gff_fname))
db = gffutils.create_db(
tmpfile.name,
dbfn=':memory:',
force=True,
keep_order=True,
merge_strategy='merge',
sort_attribute_values=True
)
find_genes_by_loc(db, self.writer, assm_acc, seq_acc, start, stop, extra_fields)
def find_all_for_location(self, assm_acc, seq_acc, start, stop, extra_fields):
zip_file = get_zip_file_for_acc(assm_acc, self.args.packages_dir)
try:
with zipfile.ZipFile(zip_file, 'r') as zin:
catalog = retrieve_data_catalog(zin)
gff_files = get_catalog_files(catalog, dataset_catalog_pb2.File.FileType.GFF3)
for assm_acc, gff_files in gff_files.items():
report = retrieve_assembly_report(zin, catalog, assm_acc)
for gff_fname in gff_files:
self.process_loc_for_gff(zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields)
except zipfile.BadZipFile:
print(f'{zip_file} is not a zip file')
if __name__ == '__main__':
FindGenesByLoc().run()
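# Example invocation (hypothetical accession and coordinates; stdin rows are
# tab-separated "assembly_accession<TAB>sequence_accession<TAB>start<TAB>stop"):
#   printf 'GCF_000005845.2\tNC_000913.3\t1000\t5000\n' | \
#       python find_genes_by_location.py --packages-dir var/data/packages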
| 1.4375 | 1 |
src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 3 | 1796 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for DirectoryView module.
"""
import sys
import unittest
import warnings
from os import mkdir
from os import remove
from os.path import join
from tempfile import mktemp
from App.config import getConfiguration
from . import _globals
from .base.dummy import DummyFolder
from .base.testcase import FSDVTest
from .base.testcase import WritableFSDVTest
class DirectoryViewPathTests(unittest.TestCase):
"""
These test that, no matter what is stored in their dirpath,
FSDV's will do their best to find an appropriate skin
and only do nothing in the case where an appropriate skin
can't be found.
"""
def setUp(self):
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.CMFCore.DirectoryView import registerDirectory
registerDirectory('fake_skins', _globals)
self.ob = DummyFolder()
addDirectoryViews(self.ob, 'fake_skins', _globals)
def test__generateKey(self):
from Products.CMFCore.DirectoryView import _generateKey
key = _generateKey('Products.CMFCore', 'tests')
self.assertEqual(key.split(':')[0], 'Products.CMFCore')
        subkey = _generateKey('Products.CMFCore', 'tests\\foo')
self.assertTrue(subkey.startswith(key))
def test__findProductForPath(self):
from Products.CMFCore.DirectoryView import _findProductForPath
cmfpath = sys.modules['Products.CMFCore'].__path__[0]
self.assertEqual(_findProductForPath(cmfpath),
('Products.CMFCore', ''))
cmfpath = join(cmfpath, 'tests')
self.assertEqual(_findProductForPath(cmfpath),
('Products.CMFCore', 'tests'))
def test_getDirectoryInfo(self):
skin = self.ob.fake_skin
skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin')
self.assertTrue(hasattr(self.ob.fake_skin, 'test1'),
self.ob.fake_skin.getDirPath())
# Test we do nothing if given a really wacky path
def test_UnhandleableExpandPath(self):
file = mktemp()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.ob.fake_skin.manage_properties(file)
self.assertEqual(self.ob.fake_skin.objectIds(), [])
# Check that a warning was raised.
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
text = ('DirectoryView fake_skin refers to a non-existing path %r'
% file)
self.assertTrue(text in str(w[-1].message))
# this test tests that registerDirectory creates keys in the right format.
def test_registerDirectoryKeys(self):
from Products.CMFCore.DirectoryView import _dirreg
dirs = _dirreg._directories
self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs,
dirs.keys())
self.assertEqual(self.ob.fake_skin.getDirPath(),
'Products.CMFCore.tests:fake_skins/fake_skin')
class DirectoryViewTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self._registerDirectory(self)
def test_addDirectoryViews(self):
# Test addDirectoryViews
# also test registration of directory views doesn't barf
pass
def test_DirectoryViewExists(self):
# Check DirectoryView added by addDirectoryViews
# appears as a DirectoryViewSurrogate due
# to Acquisition hackery.
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate
self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate))
def test_DirectoryViewMethod(self):
# Check if DirectoryView method works
self.assertEqual(self.ob.fake_skin.test1(), 'test1')
def test_properties(self):
# Make sure the directory view is reading properties
self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope')
def test_ignored(self):
# Test that "artifact" files and dirs are ignored
for name in '#test1', 'CVS', '.test1', 'test1~':
self.assertTrue(name not in self.ob.fake_skin.objectIds(),
'%s not ignored' % name)
def test_surrogate_writethrough(self):
# CMF Collector 316: It is possible to cause ZODB writes because
# setting attributes on the non-persistent surrogate writes them
# into the persistent DirectoryView as well. This is bad in situations
# where you only want to store markers and remove them before the
# transaction has ended - they never got removed because there was
# no equivalent __delattr__ on the surrogate that would clean up
# the persistent DirectoryView as well.
fs = self.ob.fake_skin
test_foo = 'My Foovalue'
fs.foo = test_foo
self.assertEqual(fs.foo, test_foo)
self.assertEqual(fs.__dict__['_real'].foo, test_foo)
del fs.foo
self.assertRaises(AttributeError, getattr, fs, 'foo')
self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo')
class DirectoryViewIgnoreTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py')
self._registerDirectory(self, ignore=self.manual_ign)
def test_ignored(self):
# Test that "artifact" files and dirs are ignored,
# even when a custom ignore list is used; and that the
# custom ignore list is also honored
auto_ign = ('#test1', '.test1', 'test1~')
must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',)
visible = self.ob.fake_skin.objectIds()
for name in must_ignore:
self.assertFalse(name in visible)
class DirectoryViewFolderTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self._registerDirectory(self)
def tearDown(self):
from Products.CMFCore import DirectoryView
# This is nasty, but there is no way to unregister anything
# right now...
metatype_registry = DirectoryView._dirreg._meta_types
if 'FOLDER' in metatype_registry:
del metatype_registry['FOLDER']
FSDVTest.tearDown(self)
def test_DirectoryViewMetadata(self):
# Test to determine if metadata shows up correctly on a
# FSDV that has a corresponding .metadata file
testfolder = self.ob.fake_skin.test_directory
self.assertEqual(testfolder.title, 'test_directory Title')
def test_DirectoryViewMetadataOnPropertyManager(self):
# Test to determine if metadata shows up correctly on a
# FSDV that has a corresponding .metadata file
testfolder = self.ob.fake_skin.test_directory
self.assertEqual(testfolder.getProperty('title'),
'test_directory Title')
def test_DirectoryViewFolderDefault(self):
# Test that a folder inside the fake skin really is of type
# DirectoryViewSurrogate
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate
testfolder = self.ob.fake_skin.test_directory
self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate))
def test_DirectoryViewFolderCustom(self):
# Now we register a different class under the fake meta_type
# "FOLDER" and test again...
from Products.CMFCore.DirectoryView import DirectoryView
from Products.CMFCore.DirectoryView import registerMetaType
class DummyDirectoryViewSurrogate:
pass
class DummyDirectoryView(DirectoryView):
def __of__(self, parent):
return DummyDirectoryViewSurrogate()
registerMetaType('FOLDER', DummyDirectoryView)
# In order to regenerate the FSDV data we need to remove and
# register again, that way the newly registered meta_type is used
self.ob._delObject('fake_skin')
self._registerDirectory(self)
testfolder = self.ob.fake_skin.test_directory
self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate))
class DebugModeTests(WritableFSDVTest):
def setUp(self):
from Products.CMFCore.DirectoryView import _dirreg
WritableFSDVTest.setUp(self)
self.saved_cfg_debug_mode = getConfiguration().debug_mode
getConfiguration().debug_mode = True
# initialise skins
self._registerDirectory(self)
# add a method to the fake skin folder
self._writeFile('test2.py', "return 'test2'")
# edit the test1 method
self._writeFile('test1.py', "return 'new test1'")
# add a new folder
mkdir(join(self.skin_path_name, 'test3'))
info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath)
info.reload()
self.use_dir_mtime = info.use_dir_mtime
def tearDown(self):
getConfiguration().debug_mode = self.saved_cfg_debug_mode
WritableFSDVTest.tearDown(self)
def test_AddNewMethod(self):
# See if a method added to the skin folder can be found
self.assertEqual(self.ob.fake_skin.test2(), 'test2')
def test_EditMethod(self):
# See if an edited method exhibits its new behaviour
self.assertEqual(self.ob.fake_skin.test1(), 'new test1')
def test_DeleteMethod(self):
# Make sure a deleted method goes away
remove(join(self.skin_path_name, 'test2.py'))
self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))
def test_DeleteAddEditMethod(self):
# Check that if we delete a method, then add it back,
# then edit it, the DirectoryView notices.
# This exercises yet another Win32 mtime weirdity.
remove(join(self.skin_path_name, 'test2.py'))
self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))
# add method back to the fake skin folder
self._writeFile('test2.py', "return 'test2.2'",
self.use_dir_mtime)
# check
self.assertEqual(self.ob.fake_skin.test2(), 'test2.2')
# edit method
self._writeFile('test2.py', "return 'test2.3'",
self.use_dir_mtime)
# check
self.assertEqual(self.ob.fake_skin.test2(), 'test2.3')
def test_NewFolder(self):
# See if a new folder shows up
self.assertFalse(hasattr(self.ob.fake_skin, 'test3'))
def test_DeleteFolder(self):
# Make sure a deleted folder goes away
self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory'))
# It has a file, which we need to delete first.
self.assertTrue(hasattr(self.ob.fake_skin.test_directory,
'README.txt'))
self._deleteFile(join('test_directory', 'README.txt'),
self.use_dir_mtime)
self._deleteDirectory('test_directory', self.use_dir_mtime)
self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory'))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DirectoryViewPathTests))
suite.addTest(unittest.makeSuite(DirectoryViewTests))
suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests))
suite.addTest(unittest.makeSuite(DirectoryViewFolderTests))
suite.addTest(unittest.makeSuite(DebugModeTests))
return suite
| 1.4375 | 1 |
SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | 852 | 1924 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# handle normal mixing or premixing
def getHcalDigitizer(process):
if hasattr(process,'mixData'):
return process.mixData
if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'):
return process.mix.digitizers.hcal
return None
def getHGCalDigitizer(process,section):
if hasattr(process,'mix') and hasattr(process.mix,'digitizers'):
if section == 'EE' and hasattr(process.mix.digitizers,'hgceeDigitizer'):
return process.mix.digitizers.hgceeDigitizer
elif section == 'FH' and hasattr(process.mix.digitizers,'hgchefrontDigitizer'):
return process.mix.digitizers.hgchefrontDigitizer
elif section == 'BH' and hasattr(process.mix.digitizers,'hgchebackDigitizer'):
return process.mix.digitizers.hgchebackDigitizer
elif section == 'HFNose' and hasattr(process.mix.digitizers,'hfnoseDigitizer'):
return process.mix.digitizers.hfnoseDigitizer
return None
# change assumptions about lumi rate
def setScenarioHLLHC(module,scenarioHLLHC):
if scenarioHLLHC=="nominal":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_nominal
module.years = _years_LHC + _years_HLLHC_nominal
elif scenarioHLLHC=="ultimate":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_ultimate
module.years = _years_LHC + _years_HLLHC_ultimate
return module
# turnon = True enables default, False disables
# recalibration and darkening always together
def ageHB(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HBDarkeningEP
process.HBDarkeningEP = HBDarkeningEP
process.HBDarkeningEP = setScenarioHLLHC(process.HBDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HBDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HBRecalibration = cms.bool(turnon)
return process
def ageHE(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HEDarkeningEP
process.HEDarkeningEP = HEDarkeningEP
process.HEDarkeningEP = setScenarioHLLHC(process.HEDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HEDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HERecalibration = cms.bool(turnon)
return process
def ageHF(process,turnon):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HFRecalibration = cms.bool(turnon)
return process
def agedHFNose(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HFNose_setEndOfLifeNoise
process = HFNose_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def agedHGCal(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setEndOfLifeNoise
process = HGCal_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def realisticHGCalStartup(process):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setRealisticStartupNoise
process = HGCal_setRealisticStartupNoise(process)
return process
# needs lumi to set proper ZS thresholds (tbd)
def ageSiPM(process,turnon,lumi):
process.es_hardcode.hbUpgrade.doRadiationDamage = turnon
process.es_hardcode.heUpgrade.doRadiationDamage = turnon
# todo: determine ZS threshold adjustments
# adjust PF thresholds for increased noise
# based on: https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg
hcal_lumis = [300, 1000, 3000, 4500, 1e10]
hcal_thresholds = {
300: {
"seed": [0.5, 0.625, 0.75, 0.75],
"rec": [0.4, 0.5, 0.6, 0.6],
},
1000: {
"seed": [1.0, 1.5, 1.5, 1.5],
"rec": [0.8, 1.2, 1.2, 1.2],
},
3000: {
"seed": [1.25, 2.5, 2.5, 2.5],
"rec": [1.0, 2.0, 2.0, 2.0],
},
4500: {
"seed": [1.5, 3.0, 3.0, 3.0],
"rec": [1.25, 2.5, 2.5, 2.5],
},
}
ctmodules = ['calotowermaker','caloTowerForTrk','caloTowerForTrkPreSplitting','towerMaker','towerMakerWithHO']
for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]):
if lumi >= hcal_lumi and lumi < hcal_lumis[ilumi+1]:
if hasattr(process,'particleFlowClusterHBHE'):
process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold = hcal_thresholds[hcal_lumi]["seed"]
process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowClusterHCAL'):
process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowRecHitHBHE'):
process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold = hcal_thresholds[hcal_lumi]["rec"]
for ctmod in ctmodules:
if hasattr(process,ctmod):
getattr(process,ctmod).HBThreshold1 = hcal_thresholds[hcal_lumi]["rec"][0]
getattr(process,ctmod).HBThreshold2 = hcal_thresholds[hcal_lumi]["rec"][1]
getattr(process,ctmod).HBThreshold = hcal_thresholds[hcal_lumi]["rec"][-1]
break
return process
def ageHcal(process,lumi,instLumi,scenarioHLLHC):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.DelivLuminosity = cms.double(float(lumi)) # integrated lumi in fb-1
# these lines need to be further activated by turning on 'complete' aging for HF
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.InstLuminosity = cms.double(float(instLumi))
process.g4SimHits.HCalSD.DelivLuminosity = cms.double(float(lumi))
# recalibration and darkening always together
if hasattr(process,'es_hardcode'):
process.es_hardcode.iLumi = cms.double(float(lumi))
# functions to enable individual subdet aging
process = ageHB(process,True,scenarioHLLHC)
process = ageHE(process,True,scenarioHLLHC)
process = ageHF(process,True)
process = ageSiPM(process,True,lumi)
return process
def turn_on_HB_aging(process):
process = ageHB(process,True,"")
return process
def turn_off_HB_aging(process):
process = ageHB(process,False,"")
return process
def turn_on_HE_aging(process):
process = ageHE(process,True,"")
return process
def turn_off_HE_aging(process):
process = ageHE(process,False,"")
return process
def turn_on_HF_aging(process):
process = ageHF(process,True)
return process
def turn_off_HF_aging(process):
process = ageHF(process,False)
return process
def turn_off_SiPM_aging(process):
process = ageSiPM(process,False,0.0)
return process
def hf_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.HFDarkening = cms.untracked.bool(True)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.untracked.bool(False)
return process
def ageEcal(process,lumi,instLumi):
if hasattr(process,'g4SimHits'):
        # these lines need to be further activated by turning on 'complete' aging for ecal
process.g4SimHits.ECalSD.InstLuminosity = cms.double(instLumi)
process.g4SimHits.ECalSD.DelivLuminosity = cms.double(float(lumi))
# available conditions
ecal_lumis = [300,1000,3000,4500]
ecal_conditions = [
['EcalIntercalibConstantsRcd','EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'],
['EcalIntercalibConstantsMCRcd','EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'],
['EcalLaserAPDPNRatiosRcd','EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'],
['EcalPedestalsRcd','EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'],
['EcalTPGLinearizationConstRcd','EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'],
]
# update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf
ecal_thresholds = {
300 : 0.103,
1000 : 0.175,
3000 : 0.435,
4500 : 0.707,
}
ecal_seed_multiplier = 2.5
# try to get conditions
if int(lumi) in ecal_lumis:
if not hasattr(process.GlobalTag,'toGet'):
process.GlobalTag.toGet=cms.VPSet()
for ecal_condition in ecal_conditions:
process.GlobalTag.toGet.append(cms.PSet(
record = cms.string(ecal_condition[0]),
tag = cms.string(ecal_condition[1].format(int(lumi))),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
)
if hasattr(process,"particleFlowClusterECALUncorrected"):
_seeds = process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector
for iseed in range(0,len(_seeds)):
if _seeds[iseed].detector.value()=="ECAL_BARREL":
_seeds[iseed].seedingThreshold = cms.double(ecal_thresholds[int(lumi)]*ecal_seed_multiplier)
_clusters = process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector
for icluster in range(0,len(_clusters)):
if _clusters[icluster].detector.value()=="ECAL_BARREL":
_clusters[icluster].gatheringThreshold = cms.double(ecal_thresholds[int(lumi)])
return process
def ecal_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.ECalSD.AgeingWithSlopeLY = cms.untracked.bool(True)
if hasattr(process,'ecal_digi_parameters'):
process.ecal_digi_parameters.UseLCcorrection = cms.untracked.bool(False)
return process
def customise_aging_300(process):
process=ageHcal(process,300,5.0e34,"nominal")
process=ageEcal(process,300,5.0e34)
return process
def customise_aging_1000(process):
process=ageHcal(process,1000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,1000,5.0e34)
return process
def customise_aging_3000(process):
process=ageHcal(process,3000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,5.0e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_3000_ultimate(process):
process=ageHcal(process,3000,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_4500_ultimate(process):
process=ageHcal(process,4500,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,4500,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
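# Typical use is as a cmsDriver.py customisation function, e.g. (hypothetical job):
#   cmsDriver.py ... --customise SLHCUpgradeSimulations/Configuration/aging.customise_aging_3000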
| 1.140625 | 1 |
chroma-manager/tests/utils/__init__.py | GarimaVishvakarma/intel-chroma | 0 | 2052 | import time
import datetime
import contextlib
@contextlib.contextmanager
def patch(obj, **attrs):
"Monkey patch an object's attributes, restoring them after the block."
stored = {}
for name in attrs:
stored[name] = getattr(obj, name)
setattr(obj, name, attrs[name])
try:
yield
finally:
for name in stored:
setattr(obj, name, stored[name])
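# Example (hypothetical object):
#   class Config(object):
#       debug = False
#   with patch(Config, debug=True):
#       assert Config.debug
#   assert not Config.debug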
@contextlib.contextmanager
def timed(msg='', threshold=0):
"Print elapsed time of a block, if over optional threshold."
start = time.time()
try:
yield
finally:
elapsed = time.time() - start
if elapsed >= threshold:
print datetime.timedelta(seconds=elapsed), msg
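# Example (hypothetical call; prints the elapsed time only if it is >= 0.5s):
#   with timed('loading fixtures', threshold=0.5):
#       load_fixtures()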
| 2.03125 | 2 |
tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | 328 | 2180 | # Test definitions for Lit, the LLVM test runner.
#
# This is reusing the LLVM Lit test runner in the interim until the new build
# rules are upstreamed.
# TODO(b/136126535): remove this custom rule.
"""Lit runner globbing test
"""
load("//tensorflow:tensorflow.bzl", "filegroup")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts")
# Default values used by the test runner.
_default_test_file_exts = ["mlir", ".pbtxt", ".td"]
_default_driver = "@llvm-project//mlir:run_lit.sh"
_default_size = "small"
_default_tags = []
# These are patterns which we should never match, for tests, subdirectories, or
# test input data files.
_ALWAYS_EXCLUDE = [
"**/LICENSE.txt",
"**/README.txt",
"**/lit.local.cfg",
# Exclude input files that have spaces in their names, since bazel
# cannot cope with such "targets" in the srcs list.
"**/* *",
"**/* */**",
]
def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties):
"""Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir.
Note that, due to Bazel's hermetic builds, lit only sees the tests that
are included in the `data` parameter, regardless of what other tests might
exist in the directory searched.
Args:
      name: str, the name of the test, including extension.
      test_file: [str], label(s) of the test source file.
      data: [str], the data input to the test.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
      exec_properties: a dictionary of properties to pass on.
"""
name_without_suffix = test_file[0].split('.')[0]
local_test_files = name + ".test_files"
filegroup(
name = local_test_files,
srcs = native.glob([
"data/" + name_without_suffix + "*.mlir",
]),
)
tf_cc_test(
name = name,
srcs = test_file,
size = size,
deps = [
"//tensorflow/compiler/mlir/disc/tests:mlir_feature_test",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
data = [":" + local_test_files] + data + [
"//tensorflow/compiler/mlir/disc:disc_compiler_main",
"//tensorflow/compiler/mlir:tf-mlir-translate",
"//tensorflow/compiler/mlir:tf-opt",
],
)
def glob_op_tests(
exclude = [],
test_file_exts = _default_test_file_exts,
default_size = _default_size,
size_override = {},
data = [],
per_test_extra_data = {},
default_tags = _default_tags,
tags_override = {},
driver = _default_driver,
features = [],
exec_properties = {}):
"""Creates all plausible Lit tests (and their inputs) under this directory.
Args:
exclude: [str], paths to exclude (for tests and inputs).
test_file_exts: [str], extensions for files that are tests.
default_size: str, the test size for targets not in "size_override".
size_override: {str: str}, sizes to use for specific tests.
data: [str], additional input data to the test.
per_test_extra_data: {str: [str]}, extra data to attach to a given file.
default_tags: [str], additional tags to attach to the test.
tags_override: {str: str}, tags to add to specific tests.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
exec_properties: a dictionary of properties to pass on.
"""
# Ignore some patterns by default for tests and input data.
exclude = _ALWAYS_EXCLUDE + exclude
tests = native.glob(
["*." + ext for ext in test_file_exts],
exclude = exclude,
)
# Run tests individually such that errors can be attributed to a specific
# failure.
for i in range(len(tests)):
curr_test = tests[i]
# Instantiate this test with updated parameters.
lit_test(
name = curr_test,
data = data + per_test_extra_data.get(curr_test, []),
size = size_override.get(curr_test, default_size),
tags = default_tags + tags_override.get(curr_test, []),
driver = driver,
features = features,
exec_properties = exec_properties,
)
def lit_test(
name,
data = [],
size = _default_size,
tags = _default_tags,
driver = _default_driver,
features = [],
exec_properties = {}):
"""Runs test files under lit.
Args:
name: str, the name of the test.
data: [str], labels that should be provided as data inputs.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
      exec_properties: a dictionary of properties to pass on.
"""
_run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
| 1.15625 | 1 |
devtools/api/health.py | ankeshkhemani/devtools | 0 | 2308 | import datetime
from fastapi import APIRouter
router = APIRouter()
@router.get("", tags=["health"])
async def get_health():
return {
"results": [],
"status": "success",
"timestamp": datetime.datetime.now().timestamp()
}
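# Wiring sketch (assumes the usual FastAPI application object for this package):
#   app = FastAPI()
#   app.include_router(router, prefix="/api/health")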
| 1.03125 | 1 |
withdrawal/floor_ceiling.py | hoostus/prime-harvesting | 23 | 2436 | <filename>withdrawal/floor_ceiling.py<gh_stars>10-100
from decimal import Decimal
from .abc import WithdrawalStrategy
# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
class FloorCeiling(WithdrawalStrategy):
def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25):
super().__init__(portfolio, harvest_strategy)
self.floor = Decimal(floor)
self.ceiling = Decimal(ceiling)
self.rate = Decimal(rate)
def start(self):
amount = self.rate * self.portfolio.value
self.initial_amount = amount
return amount
def next(self):
amount = self.rate * self.portfolio.value
initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation
floor = initial_amount_inflation_adjusted * self.floor
ceiling = initial_amount_inflation_adjusted * self.ceiling
amount = max(amount, floor)
amount = min(amount, ceiling)
return amount
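# Usage sketch (assumes portfolio/harvesting objects from this package):
#   w = FloorCeiling(portfolio, harvest_strategy, rate=0.05)
#   first_year = w.start()
#   next_year = w.next()  # clamped to the inflation-adjusted floor/ceiling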
| 2.515625 | 3 |
ding/hpc_rl/wrapper.py | davide97l/DI-engine | 1 | 2564 | <filename>ding/hpc_rl/wrapper.py
import importlib
from ditk import logging
from collections import OrderedDict
from functools import wraps
import ding
'''
Overview:
`hpc_wrapper` is the wrapper for functions which are supported by hpc. If a function is wrapped by it, we will
search for its hpc type and return the function implemented by hpc.
We will use the following code as a sample to introduce `hpc_wrapper`:
```
@hpc_wrapper(shape_fn=shape_fn_dntd, namedtuple_data=True, include_args=[0,1,2,3],
include_kwargs=['data', 'gamma', 'v_min', 'v_max'], is_cls_method=False)
def dist_nstep_td_error(
data: namedtuple,
gamma: float,
v_min: float,
v_max: float,
n_atom: int,
nstep: int = 1,
) -> torch.Tensor:
...
```
Parameters:
    - shape_fn (:obj:`function`): a function which returns the shape needed by the hpc function. In fact, it returns
        all args that the hpc function needs.
    - namedtuple_data (:obj:`bool`): If True, when the hpc function is called, it will be called as hpc_function(*namedtuple).
        If False, namedtuple data will keep its `namedtuple` type.
    - include_args (:obj:`list`): a list of indices of the args that need to be passed to the hpc function. As shown in the sample,
        include_args=[0,1,2,3], which means `data`, `gamma`, `v_min` and `v_max` will be passed to the hpc function.
    - include_kwargs (:obj:`list`): a list of keys of the kwargs that need to be passed to the hpc function. As shown in the sample,
        include_kwargs=['data', 'gamma', 'v_min', 'v_max'], which means `data`, `gamma`, `v_min` and `v_max` will be
        passed to the hpc function.
    - is_cls_method (:obj:`bool`): If True, it means the function we wrap is a method of a class. `self` will be put
        into args. We will strip `self` from args. Besides, we will use its class name as its fn_name.
        If False, it means the function is a plain function.
Q&A:
    - Q: Do `include_args` and `include_kwargs` need to be set at the same time?
    - A: Yes. `include_args` and `include_kwargs` can deal with all types of input, such as (data, gamma, v_min=v_min,
        v_max=v_max) and (data, gamma, v_min, v_max).
- Q: What is `hpc_fns`?
- A: Here we show a normal `hpc_fns`:
```
hpc_fns = {
'fn_name1': {
'runtime_name1': hpc_fn1,
'runtime_name2': hpc_fn2,
...
},
...
}
```
        Besides, `per_fn_limit` means the max length of `hpc_fns[fn_name]`. When a new function comes, the oldest
        function will be popped from `hpc_fns[fn_name]`.
'''
hpc_fns = {}
per_fn_limit = 3
def register_runtime_fn(fn_name, runtime_name, shape):
fn_name_mapping = {
'gae': ['hpc_rll.rl_utils.gae', 'GAE'],
'dist_nstep_td_error': ['hpc_rll.rl_utils.td', 'DistNStepTD'],
'LSTM': ['hpc_rll.torch_utils.network.rnn', 'LSTM'],
'ppo_error': ['hpc_rll.rl_utils.ppo', 'PPO'],
'q_nstep_td_error': ['hpc_rll.rl_utils.td', 'QNStepTD'],
'q_nstep_td_error_with_rescale': ['hpc_rll.rl_utils.td', 'QNStepTDRescale'],
'ScatterConnection': ['hpc_rll.torch_utils.network.scatter_connection', 'ScatterConnection'],
'td_lambda_error': ['hpc_rll.rl_utils.td', 'TDLambda'],
'upgo_loss': ['hpc_rll.rl_utils.upgo', 'UPGO'],
'vtrace_error': ['hpc_rll.rl_utils.vtrace', 'VTrace'],
}
fn_str = fn_name_mapping[fn_name]
cls = getattr(importlib.import_module(fn_str[0]), fn_str[1])
hpc_fn = cls(*shape).cuda()
if fn_name not in hpc_fns:
hpc_fns[fn_name] = OrderedDict()
hpc_fns[fn_name][runtime_name] = hpc_fn
while len(hpc_fns[fn_name]) > per_fn_limit:
hpc_fns[fn_name].popitem(last=False)
# print(hpc_fns)
return hpc_fn
def hpc_wrapper(shape_fn=None, namedtuple_data=False, include_args=[], include_kwargs=[], is_cls_method=False):
def decorate(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ding.enable_hpc_rl:
shape = shape_fn(args, kwargs)
if is_cls_method:
fn_name = args[0].__class__.__name__
else:
fn_name = fn.__name__
runtime_name = '_'.join([fn_name] + [str(s) for s in shape])
if fn_name not in hpc_fns or runtime_name not in hpc_fns[fn_name]:
hpc_fn = register_runtime_fn(fn_name, runtime_name, shape)
else:
hpc_fn = hpc_fns[fn_name][runtime_name]
if is_cls_method:
args = args[1:]
clean_args = []
for i in include_args:
if i < len(args):
clean_args.append(args[i])
nouse_args = list(set(list(range(len(args)))).difference(set(include_args)))
clean_kwargs = {}
for k, v in kwargs.items():
if k in include_kwargs:
if k == 'lambda_':
k = 'lambda'
clean_kwargs[k] = v
nouse_kwargs = list(set(kwargs.keys()).difference(set(include_kwargs)))
if len(nouse_args) > 0 or len(nouse_kwargs) > 0:
                    logging.warning(
'in {}, index {} of args are dropped, and keys {} of kwargs are dropped.'.format(
runtime_name, nouse_args, nouse_kwargs
)
)
if namedtuple_data:
data = args[0] # args[0] is a namedtuple
return hpc_fn(*data, *clean_args[1:], **clean_kwargs)
else:
return hpc_fn(*clean_args, **clean_kwargs)
else:
return fn(*args, **kwargs)
return wrapper
return decorate
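# Usage sketch (assumes hpc_rll is installed and a CUDA device is available):
#   import ding
#   ding.enable_hpc_rl = True  # wrapped functions now dispatch to the hpc kernels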
| 1.695313 | 2 |
resolwe/__init__.py | plojyon/resolwe | 27 | 2692 | <reponame>plojyon/resolwe<gh_stars>10-100
""".. Ignore pydocstyle D400.
=======
Resolwe
=======
Open source enterprise dataflow engine in Django.
"""
from resolwe.__about__ import ( # noqa: F401
__author__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__url__,
__version__,
)
| 0.660156 | 1 |
tcapygen/layoutgen.py | Ahrvo-Trading-Systems/tcapy | 189 | 2820 | from __future__ import division, print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
## Web server components
import dash_core_components as dcc
import dash_html_components as html
import base64
import os
## Date/time components
import pandas as pd
import datetime
from datetime import timedelta
from collections import OrderedDict
from pandas.tseries.offsets import *
from tcapy.vis.layoutdash import LayoutDash
########################################################################################################################
class LayoutDashImplGen(LayoutDash):
"""This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates two
web pages
- detailed_page - for doing detailed tcapy analysis for a specific currency pair
- aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods
"""
def __init__(self, app=None, constants=None, url_prefix=''):
super(LayoutDashImplGen, self).__init__(app=app, constants=constants, url_prefix=url_prefix)
available_dates = pd.date_range(
datetime.datetime.today().date() - timedelta(days=self._constants.gui_lookback_window),
datetime.datetime.today().date(), freq=BDay())
times = pd.date_range("0:00", "23:59", freq="15min")
### create the possible values for drop down boxes on both pages
# Reverse date list (for both detailed and aggregated pages)
self.available_dates = [x.date() for x in available_dates[::-1]]
# For detailed page only
self.available_times = [t.strftime("%H:%M") for t in times]
self.available_tickers = self._constants.available_tickers_dictionary['All']
self.available_venues = self._constants.available_venues_dictionary['All']
self.available_brokers = self._constants.available_brokers_dictionary['All']
self.available_algos = self._constants.available_algos_dictionary['All']
self.available_market_data = self._constants.available_market_data
self.available_order_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'arrival', 'twap', 'vwap',
'buy trade', 'sell trade']
self.available_execution_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'buy trade', 'sell trade']
self.available_slippage_bounds = ['0.25', '0.5', '1.0', '1.25', '1.5', '2.0', 'bid/ask']
# For aggregated page only
self.available_grouped_tickers = self._flatten_dictionary(self._constants.available_tickers_dictionary)
self.available_grouped_venues = self._flatten_dictionary(self._constants.available_venues_dictionary)
self.available_grouped_brokers = self._flatten_dictionary(self._constants.available_brokers_dictionary)
self.available_grouped_algos = self._flatten_dictionary(self._constants.available_algos_dictionary)
self.available_event_types = self._constants.available_event_types
self.available_metrics = self._constants.available_metrics
self.available_reload = ['no', 'yes']
self.available_visualization = ['yes', 'no']
self.construct_layout()
def _flatten_dictionary(self, dictionary):
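        # e.g. {'All': ['EURUSD', 'GBPUSD'], 'G10': ['EURUSD']} ->
        # ['All', 'G10', 'EURUSD', 'GBPUSD'], assuming the inherited
        # flatten_list_of_strings simply concatenates the nested lists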
available = dictionary['All']
available_groups = self._util_func.dict_key_list(dictionary.keys())
return self.flatten_list_of_strings([available_groups, available])
def construct_layout(self):
self.page_content = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
link_bar_dict = {'Detailed' : 'detailed',
'Aggregated' : 'aggregated',
'Compliance' : 'compliance'}
trade_outliers_cols = ['Date', 'ticker', 'side', 'notional cur', 'benchmark', 'exec not',
'exec not in rep cur', 'slippage']
broker_cols = ['Date', 'by broker notional (rep cur)']
        # Main page for detailed analysis of a specific currency pair (e.g. over the course of a few days)
self.pages['detailed'] = html.Div([
self._sc.header_bar('FX: Detailed - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='detailed-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id={'start-date-val' : self.available_dates,
'start-time-val' : self.available_times},
prefix_id='detailed'),
self._sc.drop_down(caption='Finish Date', id=OrderedDict([('finish-date-val', self.available_dates),
('finish-time-val', self.available_times)]),
prefix_id='detailed'),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='detailed',
drop_down_values=self.available_tickers),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='detailed',
drop_down_values=self.available_grouped_brokers),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='detailed',
drop_down_values=self.available_grouped_algos),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='detailed',
drop_down_values=self.available_grouped_venues),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='detailed',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='detailed',
drop_down_values=self.available_metrics)
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='detailed'),
# self.button(caption = 'Print PDF', id = 'detailed-print-pdf-button', className = 'no-print'),
# Orders
self._sc.horizontal_bar(),
self._sc.plot(caption='Orders: Timeline', id='order-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-order-candle-timeline-plot',
self.available_order_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='order-candle-timeline-download-link',
download_file='download_order_candle_timeline', height=500),
self._sc.plot(caption='Orders: Markout', id='order-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Orders: Histogram vs PDF fit', id='order-dist-plot', prefix_id='detailed', height=500),
# Execution trades
self._sc.horizontal_bar(),
self._sc.plot(caption='Executions: Timeline', id='execution-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-execution-candle-timeline-plot',
self.available_execution_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='execution-candle-timeline-download-link',
download_file='download_execution_candle_timeline.csv', height=500),
self._sc.plot(caption='Executions: Markout', id='execution-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Executions: Histogram vs PDF fit', id='execution-dist-plot', prefix_id='detailed', height=500),
# Detailed tcapy markout table for executions
html.Div([
html.H3('Executions: Markout Table'),
html.Div(id='detailed-execution-table')
],
style={'width': '1000px', 'display': 'inline-block', 'marginBottom': 5, 'marginTop': 5, 'marginLeft': 5,
'marginRight': 5}),
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
        # Secondary page for analysing aggregated statistics over long periods of time, e.g. who is the best broker?
self.pages['aggregated'] = html.Div([
self._sc.header_bar('FX: Aggregated - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='aggregated-status'), margin_left=5),
self._sc.horizontal_bar(),
# dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='aggregated',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='aggregated',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Event Type', id='event-type-val', prefix_id='aggregated',
drop_down_values=self.available_event_types),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='aggregated',
drop_down_values=self.available_metrics),
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='aggregated'),
# , msg_id='aggregated-status'),
self._sc.horizontal_bar(),
# self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]),
self._sc.plot(caption='Aggregated Trader: Summary',
id=['execution-by-ticker-bar-plot', 'execution-by-venue-bar-plot'], prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: Timeline', id='execution-by-ticker-timeline-plot',
prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: PDF fit (' + self._constants.reporting_currency + ' notional)', id=['execution-by-ticker-dist-plot',
'execution-by-venue-dist-plot'],
prefix_id='aggregated', height=500),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
self.pages['compliance'] = html.Div([
self._sc.header_bar('FX: Compliance Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='compliance-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='compliance',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='compliance',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='compliance',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Filter by Time', id='filter-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Start Time of Day', id='start-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Finish Time of Day', id='finish-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Slippage to Mid (bp)', id='slippage-bounds-val', prefix_id='compliance',
drop_down_values=self.available_slippage_bounds),
self._sc.drop_down(caption='Visualization', id='visualization-val', prefix_id='compliance',
drop_down_values=self.available_visualization)
]),
self._sc.horizontal_bar(),
html.Div([
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='compliance'),
# self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'),
# self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'),
]),
self._sc.horizontal_bar(),
self._sc.table(caption='Compliance: Trade Outliers', id='execution-by-anomalous-table', prefix_id='compliance',
columns=trade_outliers_cols,
downloadplot_caption='Trade outliers CSV',
downloadplot_tag='execution-by-anomalous-download-link',
download_file='download_execution_by_anomalous.csv'),
self._sc.table(caption='Compliance: Totals by Broker', id='summary-by-broker-table', prefix_id='compliance',
columns=broker_cols,
downloadplot_caption='Download broker CSV',
downloadplot_tag='summary-by-broker-download-link',
download_file='download_broker.csv'
),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
# ID flags
self.id_flags = {
# Detailed trader page
# 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'},
# 'markout_trade_orders' : {'client-orders': 'order_df', 'executions': 'trade_df'},
'detailed_candle_timeline_trade_order': {'execution': 'sparse_market_trade_df',
'order': 'sparse_market_order_df'},
'detailed_markout_trade_order': {'execution': 'trade_df', 'order': 'order_df'},
'detailed_table_trade_order': {'execution': 'table_trade_df_markout_by_all'},
'detailed_dist_trade_order': {'execution': 'dist_trade_df_by/pdf/side', 'order': 'dist_order_df_by/pdf/side'},
'detailed_download_link_trade_order': {'execution-candle-timeline': 'sparse_market_trade_df',
'order-candle-timeline': 'sparse_market_order_df'},
# Aggregated trader page
'aggregated_bar_trade_order': {'execution-by-ticker': 'bar_trade_df_by/mean/ticker',
'execution-by-venue': 'bar_trade_df_by/mean/venue'},
'aggregated_timeline_trade_order': {'execution-by-ticker': 'timeline_trade_df_by/mean_date/ticker',
'execution-by-venue': 'timeline_trade_df_by/mean_date/venue'},
'aggregated_dist_trade_order': {'execution-by-ticker': 'dist_trade_df_by/pdf/ticker',
'execution-by-venue': 'dist_trade_df_by/pdf/venue'},
# Compliance page
'compliance_metric_table_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
'compliance_download_link_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
}
| 1.796875 | 2 |
geolucidate/functions.py | kurtraschke/geolucidate | 3 | 2948 | <filename>geolucidate/functions.py
# -*- coding: utf-8 -*-
from decimal import Decimal, setcontext, ExtendedContext
from geolucidate.links.google import google_maps_link
from geolucidate.links.tools import MapLink
from geolucidate.parser import parser_re
setcontext(ExtendedContext)
def _cleanup(parts):
"""
    Normalize the parts matched by :obj:`parser.parser_re` into
    degrees, minutes, and seconds.
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30',
... 'longdeg':'50','longmin':'40'})
['S', '60', '30', '00', 'W', '50', '40', '00']
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',
... 'longdeg':'50','longmin':'40','longdecsec':'.90'})
['S', '60', '30.50', '00', 'W', '50', '40.90', '00']
"""
latdir = (parts['latdir'] or parts['latdir2']).upper()[0]
longdir = (parts['longdir'] or parts['longdir2']).upper()[0]
latdeg = parts.get('latdeg')
longdeg = parts.get('longdeg')
latmin = parts.get('latmin', '00') or '00'
longmin = parts.get('longmin', '00') or '00'
latdecsec = parts.get('latdecsec', '')
longdecsec = parts.get('longdecsec', '')
if (latdecsec and longdecsec):
latmin += latdecsec
longmin += longdecsec
latsec = '00'
longsec = '00'
else:
latsec = parts.get('latsec', '') or '00'
longsec = parts.get('longsec', '') or '00'
return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec]
def _convert(latdir, latdeg, latmin, latsec,
longdir, longdeg, longmin, longsec):
"""
Convert normalized degrees, minutes, and seconds to decimal degrees.
Quantize the converted value based on the input precision and
return a 2-tuple of strings.
>>> _convert('S','50','30','30','W','50','30','30')
('-50.508333', '-50.508333')
>>> _convert('N','50','27','55','W','127','27','65')
('50.459167', '-127.460833')
"""
if (latsec != '00' or longsec != '00'):
precision = Decimal('0.000001')
elif (latmin != '00' or longmin != '00'):
precision = Decimal('0.001')
else:
precision = Decimal('1')
latitude = Decimal(latdeg)
latmin = Decimal(latmin)
latsec = Decimal(latsec)
longitude = Decimal(longdeg)
longmin = Decimal(longmin)
longsec = Decimal(longsec)
if latsec > 59 or longsec > 59:
# Assume that 'seconds' greater than 59 are actually a decimal
# fraction of minutes
latitude += (latmin +
(latsec / Decimal('100'))) / Decimal('60')
longitude += (longmin +
(longsec / Decimal('100'))) / Decimal('60')
else:
latitude += (latmin +
(latsec / Decimal('60'))) / Decimal('60')
longitude += (longmin +
(longsec / Decimal('60'))) / Decimal('60')
if latdir == 'S':
latitude *= Decimal('-1')
if longdir == 'W':
longitude *= Decimal('-1')
lat_str = str(latitude.quantize(precision))
long_str = str(longitude.quantize(precision))
return (lat_str, long_str)
def replace(string, sub_function=google_maps_link()):
"""
Replace detected coordinates with a map link, using the given substitution
function.
The substitution function will be passed a :class:`~.MapLink` instance, and
should return a string which will be substituted by :func:`re.sub` in place
of the detected coordinates.
>>> replace("58147N/07720W")
'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>'
>>> replace("5814N/07720W", google_maps_link('satellite'))
'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>'
>>> from geolucidate.links.bing import bing_maps_link
>>> replace("58N/077W", bing_maps_link('map'))
'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>'
"""
def do_replace(match):
original_string = match.group()
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
return sub_function(MapLink(original_string, latitude, longitude))
return parser_re.sub(do_replace, string)
def get_replacements(string, sub_function=google_maps_link()):
"""
Return a dict whose keys are instances of :class:`re.Match` and
whose values are the corresponding replacements. Use
:func:`get_replacements` when the replacement cannot be performed
through ordinary string substitution by :func:`re.sub`, as in
:func:`replace`.
>>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")
... #doctest: +ELLIPSIS
{<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'}
>>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"
>>> replacements = get_replacements(test_string)
>>> offset = 0
>>> out = bytearray(test_string, encoding="ascii", errors="replace")
>>> for (match, link) in replacements.items():
... start = match.start() + offset
... end = match.end() + offset
... out[start:end] = bytearray(link, encoding="ascii", errors="replace")
... offset += (len(link) - len(match.group()))
>>> out.decode(encoding="ascii") == replace(test_string)
True
"""
substitutions = {}
matches = parser_re.finditer(string)
for match in matches:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
substitutions[match] = sub_function(MapLink(match.group(),
latitude, longitude))
return substitutions
| 1.640625 | 2 |
apps/orders/models.py | LinkanDawang/FreshMallDemo | 0 | 3076 | from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import GoodsSKU
# Create your models here.
class OrderInfo(BaseModel):
"""订单信息"""
PAY_METHOD = ['1', '2']
PAY_METHOD_CHOICES = (
(1, "货到付款"),
(2, "支付宝"),
)
ORDER_STATUS_CHOICES = (
(1, "待支付"),
(2, "待发货"),
(3, "待收货"),
(4, "待评价"),
(5, "已完成"),
)
"""---------订单信息------------------------"""
PAY_METHODS = {
1: "货到付款",
2: "支付宝",
}
ORDER_STATUS = {
1: "待支付",
2: "待发货",
3: "待收货",
4: "待评价",
5: "已完成",
}
PAY_METHODS_ENUM = {
"CASH": 1,
"ALIPAY": 2
}
ORDER_STATUS_ENUM = {
"UNPAID": 1,
"UNSEND": 2,
"UNRECEIVED": 3,
"UNCOMMENT": 4,
"FINISHED": 5
}
order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="下单用户")
    address = models.ForeignKey(Address, on_delete=models.CASCADE, verbose_name="收货地址")
total_count = models.IntegerField(default=1, verbose_name="商品总数")
total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
trans_cost = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = "df_order_info"
class OrderGoods(BaseModel):
"""订单商品"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单")
sku = models.ForeignKey(GoodsSKU, on_delete=models.CASCADE, verbose_name="订单商品")
count = models.IntegerField(default=1, verbose_name="数量")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
comment = models.TextField(default="", verbose_name="评价信息")
class Meta:
db_table = "df_order_goods"
| 1.585938 | 2 |
quaesit/agent.py | jgregoriods/quaesit | 0 | 3204 | <gh_stars>0
import inspect
from math import hypot, sin, asin, cos, radians, degrees
from abc import ABCMeta, abstractmethod
from random import randint, choice
from typing import Dict, List, Tuple, Union
class Agent(metaclass=ABCMeta):
"""
Class to represent an agent in an agent-based model.
"""
_id = 0
colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange',
'pink', 'purple', 'red', 'yellow']
def __init__(self, world, coords: Tuple = None):
self._id = Agent._id
Agent._id += 1
self.world = world
self.coords = coords or (randint(0, self.world.width - 1),
randint(0, self.world.height - 1))
self.direction = 90
self.breed = self.__class__.__name__.lower()
self.icon = '.'
self.color = choice(self.colors)
self.world.add_agent(self)
def die(self):
"""
Remove the agent from the world.
"""
del self.world.agents[self._id]
self.world.grid[self.coords]['agents'].remove(self)
del self
def hatch(self):
"""
Creates an agent and initializes it with the same parameters as
oneself.
"""
sig = inspect.signature(self.__init__)
filter_keys = [param.name for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
filtered_dict = {filter_key: self.__dict__[filter_key]
for filter_key in filter_keys}
return self.__class__(**filtered_dict)
def move_to(self, coords: Tuple):
"""
Places the agent in a different cell of the world grid.
"""
self.world.remove_from_grid(self)
self.coords = coords
self.world.place_on_grid(self)
    def cell_here(self, layer=None):
"""
Returns the value of a layer in the model's grid for the cell
where the agent is. If no layer is specified, the values of all
layers are returned.
"""
if layer is not None:
return self.world.grid[self.coords][layer]
else:
return self.world.grid[self.coords]
def get_distance(self, coords: Tuple) -> int:
"""
Returns the distance (in cells) from the agent to a pair of
coordinates.
"""
x, y = coords
return round(hypot((x - self.coords[0]), (y - self.coords[1])))
def cells_in_radius(self, radius: int) -> Dict:
"""
Returns all cells and respective attributes within a distance
of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if self.get_distance((x, y)) <= radius}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid)}
return neighborhood
def empty_cells_in_radius(self, radius: int) -> Dict:
"""
Returns all empty cells (with no agents on them) and respective
attributes within a distance of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and not
self.world.grid[self.world.to_torus((x, y))]
['agents'])}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid and not
self.world.grid[(x, y)]['agents'])}
return neighborhood
def nearest_cell(self, cells: Union[List, Dict]) -> Tuple:
"""
Given a list or dictionary of cells, returns the coordinates of
the cell that is nearest to the agent.
"""
dists = {cell: self.get_distance(cell) for cell in cells}
return min(dists, key=dists.get)
def agents_in_radius(self, radius: int):
"""
Returns all agents within a distance of oneself.
"""
neighborhood = self.cells_in_radius(radius)
neighbors = [agent for coords in neighborhood
for agent in self.world.grid[coords]['agents']
if agent is not self]
return neighbors
def agents_here(self) -> List:
"""
Returns all agents located on the same cell as oneself.
"""
return [agent for agent in self.world.grid[self.coords]['agents']
if agent is not self]
def nearest_agent(self, agents: List = None):
"""
Given a list of agents, returns the agent that is nearest to
oneself. If no list is provided, all agents are evaluated.
"""
if agents is None:
agents = [self.world.agents[_id] for _id in self.world.agents]
dists = {agent: self.get_distance(agent.coords)
for agent in agents if agent is not self}
return min(dists, key=dists.get)
def turn_right(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the right.
"""
self.direction = round((self.direction - angle) % 360)
def turn_left(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the left.
"""
self.direction = round((self.direction + angle) % 360)
def forward(self, n_steps: int = 1):
"""
Moves the agent a number of cells forward in the direction it
is currently facing.
"""
x = round(self.coords[0] + cos(radians(self.direction)) * n_steps)
y = round(self.coords[1] + sin(radians(self.direction)) * n_steps)
if self.world.torus:
self.move_to(self.world.to_torus((x, y)))
elif (x, y) in self.world.grid:
self.move_to((x, y))
def face_towards(self, coords: Tuple):
"""
Turns the agent's direction towards a given pair of coordinates.
"""
if coords != self.coords:
xdif = coords[0] - self.coords[0]
ydif = coords[1] - self.coords[1]
dist = hypot(xdif, ydif)
angle = degrees(asin(ydif / dist))
if xdif < 0:
self.direction = round(180 - angle)
else:
self.direction = round((360 + angle) % 360)
def random_walk(self, n_steps: int = 1):
"""
Moves the agent one cell forward in a random direction for a
number of times.
"""
for i in range(n_steps):
self.turn_right(randint(0, 360))
self.forward()
@abstractmethod
def step(self):
"""
Methods to be performed by the agent at each step of the
simulation.
"""
raise NotImplementedError
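# Minimal usage sketch (illustrative): Agent is abstract, so a model defines a
# subclass that implements step(). `world` is assumed to be a compatible grid
# world exposing the attributes used above (agents, grid, width, height, torus).
#
# class Walker(Agent):
#     def step(self):
#         self.random_walk()
#
# walker = Walker(world)
# walker.step()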
| 2.890625 | 3 |
tools/load_demo_data.py | glenn2763/skyportal | 0 | 3332 | import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
if __name__ == "__main__":
"""Insert test data"""
env, cfg = load_env()
basedir = Path(os.path.dirname(__file__)) / ".."
with status(f"Connecting to database {cfg['database']['database']}"):
init_db(**cfg["database"])
with status("Dropping all tables"):
drop_tables()
with status("Creating tables"):
create_tables()
for model in Base.metadata.tables:
print(" -", model)
with status(f"Creating permissions"):
setup_permissions()
with status(f"Creating dummy users"):
super_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
group_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
full_user = User(username="<EMAIL>", role_ids=["Full user"])
view_only_user = User(
username="<EMAIL>", role_ids=["View only"]
)
DBSession().add_all(
[super_admin_user, group_admin_user, full_user, view_only_user]
)
for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
DBSession().add(
TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
)
with status("Creating token"):
token = create_token(
[
"Manage groups",
"Manage sources",
"Upload data",
"Comment",
"Manage users",
],
super_admin_user.id,
"load_demo_data token",
)
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
        if not (response_status == 200 and data["status"] == "success"):
            raise RuntimeError(
                f'API call to {endpoint} failed with status {response_status}: {data["message"]}'
            )
return data
with status("Launching web app & executing API calls"):
try:
response_status, data = api("GET", "sysinfo", token=token)
app_already_running = True
except requests.ConnectionError:
app_already_running = False
web_client = subprocess.Popen(
["make", "run"], cwd=basedir, preexec_fn=os.setsid
)
server_url = f"http://localhost:{cfg['ports.app']}"
print()
print(f"Waiting for server to appear at {server_url}...")
try:
verify_server_availability(server_url)
print("App running - continuing with API calls")
with status("Creating dummy group & adding users"):
data = assert_post(
"groups",
data={
"name": "Stream A",
"group_admins": [
super_admin_user.username,
group_admin_user.username,
],
},
)
group_id = data["data"]["id"]
for u in [view_only_user, full_user]:
data = assert_post(
f"groups/{group_id}/users/{u.username}", data={"admin": False}
)
with status("Creating dummy instruments"):
data = assert_post(
"telescope",
data={
"name": "Palomar 1.5m",
"nickname": "P60",
"lat": 33.3633675,
"lon": -116.8361345,
"elevation": 1870,
"diameter": 1.5,
"group_ids": [group_id],
},
)
telescope1_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "P60 Camera",
"type": "phot",
"band": "optical",
"telescope_id": telescope1_id,
},
)
instrument1_id = data["data"]["id"]
data = assert_post(
"telescope",
data={
"name": "Nordic Optical Telescope",
"nickname": "NOT",
"lat": 28.75,
"lon": 17.88,
"elevation": 1870,
"diameter": 2.56,
"group_ids": [group_id],
},
)
telescope2_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "ALFOSC",
"type": "both",
"band": "optical",
"telescope_id": telescope2_id,
},
)
with status("Creating dummy sources"):
SOURCES = [
{
"id": "14gqr",
"ra": 353.36647,
"dec": 33.646149,
"redshift": 0.063,
"group_ids": [group_id],
"comments": [
"No source at transient location to R>26 in LRIS imaging",
"Strong calcium lines have emerged.",
],
},
{
"id": "16fil",
"ra": 322.718872,
"dec": 27.574113,
"redshift": 0.0,
"group_ids": [group_id],
"comments": ["Frogs in the pond", "The eagle has landed"],
},
]
(basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
for source_info in SOURCES:
comments = source_info.pop("comments")
data = assert_post("sources", data=source_info)
assert data["data"]["id"] == source_info["id"]
for comment in comments:
data = assert_post(
"comment",
data={"source_id": source_info["id"], "text": comment},
)
phot_file = basedir / "skyportal/tests/data/phot.csv"
phot_data = pd.read_csv(phot_file)
data = assert_post(
"photometry",
data={
"source_id": source_info["id"],
"time_format": "iso",
"time_scale": "utc",
"instrument_id": instrument1_id,
"observed_at": phot_data.observed_at.tolist(),
"mag": phot_data.mag.tolist(),
"e_mag": phot_data.e_mag.tolist(),
"lim_mag": phot_data.lim_mag.tolist(),
"filter": phot_data["filter"].tolist(),
},
)
spec_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"skyportal",
"tests",
"data",
"spec.csv",
)
spec_data = pd.read_csv(spec_file)
for i, df in spec_data.groupby("instrument_id"):
data = assert_post(
"spectrum",
data={
"source_id": source_info["id"],
"observed_at": str(datetime.datetime(2014, 10, 24)),
"instrument_id": 1,
"wavelengths": df.wavelength.tolist(),
"fluxes": df.flux.tolist(),
},
)
for ttype in ["new", "ref", "sub"]:
fname = f'{source_info["id"]}_{ttype}.png'
fpath = basedir / f"skyportal/tests/data/{fname}"
thumbnail_data = base64.b64encode(
open(os.path.abspath(fpath), "rb").read()
)
data = assert_post(
"thumbnail",
data={
"source_id": source_info["id"],
"data": thumbnail_data,
"ttype": ttype,
},
)
source = Source.query.get(source_info["id"])
source.add_linked_thumbnails()
finally:
if not app_already_running:
print("Terminating web app")
os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
| 1.34375 | 1 |
tools.py | Jakuko99/effectb | 1 | 3460 | from calendar import month_name
class Tools:
def __init__(self):
self.output = ""
def formatDate(self, date):
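        # e.g. formatDate("2021-03-05") -> "05. March 2021"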
elements = date.split("-")
return f"{elements[2]}. {month_name[int(elements[1])]} {elements[0]}"
    def shortenText(self, string, n):  # return first n sentences from string
        first = string.find(".")
        for _ in range(n - 1):
            if string.find(".", first + 1) != -1:
                first = string.find(".", first + 1)
        # string[:first - len(string)] equals string[:first], i.e. everything
        # up to the n-th period; the period is then re-appended
        return f"{string[:first-len(string)]}."
def tupleUnpack(self, tup):
self.output = ""
for item in tup:
self.output += f"{item} "
return self.output[:-1]
def joinList(self, list):
self.output = ""
for item in list:
self.output += f"{item}, "
return self.output[:-2] #remove last ', '
def partialJoin(self, list, n):
self.output = ""
i = 0
for item in list:
self.output += f"{item}, "
i += 1
if i >= n:
break
return self.output[:-2]
def processFilmography(self, list, n):
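        # e.g. processFilmography([{'title': 'Film A', 'year': 1999}], 3)
        # returns 'Film A (1999)'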
self.output = ""
i = 0
for item in list:
if 'year' in item:
self.output += f"{item['title']} ({item['year']}), "
else:
self.output += f"{item['title'].replace(' ()', '')}, "
i += 1
if i >= n:
break
return self.output[:-2]
def convertTime(self, runtime):
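        # e.g. convertTime("135") -> "2 h 15 min"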
time = int(runtime)
mins = time % 60
hours = int(time / 60)
if hours >= 1:
return f"{hours} h {mins} min"
else:
return f"{mins} min" | 2.46875 | 2 |
tests/test_pyclipper.py | odidev/pyclipper | 0 | 3588 | <filename>tests/test_pyclipper.py<gh_stars>0
#!/usr/bin/python
"""
Tests for Pyclipper wrapper library.
"""
from __future__ import print_function
from unittest2 import TestCase, main
import sys
if sys.version_info < (3,):
integer_types = (int, long)
else:
integer_types = (int,)
import pyclipper
# Example polygons from http://www.angusj.com/delphi/clipper.php
PATH_SUBJ_1 = [[180, 200], [260, 200], [260, 150], [180, 150]] # square, orientation is False
PATH_SUBJ_2 = [[215, 160], [230, 190], [200, 190]] # triangle
PATH_CLIP_1 = [[190, 210], [240, 210], [240, 130], [190, 130]] # square
PATH_SIGMA = [[300, 400], [100, 400], [200, 300], [100, 200], [300, 200]] # greek letter sigma
PATTERN = [[4, -6], [6, -6], [-4, 6], [-6, 6]]
INVALID_PATH = [[1, 1], ] # less than 2 vertices
class TestPyclipperModule(TestCase):
def test_has_classes(self):
self.assertTrue(hasattr(pyclipper, 'Pyclipper'))
self.assertTrue(hasattr(pyclipper, 'PyclipperOffset'))
def test_has_namespace_methods(self):
for method in ('Orientation', 'Area', 'PointInPolygon', 'SimplifyPolygon', 'SimplifyPolygons',
'CleanPolygon', 'CleanPolygons', 'MinkowskiSum', 'MinkowskiSum2', 'MinkowskiDiff',
'PolyTreeToPaths', 'ClosedPathsFromPolyTree', 'OpenPathsFromPolyTree',
'ReversePath', 'ReversePaths'):
self.assertTrue(hasattr(pyclipper, method))
class TestNamespaceMethods(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
def test_orientation(self):
self.assertFalse(pyclipper.Orientation(PATH_SUBJ_1))
self.assertTrue(pyclipper.Orientation(PATH_SUBJ_1[::-1]))
def test_area(self):
# area less than 0 because orientation is False
area_neg = pyclipper.Area(PATH_SUBJ_1)
area_pos = pyclipper.Area(PATH_SUBJ_1[::-1])
self.assertLess(area_neg, 0)
self.assertGreater(area_pos, 0)
self.assertEqual(abs(area_neg), area_pos)
def test_point_in_polygon(self):
# on polygon
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
# in polygon
self.assertEqual(pyclipper.PointInPolygon((200, 180), PATH_SUBJ_1), 1)
# outside of polygon
self.assertEqual(pyclipper.PointInPolygon((500, 500), PATH_SUBJ_1), 0)
def test_minkowski_sum(self):
solution = pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
self.assertGreater(len(solution), 0)
def test_minkowski_sum2(self):
solution = pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
self.assertGreater(len(solution), 0)
def test_minkowski_diff(self):
solution = pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
self.assertGreater(len(solution), 0)
def test_reverse_path(self):
solution = pyclipper.ReversePath(PATH_SUBJ_1)
manualy_reversed = PATH_SUBJ_1[::-1]
self.check_reversed_path(solution, manualy_reversed)
def test_reverse_paths(self):
solution = pyclipper.ReversePaths([PATH_SUBJ_1])
manualy_reversed = [PATH_SUBJ_1[::-1]]
self.check_reversed_path(solution[0], manualy_reversed[0])
def check_reversed_path(self, path_1, path_2):
        if len(path_1) != len(path_2):
return False
for i in range(len(path_1)):
self.assertEqual(path_1[i][0], path_2[i][0])
self.assertEqual(path_1[i][1], path_2[i][1])
def test_simplify_polygon(self):
solution = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
def test_simplify_polygons(self):
solution = pyclipper.SimplifyPolygons([PATH_SUBJ_1])
solution_single = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution), len(solution_single))
_do_solutions_match(solution, solution_single)
def test_clean_polygon(self):
solution = pyclipper.CleanPolygon(PATH_CLIP_1)
self.assertEqual(len(solution), len(PATH_CLIP_1))
def test_clean_polygons(self):
solution = pyclipper.CleanPolygons([PATH_CLIP_1])
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution[0]), len(PATH_CLIP_1))
class TestFilterPyPolyNode(TestCase):
def setUp(self):
tree = pyclipper.PyPolyNode()
tree.Contour.append(PATH_CLIP_1)
tree.IsOpen = True
child = pyclipper.PyPolyNode()
child.IsOpen = False
child.Parent = tree
child.Contour = PATH_SUBJ_1
tree.Childs.append(child)
child = pyclipper.PyPolyNode()
child.IsOpen = True
child.Parent = tree
child.Contour = PATH_SUBJ_2
tree.Childs.append(child)
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = PATTERN
child.Childs.append(child2)
# empty contour should not
# be included in filtered results
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = []
child.Childs.append(child2)
self.tree = tree
def test_polytree_to_paths(self):
paths = pyclipper.PolyTreeToPaths(self.tree)
self.check_paths(paths, 4)
def test_closed_paths_from_polytree(self):
paths = pyclipper.ClosedPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def test_open_paths_from_polytree(self):
paths = pyclipper.OpenPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def check_paths(self, paths, expected_nr):
self.assertEqual(len(paths), expected_nr)
self.assertTrue(all((len(path) > 0 for path in paths)))
class TestPyclipperAddPaths(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
def test_add_path(self):
# should not raise an exception
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
# should not raise an exception
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
def test_add_path_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPath, INVALID_PATH, pyclipper.PT_CLIP, True)
def test_add_paths_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPaths, [INVALID_PATH, INVALID_PATH],
pyclipper.PT_CLIP, True)
try:
self.pc.AddPaths([INVALID_PATH, PATH_CLIP_1], pyclipper.PT_CLIP)
self.pc.AddPaths([PATH_CLIP_1, INVALID_PATH], pyclipper.PT_CLIP)
except pyclipper.ClipperException:
self.fail("add_paths raised ClipperException when not all paths were invalid")
class TestClassProperties(TestCase):
def check_property_assignment(self, pc, prop_name, values):
for val in values:
setattr(pc, prop_name, val)
self.assertEqual(getattr(pc, prop_name), val)
def test_pyclipper_properties(self):
pc = pyclipper.Pyclipper()
for prop_name in ('ReverseSolution', 'PreserveCollinear', 'StrictlySimple'):
self.check_property_assignment(pc, prop_name, [True, False])
def test_pyclipperoffset_properties(self):
for factor in range(6):
pyclipper.SCALING_FACTOR = 10 ** factor
pc = pyclipper.PyclipperOffset()
for prop_name in ('MiterLimit', 'ArcTolerance'):
self.check_property_assignment(pc, prop_name, [2.912, 132.12, 12, -123])
class TestPyclipperExecute(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
self.add_default_paths(self.pc)
self.default_args = [pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD]
@staticmethod
def add_default_paths(pc):
pc.AddPath(PATH_CLIP_1, pyclipper.PT_CLIP)
pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], pyclipper.PT_SUBJECT)
@staticmethod
def add_paths(pc, clip_path, subj_paths, addend=None, multiplier=None):
pc.AddPath(_modify_vertices(clip_path, addend=addend, multiplier=multiplier), pyclipper.PT_CLIP)
for subj_path in subj_paths:
pc.AddPath(_modify_vertices(subj_path, addend=addend, multiplier=multiplier), pyclipper.PT_SUBJECT)
def test_get_bounds(self):
bounds = self.pc.GetBounds()
self.assertIsInstance(bounds, pyclipper.PyIntRect)
self.assertEqual(bounds.left, 180)
self.assertEqual(bounds.right, 260)
self.assertEqual(bounds.top, 130)
self.assertEqual(bounds.bottom, 210)
def test_execute(self):
solution = self.pc.Execute(*self.default_args)
self.assertEqual(len(solution), 2)
def test_execute2(self):
solution = self.pc.Execute2(*self.default_args)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.check_pypolynode(solution)
def test_execute_empty(self):
pc = pyclipper.Pyclipper()
with self.assertRaises(pyclipper.ClipperException):
pc.Execute(pyclipper.CT_UNION,
pyclipper.PFT_NONZERO,
pyclipper.PFT_NONZERO)
def test_clear(self):
self.pc.Clear()
with self.assertRaises(pyclipper.ClipperException):
self.pc.Execute(*self.default_args)
def test_exact_results(self):
"""
Test whether coordinates passed into the library are returned exactly, if they are not affected by the
operation.
"""
pc = pyclipper.Pyclipper()
# Some large triangle.
path = [[[0, 1], [0, 0], [15 ** 15, 0]]]
pc.AddPaths(path, pyclipper.PT_SUBJECT, True)
result = pc.Execute(pyclipper.PT_CLIP, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
assert result == path
def check_pypolynode(self, node):
self.assertTrue(len(node.Contour) == 0 or len(node.Contour) > 2)
# check vertex coordinate, should not be an iterable (in that case
# that means that node.Contour is a list of paths, should be path
if node.Contour:
self.assertFalse(hasattr(node.Contour[0][0], '__iter__'))
for child in node.Childs:
self.check_pypolynode(child)
class TestPyclipperOffset(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
@staticmethod
def add_path(pc, path):
pc.AddPath(path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
def test_execute(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 1)
def test_execute2(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute2(2.0)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.assertEqual(len(pyclipper.OpenPathsFromPolyTree(solution)), 0)
self.assertEqual(len(pyclipper.ClosedPathsFromPolyTree(solution)), 1)
def test_clear(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
pc.Clear()
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 0)
class TestScalingFactorWarning(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 2.
self.pc = pyclipper.Pyclipper()
def test_orientation(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Orientation(PATH_SUBJ_1)
def test_area(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Area(PATH_SUBJ_1)
def test_point_in_polygon(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
def test_minkowski_sum(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
def test_minkowski_sum2(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
def test_minkowski_diff(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
def test_add_path(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
class TestScalingFunctions(TestCase):
scale = 2 ** 31
path = [(0, 0), (1, 1)]
paths = [path] * 3
def test_value_scale_to(self):
value = 0.5
res = pyclipper.scale_to_clipper(value, self.scale)
assert isinstance(res, integer_types)
assert res == int(value * self.scale)
def test_value_scale_from(self):
value = 1000000000000
res = pyclipper.scale_from_clipper(value, self.scale)
assert isinstance(res, float)
# Convert to float to get "normal" division in Python < 3.
assert res == float(value) / self.scale
def test_path_scale_to(self):
res = pyclipper.scale_to_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, integer_types) for i in res for j in i)
def test_path_scale_from(self):
res = pyclipper.scale_from_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, float) for i in res for j in i)
def test_paths_scale_to(self):
res = pyclipper.scale_to_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, integer_types) for i in res for j in i for k in j)
def test_paths_scale_from(self):
res = pyclipper.scale_from_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, float) for i in res for j in i for k in j)
class TestNonStandardNumbers(TestCase):
def test_sympyzero(self):
try:
from sympy import Point2D
from sympy.core.numbers import Zero
except ImportError:
self.skipTest("Skipping, sympy not available")
path = [(0,0), (0,1)]
path = [Point2D(v) for v in [(0,0), (0,1)]]
assert type(path[0].x) == Zero
path = pyclipper.scale_to_clipper(path)
assert path == [[0, 0], [0, 2147483648]]
def _do_solutions_match(paths_1, paths_2, factor=None):
if len(paths_1) != len(paths_2):
return False
paths_1 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_1]
paths_2 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_2]
return all(((p_1 in paths_2) for p_1 in paths_1))
def _modify_vertices(path, addend=0.0, multiplier=1.0, converter=None):
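    # e.g. _modify_vertices([[1, 2]], addend=1, multiplier=2) -> [[3, 5]]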
path = path[:]
def convert_coordinate(c):
if multiplier is not None:
c *= multiplier
if addend is not None:
c += addend
if converter:
c = converter(c)
return c
return [[convert_coordinate(c) for c in v] for v in path]
def run_tests():
main()
if __name__ == '__main__':
run_tests()
| 1.773438 | 2 |
YouTube/CursoEmVideo/python/ex012.py | Fh-Shadow/Progamando | 0 | 3716 | <filename>YouTube/CursoEmVideo/python/ex012.py<gh_stars>0
a = float(input('Qual é o preço do produto? R$'))
d = a - (a * 23 / 100)
print('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}' .format(a, d))
| 2.046875 | 2 |
shutTheBox/main.py | robi1467/shut-the-box | 0 | 3844 | <filename>shutTheBox/main.py
import random
numbers_list = [1,2,3,4,5,6,7,8,9,10]
game_won = False
game_completed = False
#Stats
games_played = 0
games_won = 0
games_lost = 0
average_score = 0
total_score = 0
def welcome():
welcome_message = "Welcome to shut the box"
print(welcome_message)
i = 0
result = ""
while i < len(numbers_list):
if i < len(numbers_list)-1:
result += str(numbers_list[i]) + " "
else:
result += str(numbers_list[i])
i+=1
print(result)
def dice_roll(amount):
total = 0
i = 0
while i < amount:
total += random.randint(1, 6)
i+=1
return total
def choose_dice_amount():
amount = 0
while True:
try:
amount = int(input("You choose to roll one or two dice. Please enter either '1' or '2': "))
except ValueError:
print("INVALID ENTRY PLEASE TRY AGAIN")
continue
if amount == 1 or amount == 2:
return amount
else:
print("INVALID ENTRY PLEASE TRY AGAIN!")
continue
return amount
def choose_number_to_drop(target_amount):
entered = 0
goal = target_amount
entered_numbers = list()
while goal != 0:
try:
print("Available numbers: " + str(numbers_list) + " to get to " + str(target_amount))
entered = int(input("Please enter a number that is available: "))
except ValueError:
print("Invalid Entry, please try again")
continue
if entered not in numbers_list or entered in entered_numbers:
print("Invalid Entry, please try again")
continue
else:
goal -= entered
entered_numbers.append(entered)
if goal < 0:
goal = target_amount
entered_numbers = list()
i = 0
while i < len(entered_numbers):
numbers_list.remove(entered_numbers[i])
i += 1
def check_lost_game(rolled):
value = True
if rolled not in numbers_list:
i = 0
while i < len(numbers_list):
j = i+1
while j< len(numbers_list):
if numbers_list[i] + numbers_list[j] == rolled:
return False
k = j+1
while k < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] == rolled:
return False
l = k+1
while l < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] + numbers_list[l] == rolled:
return False
l+=1
k+=1
j+=1
i +=1
else:
value = False
return value
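# e.g. with numbers_list == [7, 8, 9, 10], check_lost_game(5) returns True:
# no combination of up to four remaining numbers sums to 5, so the game is lost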
def end_game():
game_completed = True
return game_completed
def win_game():
game_won = True
return game_won
def score_game():
score = 0
i = 0
while i < len(numbers_list):
score += numbers_list[i]
i+=1
return score
def all_less_than_7():
less_than_7 = True
i = 0
while i < len(numbers_list):
if numbers_list[i] > 6:
less_than_7 = False
i += 1
return less_than_7
def keep_playing_input():
while True:
try:
            continue_playing = input("Do you wish to keep playing? y or n: ")
        except ValueError:
            print("Invalid choice; please try again")
            continue
        if continue_playing.lower() == "y":  # .lower must be called, not compared
return True
else:
return False
keep_playing = True
while keep_playing:
numbers_list = [1,2,3,4,5,6,7,8,9,10]
welcome()
roll_total = 0
while roll_total < 55:
dice_amount = 2
if all_less_than_7():
dice_amount = choose_dice_amount()
dice_total = dice_roll(dice_amount)
print("Your roll is: " + str(dice_total))
if check_lost_game(dice_total):
print("It is impossible to continue the game with this roll")
break
choose_number_to_drop(dice_total)
roll_total += dice_total
if roll_total == 55:
game_won = win_game()
if game_won:
print("Congrats you won!!!!")
games_played +=1
games_won +=1
else:
print("You lose, your score is " + str(score_game()))
print("Numbers remaining: " + str(numbers_list))
games_played += 1
games_lost += 1
total_score += score_game()
average_score = total_score/games_played
game_won = False
print("STATS:\n Games Played: " + str(games_played) + "\nGames Won: " + str(games_won) + "\nGames Lost: " + str(games_lost)
+ "\nAverage Score: " + str(average_score) + "\nTotal Score: " + str(total_score))
    keep_playing = keep_playing_input()
| 2.578125 | 3 |
wpt/websockets/websock_handlers/open_delay_wsh.py | gsnedders/presto-testo | 0 | 3972 | #!/usr/bin/python
from mod_pywebsocket import msgutil
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
time.sleep(3)
msgutil.send_message(request, "line")
| 1 | 1 |
1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise | 0 | 4100 | n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
q, t = input().split(' ')
t = t.upper()
q = int(q)
if 1 <= q <= 15:
contador += q
if t == 'C':
coelho += q
elif t == 'R':
rato += q
elif t == 'S':
sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
| 1.953125 | 2 |
PLM/options.py | vtta2008/pipelineTool | 7 | 4228 | # -*- coding: utf-8 -*-
"""
Script Name:
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import os
from PySide2.QtWidgets import (QFrame, QStyle, QAbstractItemView, QSizePolicy, QLineEdit, QPlainTextEdit,
QGraphicsItem, QGraphicsView, QGraphicsScene, QRubberBand, QCalendarWidget, )
from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime
from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor
SingleSelection = QCalendarWidget.SingleSelection
NoSelection = QCalendarWidget.NoSelection
SingleLetterDay = QCalendarWidget.SingleLetterDayNames
ShortDay = QCalendarWidget.ShortDayNames
LongDay = QCalendarWidget.LongDayNames
NoHoriHeader = QCalendarWidget.NoHorizontalHeader
NoVertHeader = QCalendarWidget.NoVerticalHeader
IsoWeekNum = QCalendarWidget.ISOWeekNumbers
SelectMode = QCalendarWidget.SelectionMode
HoriHeaderFm = QCalendarWidget.HorizontalHeaderFormat
VertHeaderFm = QCalendarWidget.VerticalHeaderFormat
DayOfWeek = Qt.DayOfWeek
Sunday = Qt.Sunday
Monday = Qt.Monday
Tuesday = Qt.Tuesday
Wednesday = Qt.Wednesday
Thursday = Qt.Thursday
Friday = Qt.Friday
Saturday = Qt.Saturday
ICONSIZE = 32
ICONBUFFER = -1
BTNTAGSIZE = QSize(87, 20)
TAGBTNSIZE = QSize(87-1, 20-1)
BTNICONSIZE = QSize(ICONSIZE, ICONSIZE)
ICONBTNSIZE = QSize(ICONSIZE+ICONBUFFER, ICONSIZE+ICONBUFFER)
DAMG_LOGO_COLOR = QColor(0, 114, 188, 255)
# Basic color
GlobalColor = Qt.GlobalColor
WHITE = QColor(Qt.white)
LIGHTGRAY = QColor(Qt.lightGray)
GRAY = QColor(Qt.gray)
DARKGRAY = QColor(Qt.darkGray)
BLACK = QColor(Qt.black)
RED = QColor(Qt.red)
GREEN = QColor(Qt.green)
BLUE = QColor(Qt.blue)
DARKRED = QColor(Qt.darkRed)
DARKGREEN = QColor(Qt.darkGreen)
DARKBLUE = QColor(Qt.darkBlue)
CYAN = QColor(Qt.cyan)
MAGENTA = QColor(Qt.magenta)
YELLOW = QColor(Qt.yellow)
DARKCYAN = QColor(Qt.darkCyan)
DARKMAGENTA = QColor(Qt.darkMagenta)
DARKYELLOW = QColor(Qt.darkYellow)
# Dark Palette color
Color_BACKGROUND_LIGHT = QColor('#505F69')
COLOR_BACKGROUND_NORMAL = QColor('#32414B')
COLOR_BACKGROUND_DARK = QColor('#19232D')
COLOR_FOREGROUND_LIGHT = QColor('#F0F0F0')
COLOR_FOREGROUND_NORMAL = QColor('#AAAAAA')
COLOR_FOREGROUND_DARK = QColor('#787878')
COLOR_SELECTION_LIGHT = QColor('#148CD2')
COLOR_SELECTION_NORMAL = QColor('#1464A0')
COLOR_SELECTION_DARK = QColor('#14506E')
# Nice color
blush = QColor(246, 202, 203, 255)
petal = QColor(247, 170, 189, 255)
petunia = QColor(231, 62, 151, 255)
deep_pink = QColor(229, 2, 120, 255)
melon = QColor(241, 118, 110, 255)
pomegranate = QColor(178, 27, 32, 255)
poppy_red = QColor(236, 51, 39, 255)
orange_red = QColor(240, 101, 53, 255)
olive = QColor(174, 188, 43, 255)
spring = QColor(227, 229, 121, 255)
yellow = QColor(255, 240, 29, 255)
mango = QColor(254, 209, 26, 255)
cantaloupe = QColor(250, 176, 98, 255)
tangelo = QColor(247, 151, 47, 255)
burnt_orange = QColor(236, 137, 36, 255)
bright_orange = QColor(242, 124, 53, 255)
moss = QColor(176, 186, 39, 255)
sage = QColor(212, 219, 145, 255)
apple = QColor(178, 215, 140, 255)
grass = QColor(111, 178, 68, 255)
forest = QColor(69, 149, 62, 255)
peacock = QColor(21, 140, 167, 255)
teal = QColor(24, 157, 193, 255)
aqua = QColor(153, 214, 218, 255)
violet = QColor(55, 52, 144, 255)
deep_blue = QColor(15, 86, 163, 255)
hydrangea = QColor(150, 191, 229, 255)
sky = QColor(139, 210, 244, 255)
dusk = QColor(16, 102, 162, 255)
midnight = QColor(14, 90, 131, 255)
seaside = QColor(87, 154, 188, 255)
poolside = QColor(137, 203, 225, 255)
eggplant = QColor(86, 5, 79, 255)
lilac = QColor(222, 192, 219, 255)
chocolate = QColor(87, 43, 3, 255)
blackout = QColor(19, 17, 15, 255)
stone = QColor(125, 127, 130, 255)
gravel = QColor(181, 182, 185, 255)
pebble = QColor(217, 212, 206, 255)
sand = QColor(185, 172, 151, 255)
ignoreARM = Qt.IgnoreAspectRatio
scrollAsNeed = Qt.ScrollBarAsNeeded
scrollOff = Qt.ScrollBarAlwaysOff
scrollOn = Qt.ScrollBarAlwaysOn
SiPoMin = QSizePolicy.Minimum # Size policy
SiPoMax = QSizePolicy.Maximum
SiPoExp = QSizePolicy.Expanding
SiPoPre = QSizePolicy.Preferred
SiPoIgn = QSizePolicy.Ignored
frameStyle = QFrame.Sunken | QFrame.Panel
center = Qt.AlignCenter # Alignment
right = Qt.AlignRight
left = Qt.AlignLeft
top = Qt.AlignTop
bottom = Qt.AlignBottom
hori = Qt.Horizontal
vert = Qt.Vertical
dockL = Qt.LeftDockWidgetArea # Docking area
dockR = Qt.RightDockWidgetArea
dockT = Qt.TopDockWidgetArea
dockB = Qt.BottomDockWidgetArea
dockAll = Qt.AllDockWidgetAreas
datetTimeStamp = QDateTime.currentDateTime().toString("hh:mm - dd MMMM yy") # datestamp
PRS = dict(password = QLineEdit.Password, center = center , left = left , right = right,
spmax = SiPoMax , sppre = SiPoPre, spexp = SiPoExp, spign = SiPoIgn,
expanding = QSizePolicy.Expanding, spmin = SiPoMin,)
# -------------------------------------------------------------------------------------------------------------
""" Event """
NO_WRAP = QPlainTextEdit.NoWrap
NO_FRAME = QPlainTextEdit.NoFrame
ELIDE_RIGHT = Qt.ElideRight
ELIDE_NONE = Qt.ElideNone
# -------------------------------------------------------------------------------------------------------------
""" Window state """
StateNormal = Qt.WindowNoState
StateMax = Qt.WindowMaximized
StateMin = Qt.WindowMinimized
State_Selected = QStyle.State_Selected
# -------------------------------------------------------------------------------------------------------------
""" Nodegraph setting variables """
ASPEC_RATIO = Qt.KeepAspectRatio
SMOOTH_TRANS = Qt.SmoothTransformation
SCROLLBAROFF = Qt.ScrollBarAlwaysOff # Scrollbar
SCROLLBARON = Qt.ScrollBarAlwaysOn
SCROLLBARNEED = Qt.ScrollBarAsNeeded
WORD_WRAP = Qt.TextWordWrap
INTERSECT_ITEM_SHAPE = Qt.IntersectsItemShape
CONTAIN_ITEM_SHAPE = Qt.ContainsItemShape
MATCH_EXACTLY = Qt.MatchExactly
DRAG_ONLY = QAbstractItemView.DragOnly
# -------------------------------------------------------------------------------------------------------------
""" UI flags """
ITEMENABLE = Qt.ItemIsEnabled
ITEMMOVEABLE = QGraphicsItem.ItemIsMovable
ITEMSENDGEOCHANGE = QGraphicsItem.ItemSendsGeometryChanges
ITEMSCALECHANGE = QGraphicsItem.ItemScaleChange
ITEMPOSCHANGE = QGraphicsItem.ItemPositionChange
DEVICECACHE = QGraphicsItem.DeviceCoordinateCache
SELECTABLE = QGraphicsItem.ItemIsSelectable
MOVEABLE = QGraphicsItem.ItemIsMovable
FOCUSABLE = QGraphicsItem.ItemIsFocusable
PANEL = QGraphicsItem.ItemIsPanel
NOINDEX = QGraphicsScene.NoIndex # Scene
RUBBER_DRAG = QGraphicsView.RubberBandDrag # Viewer
RUBBER_REC = QRubberBand.Rectangle
POS_CHANGE = QGraphicsItem.ItemPositionChange
NODRAG = QGraphicsView.NoDrag
NOFRAME = QGraphicsView.NoFrame
ANCHOR_NO = QGraphicsView.NoAnchor
ANCHOR_UNDERMICE = QGraphicsView.AnchorUnderMouse
ANCHOR_CENTER = QGraphicsView.AnchorViewCenter
CACHE_BG = QGraphicsView.CacheBackground
UPDATE_VIEWRECT = QGraphicsView.BoundingRectViewportUpdate
UPDATE_FULLVIEW = QGraphicsView.FullViewportUpdate
UPDATE_SMARTVIEW = QGraphicsView.SmartViewportUpdate
UPDATE_BOUNDINGVIEW = QGraphicsView.BoundingRectViewportUpdate
UPDATE_MINIMALVIEW = QGraphicsView.MinimalViewportUpdate
STAY_ON_TOP = Qt.WindowStaysOnTopHint
STRONG_FOCUS = Qt.StrongFocus
SPLASHSCREEN = Qt.SplashScreen
FRAMELESS = Qt.FramelessWindowHint
CUSTOMIZE = Qt.CustomizeWindowHint
CLOSEBTN = Qt.WindowCloseButtonHint
MINIMIZEBTN = Qt.WindowMinimizeButtonHint
AUTO_COLOR = Qt.AutoColor
# -------------------------------------------------------------------------------------------------------------
""" Drawing """
ANTIALIAS = QPainter.Antialiasing # Painter
ANTIALIAS_TEXT = QPainter.TextAntialiasing
ANTIALIAS_HIGH_QUALITY = QPainter.HighQualityAntialiasing
SMOOTH_PIXMAP_TRANSFORM = QPainter.SmoothPixmapTransform
NON_COSMETIC_PEN = QPainter.NonCosmeticDefaultPen
NO_BRUSH = Qt.NoBrush # Brush
NO_PEN = Qt.NoPen # Pen
ROUND_CAP = Qt.RoundCap
ROUND_JOIN = Qt.RoundJoin
PATTERN_SOLID = Qt.SolidPattern # Pattern
LINE_SOLID = Qt.SolidLine # Line
LINE_DASH = Qt.DashLine
LINE_DOT = Qt.DotLine
LINE_DASH_DOT = Qt.DashDotDotLine
TRANSPARENT = Qt.transparent
TRANSPARENT_MODE = Qt.TransparentMode
# -------------------------------------------------------------------------------------------------------------
""" Meta Object """
QUEUEDCONNECTION = Qt.QueuedConnection
# -------------------------------------------------------------------------------------------------------------
""" Keyboard and cursor """
TEXT_BOLD = QFont.Bold
TEXT_NORMAL = QFont.Normal
MONO_SPACE = QFont.Monospace
TEXT_MENEOMIC = Qt.TextShowMnemonic
KEY_PRESS = QEvent.KeyPress
KEY_RELEASE = QEvent.KeyRelease
KEY_ALT = Qt.Key_Alt
KEY_DEL = Qt.Key_Delete
KEY_TAB = Qt.Key_Tab
KEY_SHIFT = Qt.Key_Shift
KEY_CTRL = Qt.Key_Control
KEY_BACKSPACE = Qt.Key_Backspace
KEY_ENTER = Qt.Key_Enter
KEY_RETURN = Qt.Key_Return
KEY_F = Qt.Key_F
KEY_S = Qt.Key_S
ALT_MODIFIER = Qt.AltModifier
CTRL_MODIFIER = Qt.ControlModifier
SHIFT_MODIFIER = Qt.ShiftModifier
NO_MODIFIER = Qt.NoModifier
CLOSE_HAND_CUSOR = Qt.ClosedHandCursor
SIZEF_CURSOR = Qt.SizeFDiagCursor
windows = os.name == 'nt'
DMK = Qt.AltModifier if windows else CTRL_MODIFIER
MOUSE_LEFT = Qt.LeftButton
MOUSE_RIGHT = Qt.RightButton
MOUSE_MIDDLE = Qt.MiddleButton
NO_BUTTON = Qt.NoButton
ARROW_NONE = Qt.NoArrow # Cursor
CURSOR_ARROW = Qt.ArrowCursor
CURSOR_SIZEALL = Qt.SizeAllCursor
MOVE_OPERATION = QTextCursor.MoveOperation
MOVE_ANCHOR = QTextCursor.MoveMode.MoveAnchor
KEEP_ANCHOR = QTextCursor.MoveMode.KeepAnchor
ACTION_MOVE = Qt.MoveAction # Action
# -------------------------------------------------------------------------------------------------------------
""" Set number """
RELATIVE_SIZE = Qt.RelativeSize # Size
INI = QSettings.IniFormat
NATIVE = QSettings.NativeFormat
INVALID = QSettings.InvalidFormat
SYS_SCOPE = QSettings.SystemScope
USER_SCOPE = QSettings.UserScope
# -------------------------------------------------------------------------------------------------------------
# Created by <NAME> on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved | 1.398438 | 1 |
src/commons.py | ymontilla/WebScrapingCatastro | 0 | 4356 | <reponame>ymontilla/WebScrapingCatastro
# -*- coding: utf-8 -*-
# +
## Common utilities shared between Places and OSM.
# +
import csv
import ast
import codecs
from math import cos, asin, sqrt
import pandas as pd
# +
def read_csv_with_encoding(filename, delimiter="|", encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as fp:
reader = csv.reader(fp, delimiter=delimiter)
csvFile = list(reader)
return pd.DataFrame(csvFile[1:], columns=csvFile[0])
def read_json_with_encoding(filename, encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as a:
l = a.read()
json_file = ast.literal_eval(l)
return json_file
# -
def distance(lat1, lon1, lat2, lon2):
"""
    Haversine great-circle distance between two points; the result is in kilometers.
"""
p = 0.017453292519943295 #Pi/180
a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a))
def build_center_point(df):
lat = df["latitude"].mean()
lon = df["longitude"].mean()
return pd.DataFrame({'fid': [777], 'latitude': [lat], 'longitude': [lon]})
"""
El proceso es muy pesado y no es posible hacer el ananlisis con toda la data de bogotá, el número de registros es
demasiado grande para caber en memoria. El uso correcto es filtrar los datos antes de hacer el cross join.
"""
def compute_cross_distances(location_df, interest_points_df=None):
condition_latitude = ~location_df["latitude"].isna()
condition_longitude = ~location_df["longitude"].isna()
location_df_complete = location_df.loc[condition_latitude & condition_longitude]
results = []
for i in location_df_complete.index:
for j in interest_points_df.index:
results.append([
location_df_complete.loc[i, "fid"],
distance(location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
float(interest_points_df.loc[j, "lat"]), float(interest_points_df.loc[j, "lon"])),
location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
interest_points_df.loc[j, "lat"],
interest_points_df.loc[j, "lon"],
interest_points_df.loc[j, "amenity"],
interest_points_df.loc[j, "name"]
])
final = list(zip(*results))
return pd.DataFrame({'fid': final[0], 'distance': final[1], 'p_lat': final[2],
'p_lon': final[3], 'i_lat': final[4], 'i_lon': final[5],
'amenity': final[6], 'name': final[7]})
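# Illustrative usage sketch (added for exposition; the fids, coordinates and
# point attributes below are assumed values, not part of the original module):
if __name__ == "__main__":
    locations = pd.DataFrame({"fid": [1, 2],
                              "latitude": [4.60971, 4.65000],
                              "longitude": [-74.08175, -74.05000]})
    points = pd.DataFrame({"lat": ["4.62"], "lon": ["-74.07"],
                           "amenity": ["school"], "name": ["demo"]})
    print(distance(4.60971, -74.08175, 4.65000, -74.05000))  # km
    print(build_center_point(locations))
    print(compute_cross_distances(locations, points))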
| 2.390625 | 2 |
wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | 0 | 4484 | # Generated by Django 3.1.2 on 2020-10-29 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wishes', '0004_auto_20201029_0857'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='image',
field=models.FilePathField(path='/images'),
),
]
| 0.890625 | 1 |
pottan_ocr/utils.py | nithyadurai87/pottan-ocr-tamil | 5 | 4612 | import torch
import json
import numpy as np
from torch.autograd import Variable
import gzip
import yaml
from re import split
from matplotlib import pyplot
def showImg( im ):
pyplot.imshow( im )
pyplot.show()
def myOpen( fname, mode ):
return open( fname, mode, encoding="utf-8" )
def readFile( fname ):
opener, mode = ( gzip.open, 'rt' ) if fname[-3:] == '.gz' else ( open, 'r' )
with opener( fname, mode ) as f:
return f.read()
def readLines( fname ):
return split('[\r\n]', readFile( fname ) )
def readJson( fname ):
with myOpen( fname, 'r' ) as f:
return json.load( f )
def writeFile( fname, contents ):
with myOpen( fname, 'w' ) as f:
f.write( contents )
def writeJson( fname, data ):
with myOpen( fname, 'w') as outfile:
json.dump(data, outfile)
def readYaml( fname ):
with myOpen(fname, 'r') as fp:
        return yaml.load( fp, Loader=yaml.SafeLoader )
config = readYaml('./config.yaml')
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def loadTrainedModel( model, opt ):
"""Load a pretrained model into given model"""
print('loading pretrained model from %s' % opt.crnn)
if( opt.cuda ):
stateDict = torch.load(opt.crnn )
else:
stateDict = torch.load(opt.crnn, map_location={'cuda:0': 'cpu'} )
    # Older torch versions / DataParallel-wrapped models save keys prefixed with 'module.'; strip that prefix.
if( list( stateDict.keys() )[0][:7] == 'module.' ):
for key in list(stateDict.keys()):
stateDict[ key[ 7:] ] = stateDict[key]
del stateDict[ key ]
model.load_state_dict( stateDict )
print('Completed loading pre trained model')
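# Minimal usage sketch for `averager` (illustrative values; note that merely
# importing this module already reads ./config.yaml, see `config` above):
if __name__ == '__main__':
    avg = averager()
    avg.add(torch.tensor([1.0, 2.0, 3.0]))
    avg.add(torch.tensor([5.0]))
    print(avg.val())  # tensor(2.7500) == (1 + 2 + 3 + 5) / 4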
| 1.921875 | 2 |
src/lr_find.py | KushajveerSingh/fastai_without_fastai | 12 | 4740 | import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# NOT -> ParameterModule
# NOT -> children_and_parameters
# NOT -> flatten_model
# NOT -> lr_range
# NOT -> scheduling functions
# NOT -> SmoothenValue
# YES -> lr_find
# NOT -> plot_lr_find
# NOT TO BE MODIFIED
class ParameterModule(nn.Module):
"Register a lone parameter 'p' in a module"
def __init__(self, p:nn.Parameter):
super().__init__()
self.val = p
def forward(self, x):
return x
# NOT TO BE MODIFIED
# To be used to flatten_model
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# NOT TO BE MODIFIED
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if len(list(m.children())) else [m]
# NOT TO BE MODIFIED
def lr_range(model, lr):
"""
    Build per-layer (differential) learning rates from lr.
    Arguments:
        model :- torch.nn.Module
        lr :- float or slice
    Returns:
        lr unchanged if it is a plain float; otherwise a numpy array of
        per-layer learning rates spaced between lr.start and lr.stop.
    """
    if not isinstance(lr, slice):
        return lr
    num_layer = len(flatten_model(model))
if lr.start:
mult = lr.stop / lr.start
step = mult**(1/(num_layer-1))
res = np.array([lr.start*(step**i) for i in range(num_layer)])
else:
res = [lr.stop/10.]*(num_layer-1) + [lr.stop]
return np.array(res)
# NOT TO BE MODIFIED
# These are the functions that produce the lr values. For example, for a
# linearly increasing lr we would use annealing_linear.
# You can add your own custom function, for producing lr.
# By defualt annealing_exp is used for both lr and momentum
def annealing_no(start, end, pct:float):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct:float):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct:float):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct:float):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start, end, pct:float, degree):
return end + (start-end) * (1-pct)**degree
# NOT TO BE MODIFIED
class Stepper():
"""
    Used to step from start to end ('vals') over 'n_iter' iterations on a schedule.
    We will create a Stepper object and then use one of the above annealing
    functions to step from the start lr to the end lr.
"""
def __init__(self, vals, n_iter:int, func=None):
self.start, self.end = (vals[0], vals[1]) if isinstance(vals, tuple) else (vals,0)
self.n_iter = max(1, n_iter)
if func is None:
self.func = annealing_linear if isinstance(vals, tuple) else annealing_no
else:
self.func = func
self.n = 0
def step(self):
"Return next value along annealed schedule"
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return 'True' if schedule completed"
return self.n >= self.n_iter
# NOT TO BE MODIFIED
class SmoothenValue():
"Create a smooth moving average for a value (loss, etc) using `beta`."
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
# TO BE MODIFIED IN SOME CASES
def lr_find(data_loader, model, loss_fn, opt, wd:int=0, start_lr:float=1e-7, end_lr:float=10,
num_it:int=100, stop_div:bool=True, smooth_beta:float=0.98, use_gpu:bool=True,
device=torch.device('cuda'), anneal_func=annealing_exp):
"""
The main function that you will call to plot learning_rate vs losses graph. It is
the only function from lr_find.py that you will call. By default it will use GPU. It
assumes your model is already on GPU if you use use_gpu.
Arguments:-
data_loader :- torch.utils.data.DataLoader
model :- torch.nn.Module
loss_fn :- torch.nn.LossFunction
opt :- torch.optim.Optimizer
wd :- weight decay (default=0).
start_lr :- The learning rate from where to start in lr_find (default=1e-7)
end_lr :- The learning rate at which to end lr_find (default=10)
num_it :- Number of iterations for lr_find (default=100)
stop_div :- If the loss diverges, then stop early (default=True)
        smooth_beta :- The beta value used to smooth the running average of the loss (default=0.98)
use_gpu :- True (train on GPU) else CPU
anneal_func :- The step function you want to use (default exp)
device :- Torch device to use for training model (default GPU)
Returns:
losses :- list of smoothened version of losses
lrs :- list of all lrs that we test
"""
model.train()
stop = False
flag = False
best_loss = 0.
iteration = 0
losses = []
lrs = []
lrs.append(start_lr)
start_lr = lr_range(model, start_lr)
start_lr = np.array(start_lr) if isinstance(start_lr, (tuple, list)) else start_lr
end_lr = lr_range(model, end_lr)
end_lr = np.array(end_lr) if isinstance(end_lr, (tuple, list)) else end_lr
sched = Stepper((start_lr, end_lr), num_it, anneal_func)
smoothener = SmoothenValue(smooth_beta)
epochs = int(np.ceil(num_it/len(data_loader)))
# save model_dict
model_state = model.state_dict()
opt_state = opt.state_dict()
# Set optimizer learning_rate = start_lr
for group in opt.param_groups:
group['lr'] = sched.start
for i in range(epochs):
for data in data_loader:
opt.zero_grad()
################### TO BE MODIFIED ###################
# Depending on your model, you will have to modify your
# data pipeline and how you give inputs to your model.
inputs, labels = data
if use_gpu:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
#####################################################
if use_gpu:
smoothener.add_value(loss.detach().cpu())
else:
smoothener.add_value(loss.detach())
smooth_loss = smoothener.smooth
losses.append(smooth_loss)
loss.backward()
################### TO BE MODIFIED ###################
# For AdamW. If you want to use Adam, comment these lines
for group in opt.param_groups:
for param in group['params']:
                param.data = param.data.add(param.data, alpha=-wd * group['lr'])
#####################################################
opt.step()
# Change lr
new_lr = sched.step()
lrs.append(new_lr)
for group in opt.param_groups:
group['lr'] = new_lr
################### TO BE MODIFIED ###################
# You necessarily don't want to change it. But in cases
# when you are maximizing the loss, then you will have
# to change it.
if iteration == 0 or smooth_loss < best_loss:
best_loss = smooth_loss
iteration += 1
if sched.is_done or (stop_div and (smooth_loss > 4*best_loss or torch.isnan(loss))):
flag = True
break
#####################################################
if iteration%10 == 0:
print(f'Iteration: {iteration}')
if flag:
break
# Load state dict
model.load_state_dict(model_state)
opt.load_state_dict(opt_state)
lrs.pop()
print(f'LR Finder is complete.')
return losses, lrs
# NOT TO BE MODIFIED
def plot_lr_find(losses, lrs, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None):
"""
It will take the losses and lrs returned by lr_find as input.
Arguments:-
skip_start -> It will skip skip_start lrs from the start
skip_end -> It will skip skip_end lrs from the end
suggestion -> If you want to see the point where the gradient changes most
return_fig -> True then get the fig in the return statement
"""
lrs = lrs[skip_start:-skip_end] if skip_end > 0 else lrs[skip_start:]
losses = losses[skip_start:-skip_end] if skip_end > 0 else losses[skip_start:]
losses = [x.item() for x in losses]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try:
mg = (np.gradient(np.array(losses))).argmin()
except:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
if return_fig is not None:
return fig
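# End-to-end usage sketch (illustrative: the toy dataset, model and
# hyper-parameters below are assumed placeholders, not part of this module):
if __name__ == '__main__':
    import torch.utils.data as tud
    X = torch.randn(256, 10)
    y = torch.randint(0, 2, (256,))
    loader = tud.DataLoader(tud.TensorDataset(X, y), batch_size=32)
    net = nn.Sequential(nn.Linear(10, 2))
    opt = torch.optim.Adam(net.parameters(), lr=1e-7)
    losses, lrs = lr_find(loader, net, nn.CrossEntropyLoss(), opt,
                          num_it=50, use_gpu=False)
    plot_lr_find(losses, lrs, skip_start=5, skip_end=2)
    plt.show()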
| 2.265625 | 2 |
instagram/admin.py | James19stack/instagram-copy_cat | 0 | 4868 | <reponame>James19stack/instagram-copy_cat<filename>instagram/admin.py
from django.contrib import admin
from .models import Images,Comments,Profile
# Register your models here.
class CommentInline(admin.TabularInline):
model=Comments
extra=3
class ImageInline(admin.ModelAdmin):
fieldsets=[
(None,{'fields':['image']}),
(None,{'fields':['image_name']}),
(None,{'fields':['image_caption']}),
(None,{'fields':['likes']}),
]
inlines=[CommentInline]
admin.site.site_header='InstaPost Admin'
admin.site.site_title='InstaPost Admin Dashboard'
admin.site.register(Images,ImageInline)
admin.site.register(Profile)
| 1.234375 | 1 |
applications/tensorflow/cnns/models/resnet.py | xihuaiwen/chinese_bert | 0 | 4996 | <reponame>xihuaiwen/chinese_bert
# Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops
# This is all written for: NHWC
class TensorflowResNet(ResNet):
def __init__(self, *args, **kwargs):
self.dtype = tf.float16
super(TensorflowResNet, self).__init__(*args, **kwargs)
def _get_variable(self, name, shape, init):
return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)
def residual(self, x, shortcut, out_filters, stride, type='B'):
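        # Shortcut variants follow the ResNet paper: 'A' zero-pads the
        # identity, 'B' projects with a 1x1 conv only when shapes differ,
        # and 'C' always projects.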
in_shape = shortcut.get_shape()
pad = int(x.get_shape()[3] - in_shape[3])
if pad != 0 or type == 'C':
if type == 'A':
shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,
strides=[1, stride, stride, 1])
shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
else:
shortcut = self.conv(shortcut, 1, stride, out_filters)
shortcut = self.norm(shortcut)
x = shortcut + x
x = self.relu(x)
return x
def relu(self, x):
return tf.nn.relu(x)
def conv(self, x, ksize, stride, filters_out, bias=True):
filters_in = x.get_shape()[-1]
wshape = [ksize, ksize, filters_in, filters_out]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
weights = self._get_variable('weights', shape=wshape, init=w_init)
x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
if bias:
bshape = [filters_out]
b_init = tf.zeros_initializer()
biases = self._get_variable('biases', shape=bshape, init=b_init)
x = x + biases
return x
def norm(self, x, type='BATCH', groups=32, training=False):
if type == 'BATCH':
# Perhaps use tf.nn.fused_batch_norm instead.
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,
training=training, trainable=training,
momentum=0.997, epsilon=1e-5)
elif type == 'GROUP':
x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,
training=training, trainable=training,
channels_axis=-1, reduction_axes=[-3, -2])
return x
def fc(self, x, num_units_out):
num_units_in = x.get_shape()[1]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
b_init = tf.constant_initializer(0.0)
with self.namescope('fc'):
weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)
biases = self._get_variable('biases', shape=[num_units_out], init=b_init)
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def reduce_mean(self, x, indices=(1, 2)):
x = tf.reduce_mean(x, reduction_indices=indices)
return x
def maxpool(self, x):
x = tf.nn.max_pool(
x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME')
return x
def namescope(self, debug_string):
return tf.variable_scope(debug_string)
| 1.789063 | 2 |
scripts/external_libs/scapy-2.4.3/scapy/config.py | timgates42/trex-core | 956 | 5124 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
    stealth : if 1, prevents any unwanted packet from going out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
    iff : selects the default output interface for srp() and sendp() (default: "eth0") # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
    debug_tls: when 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
def crypto_validator(func):
"""
This a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
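# Usage sketch for crypto_validator (illustrative; _DemoCipher is a
# placeholder class, not a real scapy object):
if __name__ == '__main__':
    class _DemoCipher(object):
        @crypto_validator
        def encrypt(self, data):
            # Only reached when conf.crypto_valid is True.
            return data
    try:
        print(_DemoCipher().encrypt(b"x"))
    except ImportError as exc:
        print(exc)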
| 1.6875 | 2 |
mmdet/core/ufp/__init__.py | PuAnysh/UFPMP-Det | 9 | 5252 | <reponame>PuAnysh/UFPMP-Det
from .spp import *
from .unified_foreground_packing import *
__all__ = [
'phsppog', 'UnifiedForegroundPacking'
]
| 0.088867 | 0 |
meshio/_cli/_info.py | jorgensd/meshio | 1 | 5380 | import argparse
import numpy as np
from .._helpers import read, reader_map
from ._helpers import _get_version_text
def info(argv=None):
# Parse command line arguments.
parser = _get_info_parser()
args = parser.parse_args(argv)
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
print(mesh)
# check if the cell arrays are consistent with the points
is_consistent = True
for cells in mesh.cells:
        if np.any(cells.data >= mesh.points.shape[0]):
print("\nATTENTION: Inconsistent mesh. Cells refer to nonexistent points.")
is_consistent = False
break
# check if there are redundant points
if is_consistent:
point_is_used = np.zeros(mesh.points.shape[0], dtype=bool)
for cells in mesh.cells:
point_is_used[cells.data] = True
if np.any(~point_is_used):
print("ATTENTION: Some points are not part of any cell.")
def _get_info_parser():
parser = argparse.ArgumentParser(
description=("Print mesh info."), formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--version",
"-v",
action="version",
version=_get_version_text(),
help="display version information",
)
return parser
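# Usage sketch (illustrative; the mesh file name is an assumed placeholder,
# and the module is normally invoked through the meshio CLI entry point):
if __name__ == "__main__":
    info(["input.vtu", "--input-format", "vtu"])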
| 2.265625 | 2 |
skynet-agent/plugins/plugin_api.py | skynetera/skynet | 3 | 5508 | #!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: <EMAIL>
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: plugin_api.py
@time: 2015-11-28 1:52 PM
"""
from linux import cpu,disk,iostats,loadavg,memory,netstats,swap
def get_load_info():
return loadavg.monitor()
def get_cpu_status():
return cpu.monitor()
def get_memory_info():
return memory.monitor()
def get_swap_info():
return swap.monitor()
def get_disk_info():
return disk.monitor()
def get_network_info():
return netstats.monitor()
def get_iostats_info():
return iostats.monitor()
| 1.085938 | 1 |
zerver/management/commands/list_realms.py | rtzll/zulip | 0 | 5636 | <filename>zerver/management/commands/list_realms.py<gh_stars>0
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """List realms in the server and it's configuration settings(optional).
Usage examples:
./manage.py list_realms
./manage.py list_realms --all"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--all",
dest="all",
action="store_true",
default=False,
help="Print all the configuration settings of the realms.")
def handle(self, *args: Any, **options: Any) -> None:
realms = Realm.objects.all()
outer_format = "%-5s %-40s %-40s"
inner_format = "%-40s %s"
deactivated = False
if not options["all"]:
print(outer_format % ("id", "string_id", "name"))
print(outer_format % ("--", "---------", "----"))
for realm in realms:
if realm.deactivated:
print(self.style.ERROR(outer_format % (realm.id, realm.string_id, realm.name)))
deactivated = True
else:
print(outer_format % (realm.id, realm.string_id, realm.name))
if deactivated:
print(self.style.WARNING("\nRed rows represent deactivated realms."))
sys.exit(0)
# The remaining code path is the --all case.
identifier_attributes = ["id", "name", "string_id"]
for realm in realms:
# Start with just all the fields on the object, which is
# hacky but doesn't require any work to maintain.
realm_dict = realm.__dict__
# Remove a field that is confusingly useless
del realm_dict['_state']
# Fix the one bitfield to display useful data
realm_dict['authentication_methods'] = str(realm.authentication_methods_dict())
for key in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, realm_dict[key])))
deactivated = True
else:
print(inner_format % (key, realm_dict[key]))
            for key, value in sorted(realm_dict.items()):
if key not in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, value)))
else:
print(inner_format % (key, value))
print("-" * 80)
if deactivated:
print(self.style.WARNING("\nRed is used to highlight deactivated realms."))
| 1.632813 | 2 |
tests_app/tests/functional/key_constructor/bits/models.py | maryokhin/drf-extensions | 1 | 5764 | # -*- coding: utf-8 -*-
from django.db import models
class KeyConstructorUserProperty(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'tests_app'
class KeyConstructorUserModel(models.Model):
property = models.ForeignKey(KeyConstructorUserProperty)
class Meta:
app_label = 'tests_app' | 1.164063 | 1 |
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py | MarcoMancha/BreastCancerDetector | 2 | 5892 | """
For backwards-compatibility, keep this file.
(Many people are going to have key bindings that rely on this file.)
"""
from __future__ import unicode_literals
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.)
InEditingMode = in_editing_mode
| 0.929688 | 1 |
metrics/overflow.py | DEKHTIARJonathan/pyinstrument | 1 | 6020 | from pyinstrument import Profiler
p = Profiler(use_signal=False)
p.start()
def func(num):
if num == 0:
return
b = 0
for x in range(1,100000):
b += x
return func(num - 1)
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
f.write(p.output_html())
| 1.5 | 2 |
textvis/textprizm/models.py | scclab/textvisdrg-prototype | 0 | 6148 | from django.db import models
# Create your models here.
class Schema(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
class Code(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
active_instances = models.PositiveIntegerField(default=0)
schema = models.ForeignKey(Schema, related_name="codes")
code_type = models.IntegerField(default=0)
def __unicode__(self):
if self.description:
return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
else:
return "%s/%s (%d)" % (self.schema_id, self.name, self.id)
class DataSet(models.Model):
name = models.CharField(max_length=100)
created = models.DateTimeField()
class Session(models.Model):
set = models.ForeignKey(DataSet)
started = models.DateTimeField()
ended = models.DateTimeField()
def __unicode__(self):
return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))
class Participant(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
def __unicode__(self):
return self.name
class Message(models.Model):
session = models.ForeignKey(Session)
idx = models.IntegerField()
time = models.DateTimeField()
type = models.IntegerField()
participant = models.ForeignKey(Participant, related_name='messages')
message = models.TextField()
codes = models.ManyToManyField(Code, through='CodeInstance')
@classmethod
def get_between(cls, start, end):
"""
        Get messages inclusively between two bounds, where each bound may be
        a Message or a datetime.
        Respects the exact ordering of messages: for instance, a message with
        the same timestamp as the end bound but a later index in the same
        session is excluded.
"""
if isinstance(start, Message):
after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
after_first = models.Q(time__gte=start.time) & after_first
else:
after_first = models.Q(time__gte=start)
if isinstance(end, Message):
before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
before_last = models.Q(time__lte=end.time) & before_last
else:
before_last = models.Q(time__lte=end)
return cls.objects.filter(after_first, before_last)
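    # Usage sketch (illustrative; the datetime bounds are assumed values):
    #   from datetime import datetime
    #   qs = Message.get_between(datetime(2014, 1, 1), datetime(2014, 2, 1))
    # Each bound may independently be a Message or a datetime.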
@property
def text(self):
return self.message
@property
def user_name(self):
return self.participant.name
@property
def created_at(self):
return self.time
class User(models.Model):
name = models.CharField(max_length=100)
full_name = models.CharField(max_length=250)
email = models.CharField(max_length=250)
def __unicode__(self):
return self.name
class AbstractCodeInstance(models.Model):
class Meta:
abstract = True
code = models.ForeignKey(Code)
message = models.ForeignKey(Message)
added = models.DateTimeField()
class CodeInstance(AbstractCodeInstance):
user = models.ForeignKey(User)
task_id = models.PositiveIntegerField()
intensity = models.FloatField()
flag = models.IntegerField()
| 1.9375 | 2 |
clip/clip.py | keshav11/clip | 1 | 6276 | <filename>clip/clip.py
import os
import argparse
from pathlib import Path
CLIP_FILE = os.path.join(Path.home(), '.clip')
TEMP_FILE = '.TEMP_FILE'
def add_text(key, text):
if os.path.exists(CLIP_FILE):
open_mode = 'a'
else:
open_mode = 'w+'
with open(CLIP_FILE, open_mode) as clip_file:
clip_file.write(key + ": " + text + "\n")
def list_texts():
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
print(text)
def get_text(key):
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
key_val = text.split(':')
if key_val[0].strip() == key:
print(key_val[1].strip(), end='')
def delete_text(key):
exists = False
with open(TEMP_FILE, 'w+') as temp_file:
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
if text.strip() == "":
continue
key_val = text.split(':')
if key_val[0].strip() != key:
temp_file.write(text+"\n")
else:
exists = True
if not exists:
print("key:", key, "was not found in the clip store")
try:
os.rename(TEMP_FILE, CLIP_FILE)
except Exception as ex:
os.remove(TEMP_FILE)
print('remove text failed.', ex)
def main():
parser = argparse.ArgumentParser(description='clips and saves texts from the command line')
parser.add_argument('-a', '--add', nargs=2)
parser.add_argument('-g', '--get', nargs=1)
parser.add_argument('-d', '--delete', nargs=1)
parser.add_argument('-l', '--list', action='store_true')
args = parser.parse_args()
if args.add:
key, value = args.add[0], args.add[1]
add_text(key, value)
elif args.list:
list_texts()
elif args.get:
key = args.get[0]
get_text(key)
elif args.delete:
key = args.delete[0]
delete_text(key)
else:
parser.print_usage()
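# Illustrative shell usage (the key/value strings are assumed examples):
#   python clip.py --add note "buy milk"
#   python clip.py --get note
#   python clip.py --list
#   python clip.py --delete note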
if __name__ == '__main__':
main()
| 2.28125 | 2 |
heat/api/openstack/v1/views/stacks_view.py | noironetworks/heat | 265 | 6404 | <reponame>noironetworks/heat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from heat.api.openstack.v1 import util
from heat.api.openstack.v1.views import views_common
from heat.rpc import api as rpc_api
_collection_name = 'stacks'
basic_keys = (
rpc_api.STACK_ID,
rpc_api.STACK_NAME,
rpc_api.STACK_DESCRIPTION,
rpc_api.STACK_STATUS,
rpc_api.STACK_STATUS_DATA,
rpc_api.STACK_CREATION_TIME,
rpc_api.STACK_DELETION_TIME,
rpc_api.STACK_UPDATED_TIME,
rpc_api.STACK_OWNER,
rpc_api.STACK_PARENT,
rpc_api.STACK_USER_PROJECT_ID,
rpc_api.STACK_TAGS,
)
def format_stack(req, stack, keys=None, include_project=False):
def transform(key, value):
if keys and key not in keys:
return
if key == rpc_api.STACK_ID:
yield ('id', value['stack_id'])
yield ('links', [util.make_link(req, value)])
if include_project:
yield ('project', value['tenant'])
elif key == rpc_api.STACK_ACTION:
return
elif (key == rpc_api.STACK_STATUS and
rpc_api.STACK_ACTION in stack):
# To avoid breaking API compatibility, we join RES_ACTION
# and RES_STATUS, so the API format doesn't expose the
# internal split of state into action/status
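            # e.g. action 'CREATE' and status 'COMPLETE' are exposed as the
            # single value 'CREATE_COMPLETE'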
yield (key, '_'.join((stack[rpc_api.STACK_ACTION], value)))
else:
# TODO(zaneb): ensure parameters can be formatted for XML
# elif key == rpc_api.STACK_PARAMETERS:
# return key, json.dumps(value)
yield (key, value)
return dict(itertools.chain.from_iterable(
transform(k, v) for k, v in stack.items()))
def collection(req, stacks, count=None, include_project=False):
keys = basic_keys
formatted_stacks = [format_stack(req, s, keys, include_project)
for s in stacks]
result = {'stacks': formatted_stacks}
links = views_common.get_collection_links(req, formatted_stacks)
if links:
result['links'] = links
if count is not None:
result['count'] = count
return result
| 1.304688 | 1 |
unet3d/config.py | fcollman/pytorch-3dunet | 0 | 6532 | import argparse
import os
import torch
import yaml
DEFAULT_DEVICE = 'cuda:0'
def load_config():
parser = argparse.ArgumentParser(description='UNet3D training')
parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True)
args = parser.parse_args()
config = _load_config_yaml(args.config)
# Get a device to train on
device = config.get('device', DEFAULT_DEVICE)
config['device'] = torch.device(device if torch.cuda.is_available() else "cpu")
return config
def _load_config_yaml(config_file):
return yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader)
| 1.585938 | 2 |
cmake/utils/gen-ninja-deps.py | stamhe/bitcoin-abc | 1,266 | 6660 | <filename>cmake/utils/gen-ninja-deps.py
#!/usr/bin/env python3
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(description='Produce a dep file from ninja.')
parser.add_argument(
'--build-dir',
help='The build directory.',
required=True)
parser.add_argument(
'--base-dir',
help='The directory for which dependencies are rewriten.',
required=True)
parser.add_argument('--ninja', help='The ninja executable to use.')
parser.add_argument(
'base_target',
help="The target from the base's perspective.")
parser.add_argument(
'targets', nargs='+',
help='The target for which dependencies are extracted.')
parser.add_argument(
'--extra-deps', nargs='+',
help='Extra dependencies.')
args = parser.parse_args()
build_dir = os.path.abspath(args.build_dir)
base_dir = os.path.abspath(args.base_dir)
ninja = args.ninja
base_target = args.base_target
targets = args.targets
extra_deps = args.extra_deps
# Make sure we operate in the right folder.
os.chdir(build_dir)
if ninja is None:
ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1]
# Construct the set of all targets
all_targets = set()
doto_targets = set()
for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines():
t, r = t.split(b':')
all_targets.add(t)
if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__':
doto_targets.add(t)
def parse_ninja_query(query):
deps = dict()
lines = query.splitlines()
while len(lines):
line = lines.pop(0)
if line[0] == ord(' '):
continue
# We have a new target
target = line.split(b':')[0]
assert lines.pop(0)[:8] == b' input:'
inputs = set()
while True:
i = lines.pop(0)
if i[:4] != b' ':
break
'''
ninja has 3 types of input:
1. Explicit dependencies, no prefix;
2. Implicit dependencies, | prefix.
3. Order only dependencies, || prefix.
Order only dependency do not require the target to be rebuilt
and so we ignore them.
'''
i = i[4:]
if i[0] == ord('|'):
if i[1] == ord('|'):
# We reached the order only dependencies.
break
i = i[2:]
inputs.add(i)
deps[target] = inputs
return deps
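# Illustrative (assumed) shape of the `ninja -t query` output parsed above:
#   foo.o:
#     input: CXX_COMPILER__foo
#       foo.cpp
#       | generated.h
#       || some_order_only_target
#     outputs:
# Explicit and implicit inputs are collected; order-only inputs are skipped.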
def extract_deps(workset):
# Recursively extract the dependencies of the target.
deps = dict()
while len(workset) > 0:
query = subprocess.check_output([ninja, '-t', 'query'] + list(workset))
target_deps = parse_ninja_query(query)
deps.update(target_deps)
workset = set()
for d in target_deps.values():
workset.update(t for t in d if t in all_targets and t not in deps)
# Extract build time dependencies.
bt_targets = [t for t in deps if t in doto_targets]
if len(bt_targets) == 0:
return deps
ndeps = subprocess.check_output(
[ninja, '-t', 'deps'] + bt_targets,
stderr=subprocess.DEVNULL)
lines = ndeps.splitlines()
while len(lines) > 0:
line = lines.pop(0)
t, m = line.split(b':')
if m == b' deps not found':
continue
inputs = set()
while True:
i = lines.pop(0)
if i == b'':
break
assert i[:4] == b' '
inputs.add(i[4:])
deps[t] = inputs
return deps
base_dir = base_dir.encode()
def rebase_deps(deps):
rebased = dict()
cache = dict()
def rebase(path):
if path in cache:
return cache[path]
abspath = os.path.abspath(path)
newpath = path if path == abspath else os.path.relpath(
abspath, base_dir)
cache[path] = newpath
return newpath
for t, s in deps.items():
rebased[rebase(t)] = set(rebase(d) for d in s)
return rebased
deps = extract_deps(set(targets))
deps = rebase_deps(deps)
def dump(deps):
    for t, d in deps.items():
        if len(d) == 0:
            continue
        # Use a local name instead of shadowing the builtin 'str'.
        line = t.decode() + ": \\\n "
        line += " \\\n ".join(sorted(map((lambda x: x.decode()), d)))
        print(line)
# Collapse everything under the base target.
basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps)
for d in deps.values():
basedeps.update(d)
base_target = base_target.encode()
basedeps.discard(base_target)
dump({base_target: basedeps})
| 1.296875 | 1 |
substitute_finder/templatetags/substitute_finder_extra.py | tohugaby/pur_beurre_web | 1 | 6788 | <reponame>tohugaby/pur_beurre_web<gh_stars>1-10
"""
substitute_finder app custom templatetags module
"""
from django import template
register = template.Library()
@register.filter
def range_tag(value, min_value=0):
"""
tag that return a range
"""
if value:
return range(min_value, value)
return range(min_value)
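# Illustrative template usage (assumes this tag library is loaded):
#   {% load substitute_finder_extra %}
#   {% for i in 5|range_tag:1 %}{{ i }}{% endfor %}  -> renders 1 2 3 4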
| 1.273438 | 1 |
asv_bench/benchmarks/tslibs/period.py | CitizenB/pandas | 6 | 6916 | """
Period benchmarks that rely only on tslibs. See benchmarks.period for
Period benchmarks that rely on other parts of pandas.
"""
from pandas import Period
from pandas.tseries.frequencies import to_offset
class PeriodProperties:
params = (
["M", "min"],
[
"year",
"month",
"day",
"hour",
"minute",
"second",
"is_leap_year",
"quarter",
"qyear",
"week",
"daysinmonth",
"dayofweek",
"dayofyear",
"start_time",
"end_time",
],
)
param_names = ["freq", "attr"]
def setup(self, freq, attr):
self.per = Period("2012-06-01", freq=freq)
def time_property(self, freq, attr):
getattr(self.per, attr)
class PeriodUnaryMethods:
params = ["M", "min"]
param_names = ["freq"]
def setup(self, freq):
self.per = Period("2012-06-01", freq=freq)
def time_to_timestamp(self, freq):
self.per.to_timestamp()
def time_now(self, freq):
self.per.now(freq)
def time_asfreq(self, freq):
self.per.asfreq("A")
class PeriodConstructor:
params = [["D"], [True, False]]
param_names = ["freq", "is_offset"]
def setup(self, freq, is_offset):
if is_offset:
self.freq = to_offset(freq)
else:
self.freq = freq
def time_period_constructor(self, freq, is_offset):
Period("2012-06-01", freq=freq)
| 2.703125 | 3 |
basic_code/networks.py | J-asy/Emotion-FAN | 275 | 7044 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
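    # 0.7853975 ~= pi/4 radians (45 degrees); the factor 10 makes the sigmoid
    # steep, so |angle| transitions quickly around pi/4 (illustrative note).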
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
    # Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
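# Minimal usage sketch (illustrative shapes; not part of the original file):
#   model = resnet18_at(at_type='self_relation-attention')
#   clip = torch.randn(2, 3, 224, 224, 3)  # three frames per sample
#   logits = model(clip, phrase='train')   # -> [2, 7] emotion scores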
| 2.234375 | 2 |
plugin.video.saltsrd.lite/js2py/translators/jsregexps.py | TheWardoctor/wardoctors-repo | 1 | 7172 | <filename>plugin.video.saltsrd.lite/js2py/translators/jsregexps.py
from salts_lib.pyjsparser.pyjsparserdata import *
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
def SpecialChar(char):
return {'type': 'SpecialChar',
'content': char}
def isPatternCharacter(char):
return char not in NOT_PATTERN_CHARS
class JsRegExpParser:
def __init__(self, source, flags):
self.source = source
self.flags = flags
self.index = 0
self.length = len(source)
self.lineNumber = 0
self.lineStart = 0
def parsePattern(self):
        '''Perform string escape - for regexp literals'''
return {'type': 'Pattern',
'contents': self.parseDisjunction()}
def parseDisjunction(self):
alternatives = []
while True:
alternatives.append(self.parseAlternative())
if not self.isEOF():
self.expect_character('|')
else:
break
return {'type': 'Disjunction',
'contents': alternatives}
def isEOF(self):
if self.index>=self.length:
return True
return False
def expect_character(self, character):
if self.source[self.index]!=character:
self.throwUnexpected(character)
self.index += 1
def parseAlternative(self):
contents = []
while not self.isEOF() and self.source[self.index]!='|':
contents.append(self.parseTerm())
return {'type': 'Alternative',
'contents': contents}
def follows(self, chars):
for i, c in enumerate(chars):
if self.index+i>=self.length or self.source[self.index+i] != c:
return False
return True
def parseTerm(self):
assertion = self.parseAssertion()
if assertion:
return assertion
else:
return {'type': 'Term',
'contents': self.parseAtom()} # quantifier will go inside atom!
def parseAssertion(self):
if self.follows('$'):
content = SpecialChar('$')
self.index += 1
elif self.follows('^'):
content = SpecialChar('^')
self.index += 1
elif self.follows('\\b'):
content = SpecialChar('\\b')
self.index += 2
elif self.follows('\\B'):
content = SpecialChar('\\B')
self.index += 2
elif self.follows('(?='):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': False}
elif self.follows('(?!'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': True}
else:
return None
return {'type': 'Assertion',
'content': content}
def parseAtom(self):
if self.follows('.'):
content = SpecialChar('.')
self.index += 1
elif self.follows('\\'):
self.index += 1
content = self.parseAtomEscape()
elif self.follows('['):
content = self.parseCharacterClass()
elif self.follows('(?:'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = dis  # placeholder 'idk' replaced with the parsed group contents
elif self.follows('('):
self.index += 1
dis = self.parseDisjunction()
self.expect_character(')')
            content = dis  # placeholder 'idk' replaced with the parsed group contents
elif isPatternCharacter(self.source[self.index]):
content = self.source[self.index]
self.index += 1
else:
return None
quantifier = self.parseQuantifier()
return {'type': 'Atom',
'content': content,
'quantifier': quantifier}
def parseQuantifier(self):
prefix = self.parseQuantifierPrefix()
if not prefix:
return None
greedy = True
if self.follows('?'):
self.index += 1
greedy = False
return {'type': 'Quantifier',
'contents': prefix,
'greedy': greedy}
def parseQuantifierPrefix(self):
if self.isEOF():
return None
if self.follows('+'):
content = '+'
self.index += 1
elif self.follows('?'):
content = '?'
self.index += 1
elif self.follows('*'):
content = '*'
self.index += 1
elif self.follows('{'): # try matching otherwise return None and restore the state
i = self.index
self.index += 1
digs1 = self.scanDecimalDigs()
# if no minimal number of digs provided then return no quantifier
if not digs1:
self.index = i
return None
# scan char limit if provided
if self.follows(','):
self.index += 1
digs2 = self.scanDecimalDigs()
else:
digs2 = ''
# must be valid!
if not self.follows('}'):
self.index = i
return None
else:
self.expect_character('}')
content = int(digs1), int(digs2) if digs2 else None
else:
return None
return content
def parseAtomEscape(self):
ch = self.source[self.index]
        if isDecimalDigit(ch) and ch != '0':  # compare to the character '0'; `ch != 0` was always True
            # Backreference escape: the digits are consumed but not yet used.
            digs = self.scanDecimalDigs()
elif ch in CHAR_CLASS_ESCAPE:
self.index += 1
return SpecialChar('\\' + ch)
else:
return self.parseCharacterEscape()
def parseCharacterEscape(self):
ch = self.source[self.index]
if ch in CONTROL_ESCAPE_CHARS:
return SpecialChar('\\' + ch)
        if ch == 'c':
            # TODO: control-letter escapes (\cX) are not implemented.
            pass
def scanDecimalDigs(self):
s = self.index
while not self.isEOF() and isDecimalDigit(self.source[self.index]):
self.index += 1
return self.source[s:self.index]
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern()) | 1.679688 | 2 |
src/py_scripts/fc_phasing.py | pb-jchin/FALCON_unzip | 2 | 7300 | <filename>src/py_scripts/fc_phasing.py
from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
from falcon_kit.FastaReader import FastaReader
import subprocess, shlex
import os, re
cigar_re = r"(\d+)([MIDNSHP=X])"
def make_het_call(self):
bam_fn = fn(self.bam_file)
ctg_id = self.parameters["ctg_id"]
ref_seq = self.parameters["ref_seq"]
base_dir = self.parameters["base_dir"]
vmap_fn = fn(self.vmap_file)
vpos_fn = fn(self.vpos_file)
q_id_map_fn = fn(self.q_id_map_file)
p = subprocess.Popen(shlex.split("samtools view %s %s" % (bam_fn, ctg_id) ), stdout=subprocess.PIPE)
pileup = {}
q_id_map = {}
q_max_id = 0
q_id = 0
q_name_to_id = {}
try:
os.makedirs("%s/%s" % (base_dir, ctg_id))
except OSError:
pass
vmap = open(vmap_fn, "w")
vpos = open(vpos_fn, "w")
for l in p.stdout:
l = l.strip().split()
if l[0][0] == "@":
continue
QNAME = l[0]
if QNAME not in q_name_to_id:
q_id = q_max_id
q_name_to_id[QNAME] = q_id
q_max_id += 1
q_id = q_name_to_id[QNAME]
q_id_map[q_id] = QNAME
FLAG = int(l[1])
RNAME = l[2]
POS = int(l[3]) - 1 # convert to zero base
CIGAR = l[5]
SEQ = l[9]
rp = POS
qp = 0
skip_base = 0
total_aln_pos = 0
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
total_aln_pos += adv
if m.group(2) == "S":
skip_base += adv
if 1.0 - 1.0 * skip_base / total_aln_pos < 0.1:
continue
if total_aln_pos < 2000:
continue
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
if m.group(2) == "S":
qp += adv
if m.group(2) == "M":
matches = []
for i in range(adv):
matches.append( (rp, SEQ[qp]) )
rp += 1
qp += 1
matches = matches[1:-1]
for pos, b in matches:
pileup.setdefault(pos, {})
pileup[pos].setdefault(b, [])
pileup[pos][b].append(q_id)
elif m.group(2) == "I":
for i in range(adv):
qp += 1
elif m.group(2) == "D":
for i in range(adv):
rp += 1
pos_k = pileup.keys()
pos_k.sort()
th = 0.25
for pos in pos_k:
if pos < POS:
if len(pileup[pos]) < 2:
del pileup[pos]
continue
base_count = []
total_count = 0
for b in ["A", "C", "G", "T"]:
count = len(pileup[pos].get(b,[]))
base_count.append( (count, b) )
total_count += count
if total_count < 10:
del pileup[pos]
continue
base_count.sort()
base_count.reverse()
p0 = 1.0 * base_count[0][0] / total_count
p1 = 1.0 * base_count[1][0] / total_count
if p0 < 1.0 - th and p1 > th:
b0 = base_count[0][1]
b1 = base_count[1][1]
ref_base = ref_seq[pos]
print >> vpos, pos+1, ref_base, total_count, " ".join(["%s %d" % (x[1], x[0]) for x in base_count])
for q_id_ in pileup[pos][b0]:
print >> vmap, pos+1, ref_base, b0, q_id_
for q_id_ in pileup[pos][b1]:
print >> vmap, pos+1, ref_base, b1, q_id_
del pileup[pos]
q_id_map_f = open(q_id_map_fn, "w")
for q_id, q_name in q_id_map.items():
print >> q_id_map_f, q_id, q_name
def generate_association_table(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
ctg_id = self.parameters["ctg_id"]
base_dir = self.parameters["base_dir"]
vmap = {}
v_positions = []
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
if (pos, ref_b) not in vmap:
v_positions.append( (pos, ref_b) )
vmap.setdefault( (pos, ref_b), {} )
vmap[ (pos, ref_b) ].setdefault(v_b, [])
vmap[ (pos, ref_b) ][v_b].append( q_id )
#xary = []
#yary = []
with open(atable_fn, "w") as out_f:
for i1 in xrange(len(v_positions)):
link_count = 0
for i2 in xrange(i1+1, len(v_positions)):
pos1, rb1 = v_positions[i1]
pos2, rb2 = v_positions[i2]
if pos2 - pos1 > (1 << 16):
continue
ct = {}
p1table = []
p2table = []
s1 = 0
list1 = vmap[ (pos1, rb1) ].items()
for b1, qids1 in list1:
p1table.append( (b1, len(qids1) ) )
s1 += len(qids1)
s2 = 0
list2 = vmap[ (pos2, rb2) ].items()
for b2, qids2 in list2:
p2table.append( (b2, len(qids2) ) )
s2 += len(qids2)
total_s = 0
for b1, qids1 in list1:
for b2, qids2 in list2:
s = len(set(qids1) & set(qids2))
ct[(b1,b2)] = s
total_s += s
if total_s < 6:
continue
b11 = p1table[0][0]
b12 = p1table[1][0]
b21 = p2table[0][0]
b22 = p2table[1][0]
print >> out_f, pos1, b11, b12, pos2, b21, b22, ct[(b11,b21)], ct[(b11,b22)], ct[(b12,b21)], ct[(b12,b22)]
#xary.append(pos1)
#yary.append(pos2)
link_count += 1
if link_count > 500:
break
def get_score( c_score, pos1, pos2, s1, s2 ):
if pos1 > pos2:
pos1, pos2 = pos2, pos1
s1, s2 = s2, s1
b11, b12 = s1
b21, b22 = s2
return c_score[ (pos1, pos2) ][ (b11+b21, b12+b22) ]
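# Note: get_score normalizes the pair so pos1 <= pos2 (swapping the states
# along with the positions), because c_score is keyed on ordered position pairs.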
def get_phased_blocks(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
p_variant_fn = fn(self.phased_variant_file)
left_connect = {}
right_connect = {}
c_score = {}
states = {}
positions = set()
ref_base = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
ref_base[pos] = ref_b
with open(atable_fn) as f:
for l in f:
l = l.strip().split()
pos1, b11, b12, pos2, b21, b22, s11, s12, s21, s22 = l
s11, s12, s21, s22 = int(s11), int(s12), int(s21), int(s22)
if abs(s11+s22-s12-s21) < 6:
continue
pos1 = int(pos1)
pos2 = int(pos2)
positions.add(pos1)
positions.add(pos2)
right_connect.setdefault(pos1, [])
right_connect[pos1].append(pos2)
left_connect.setdefault(pos2, [])
left_connect[pos2].append(pos1)
c_score[ (pos1, pos2) ] = { (b11+b21, b12+b22): s11 + s22, (b12+b22, b11+b21): s11 + s22,
(b12+b21, b11+b22): s12 + s21, (b11+b22, b12+b21): s12 + s21 }
if pos1 not in states:
st1 = (b11, b12)
st2 = (b12, b11)
score1 = 0
score2 = 0
for pp in left_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos1, st0, st1 )
score2 += get_score( c_score, pp, pos1, st0, st2 )
for pp in right_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos1, pp, st1, st0 )
score2 += get_score( c_score, pos1, pp, st2, st0 )
if score1 >= score2:
states[pos1] = st1
else:
states[pos1] = st2
if pos2 not in states:
st1 = (b21, b22)
st2 = (b22, b21)
score1 = 0
score2 = 0
for pp in left_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos2, st0, st1 )
score2 += get_score( c_score, pp, pos2, st0, st2 )
for pp in right_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos2, pp, st1, st0 )
score2 += get_score( c_score, pos2, pp, st2, st0 )
if score1 >= score2:
states[pos2] = st1
else:
states[pos2] = st2
positions = list(positions)
positions.sort()
iter_count = 0
while 1:
iter_count += 1
if iter_count > 10:
break
update_count = 0
for p in positions:
b1, b2 = states[p]
st1 = (b1, b2)
st2 = (b2, b1)
score1 = 0
score2 = 0
for pp in left_connect.get(p,[]):
st0 = states[pp]
score1 += get_score( c_score, pp, p, st0 ,st1)
score2 += get_score( c_score, pp, p, st0, st2)
#for pp in right_connect.get(p,[]):
# st0 = states[pp]
# score1 += get_score( c_score, p, pp, st1 ,st0)
# score2 += get_score( c_score, p, pp, st2, st0)
if score1 >= score2:
states[p] = st1
else:
states[p] = st2
update_count += 1
if update_count == 0:
break
right_extent = {}
right_score = {}
left_extent = {}
left_score = {}
for p in positions:
left_extent[p] = p
left_score[p] = 0
if p in left_connect:
left = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in left_connect[p]:
st1 = states[pp]
s = get_score( c_score, pp, p, st1, st0)
s_ = get_score( c_score, pp, p, st1, st0_)
left_score[p] += s - s_
if s - s_ > 0 and pp < left:
left = pp
left_extent[p] = left
right_extent[p] = p
right_score[p] = 0
if p in right_connect:
right = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in right_connect[p]:
st1 = states[pp]
s = get_score( c_score, p, pp, st0, st1)
s_ = get_score( c_score, p, pp, st0_, st1)
right_score[p] += s - s_
if s - s_ > 0 and pp > right:
right = pp
right_extent[p] = right
phase_block_id = 1
phase_blocks = {}
pb = []
max_right_ext = 0
for p in positions:
if right_score[p] < 10 or left_score[p] < 10:
continue
b1, b2 = states[p]
if max_right_ext < left_extent[p]:
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
phase_block_id += 1
pb = []
pb.append( (p, b1, b2) )
if right_extent[p] > max_right_ext:
max_right_ext = right_extent[p]
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
else:
phase_block_id -= 1
with open(p_variant_fn, "w") as out_f:
for pid in xrange(1, phase_block_id+1):
if len(phase_blocks[pid]) == 0:
continue
min_ = min( [x[0] for x in phase_blocks[pid]] )
max_ = max( [x[0] for x in phase_blocks[pid]] )
print >>out_f, "P", pid, min_, max_, max_ - min_, len(phase_blocks[pid]), 1.0 * (max_-min_)/len(phase_blocks[pid])
for p, b1, b2 in phase_blocks[pid]:
rb = ref_base[p]
print >>out_f, "V", pid, p, "%d_%s_%s" % (p,rb,b1), "%d_%s_%s" % (p,rb,b2), left_extent[p], right_extent[p], left_score[p], right_score[p]
def get_phased_reads(self):
q_id_map_fn = fn(self.q_id_map_file)
vmap_fn = fn(self.vmap_file)
p_variant_fn = fn(self.phased_variant_file)
    ctg_id = self.parameters["ctg_id"]  # read from the task's own parameters
phased_read_fn = fn(self.phased_read_file)
rid_map = {}
with open(q_id_map_fn) as f:
for l in f:
l = l.strip().split()
rid_map[int(l[0])] = l[1]
read_to_variants = {}
variant_to_reads = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
variant = "_".join(l[:3])
read_id = int(l[3])
read_to_variants.setdefault(read_id, set())
read_to_variants[read_id].add(variant)
variant_to_reads.setdefault(variant, set())
variant_to_reads[variant].add(read_id)
variant_to_phase = {}
with open(p_variant_fn) as f:
for l in f:
"""line format example: V 1 6854 6854_A_A 6854_A_G 6854 22781"""
l = l.strip().split()
if l[0] != "V":
continue
pb_id = int(l[1])
variant_to_phase[ l[3] ] = (pb_id, 0)
variant_to_phase[ l[4] ] = (pb_id, 1)
with open(phased_read_fn, "w") as out_f:
for r in read_to_variants:
vl = {}
pl = set()
for v in list( read_to_variants[r] ):
if v in variant_to_phase:
p = variant_to_phase[v]
vl[ p ] = vl.get(p, 0) + 1
pl.add(p[0])
pl = list(pl)
pl.sort()
for p in pl:
if vl.get( (p,0), 0) - vl.get( (p,1), 0) > 1:
print >> out_f, r, ctg_id, p, 0, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
elif vl.get( (p,1), 0) - vl.get( (p,0), 0) > 1:
print >> out_f, r, ctg_id, p, 1, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
if __name__ == "__main__":
import argparse
import re
parser = argparse.ArgumentParser(description='phasing variants and reads from a bam file')
# we can run this in parallel mode in the furture
#parser.add_argument('--n_core', type=int, default=4,
# help='number of processes used for generating consensus')
parser.add_argument('--bam', type=str, help='path to sorted bam file', required=True)
parser.add_argument('--fasta', type=str, help='path to the fasta file of contain the contig', required=True)
parser.add_argument('--ctg_id', type=str, help='contig identifier in the bam file', required=True)
parser.add_argument('--base_dir', type=str, default="./", help='the output base_dir, default to current working directory')
args = parser.parse_args()
bam_fn = args.bam
fasta_fn = args.fasta
ctg_id = args.ctg_id
base_dir = args.base_dir
ref_seq = ""
for r in FastaReader(fasta_fn):
rid = r.name.split()[0]
if rid != ctg_id:
continue
ref_seq = r.sequence.upper()
PypeThreadWorkflow.setNumThreadAllowed(1, 1)
wf = PypeThreadWorkflow()
bam_file = makePypeLocalFile(bam_fn)
vmap_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_map") )
vpos_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_pos") )
q_id_map_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "q_id_map") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["ref_seq"] = ref_seq
parameters["base_dir"] = base_dir
make_het_call_task = PypeTask( inputs = { "bam_file": bam_file },
outputs = { "vmap_file": vmap_file, "vpos_file": vpos_file, "q_id_map_file": q_id_map_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/het_call") (make_het_call)
wf.addTasks([make_het_call_task])
atable_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "atable") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["base_dir"] = base_dir
generate_association_table_task = PypeTask( inputs = { "vmap_file": vmap_file },
outputs = { "atable_file": atable_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/g_atable") (generate_association_table)
wf.addTasks([generate_association_table_task])
phased_variant_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_variants") )
get_phased_blocks_task = PypeTask( inputs = { "vmap_file": vmap_file, "atable_file": atable_file },
outputs = { "phased_variant_file": phased_variant_file },
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_blocks") (get_phased_blocks)
wf.addTasks([get_phased_blocks_task])
phased_read_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_reads") )
get_phased_reads_task = PypeTask( inputs = { "vmap_file": vmap_file,
"q_id_map_file": q_id_map_file,
"phased_variant_file": phased_variant_file },
outputs = { "phased_read_file": phased_read_file },
parameters = {"ctg_id": ctg_id},
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_reads") (get_phased_reads)
wf.addTasks([get_phased_reads_task])
wf.refreshTargets()
#with open("fc_phasing_wf.dot", "w") as f:
# print >>f, wf.graphvizDot
| 1.539063 | 2 |
src/sage/combinat/combinatorial_map.py | UCD4IDS/sage | 0 | 7428 | <gh_stars>0
"""
Combinatorial maps
This module provides a decorator that can be used to add semantics to a
Python method by marking it as implementing a *combinatorial map*,
that is a map between two :class:`enumerated sets <EnumeratedSets>`::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
By default, this decorator is a no-op: it returns the decorated method
as is::
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
See :func:`combinatorial_map_wrapper` for the various options this
decorator can take.
Projects built on top of Sage are welcome to customize locally this
hook to instrument the Sage code and exploit this semantic
information. Typically, the decorator could be used to populate a
database of maps. For a real-life application, see the project
`FindStat <http://findstat.org/>`. As a basic example, a variant of
the decorator is provided as :func:`combinatorial_map_wrapper`; it
wraps the decorated method, so that one can later use
:func:`combinatorial_maps_in_class` to query an object, or class
thereof, for all the combinatorial maps that apply to it.
.. NOTE::
Since decorators are evaluated upon loading Python modules,
customizing :obj:`combinatorial map` needs to be done before the
modules using it are loaded. In the examples below, where we
illustrate the customized ``combinatorial_map`` decorator on the
:mod:`sage.combinat.permutation` module, we resort to force a
reload of this module after dynamically changing
``sage.combinat.combinatorial_map.combinatorial_map``. This is
good enough for those doctests, but remains fragile.
For real use cases, it is probably best to just edit this source
file statically (see below).
"""
# ****************************************************************************
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
def combinatorial_map_trivial(f=None, order=None, name=None):
r"""
Combinatorial map decorator
See :ref:`sage.combinat.combinatorial_map` for a description of
this decorator and its purpose. This default implementation does
nothing.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- ``f`` unchanged
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_trivial as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: # ... code ...
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: # ... code ...
sage: MyPermutation.reverse
<function MyPermutation.reverse at ...>
sage: MyPermutation.descent_set
<function MyPermutation.descent_set at ...>
"""
if f is None:
return lambda f: f
else:
return f
def combinatorial_map_wrapper(f=None, order=None, name=None):
r"""
Combinatorial map decorator (basic example).
See :ref:`sage.combinat.combinatorial_map` for a description of
the ``combinatorial_map`` decorator and its purpose. This
implementation, together with :func:`combinatorial_maps_in_class`
illustrates how to use this decorator as a hook to instrument the
Sage code.
INPUT:
- ``f`` -- (default: ``None``, if combinatorial_map is used as a decorator) a function
- ``name`` -- (default: ``None``) the name for nicer outputs on combinatorial maps
- ``order`` -- (default: ``None``) the order of the combinatorial map, if it is known. Is not used, but might be helpful later
OUTPUT:
- A combinatorial map. This is an instance of the :class:`CombinatorialMap`.
EXAMPLES:
We define a class illustrating the use of this implementation of
the :obj:`combinatorial_map` decorator with its various arguments::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: class MyPermutation(object):
....: @combinatorial_map()
....: def reverse(self):
....: '''
....: Reverse the permutation
....: '''
....: pass
....: @combinatorial_map(order=2)
....: def inverse(self):
....: '''
....: The inverse of the permutation
....: '''
....: pass
....: @combinatorial_map(name='descent set of permutation')
....: def descent_set(self):
....: '''
....: The descent set of the permutation
....: '''
....: pass
....: def major_index(self):
....: '''
....: The major index of the permutation
....: '''
....: pass
sage: MyPermutation.reverse
Combinatorial map: reverse
sage: MyPermutation.descent_set
Combinatorial map: descent set of permutation
sage: MyPermutation.inverse
Combinatorial map: inverse
One can now determine all the combinatorial maps associated with a
given object as follows::
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: X = combinatorial_maps_in_class(MyPermutation); X # random
[Combinatorial map: reverse,
Combinatorial map: descent set of permutation,
Combinatorial map: inverse]
    The method ``major_index`` defined above is not a combinatorial map::
sage: MyPermutation.major_index
<function MyPermutation.major_index at ...>
But one can define a function that turns ``major_index`` into a combinatorial map::
sage: def major_index(p):
....: return p.major_index()
sage: major_index
<function major_index at ...>
sage: combinatorial_map(major_index)
Combinatorial map: major_index
"""
if f is None:
return lambda f: CombinatorialMap(f, order=order, name=name)
else:
return CombinatorialMap(f, order=order, name=name)
##############################################################################
# Edit here to customize the combinatorial_map hook
##############################################################################
combinatorial_map = combinatorial_map_trivial
# combinatorial_map = combinatorial_map_wrapper
class CombinatorialMap(object):
r"""
This is a wrapper class for methods that are *combinatorial maps*.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
"""
def __init__(self, f, order=None, name=None):
"""
Constructor for combinatorial maps.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map_wrapper as combinatorial_map
sage: def f(x):
....: "doc of f"
....: return x
sage: x = combinatorial_map(f); x
Combinatorial map: f
sage: x.__doc__
'doc of f'
sage: x.__name__
'f'
sage: x.__module__
'__main__'
"""
import types
if not isinstance(f, types.FunctionType):
raise ValueError("Only plain functions are supported")
        self._f = f
        self._order = order
        self._name = name
        # Default to unbound; __get__ fills this in on attribute access.
        self._inst = None
if hasattr(f, "__doc__"):
self.__doc__ = f.__doc__
if hasattr(f, "__name__"):
self.__name__ = f.__name__
else:
self.__name__ = "..."
if hasattr(f, "__module__"):
self.__module__ = f.__module__
def __repr__(self):
"""
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau.__repr__()
'Combinatorial map: Robinson-Schensted insertion tableau'
"""
return "Combinatorial map: %s" % self.name()
def _sage_src_lines_(self):
r"""
Return the source code location for the wrapped function.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = p.left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: (src, lines) = cm._sage_src_lines_()
sage: src[0]
" @combinatorial_map(name='Robinson-Schensted insertion tableau')\n"
sage: lines # random
2653
"""
from sage.misc.sageinspect import sage_getsourcelines
return sage_getsourcelines(self._f)
def __get__(self, inst, cls=None):
"""
        Binds the method of ``self`` to the given instance.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: p.left_tableau #indirect doctest
Combinatorial map: Robinson-Schensted insertion tableau
"""
self._inst = inst
return self
def __call__(self, *args, **kwds):
"""
Calls the combinatorial map.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: p = Permutation([1,3,2,4])
sage: cm = type(p).left_tableau; cm
Combinatorial map: Robinson-Schensted insertion tableau
sage: cm(p)
[[1, 2, 4], [3]]
sage: cm(Permutation([4,3,2,1]))
[[1], [2], [3], [4]]
"""
if self._inst is not None:
return self._f(self._inst, *args, **kwds)
else:
return self._f(*args, **kwds)
def unbounded_map(self):
r"""
Return the unbounded version of ``self``.
You can use this method to return a function which takes as input
an element in the domain of the combinatorial map.
See the example below.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.permutation import Permutation
sage: pi = Permutation([1,3,2])
sage: f = pi.reverse
sage: F = f.unbounded_map()
sage: F(pi)
[2, 3, 1]
"""
return self._f
def order(self):
"""
Returns the order of ``self``, or ``None`` if the order is not known.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(order=2)
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.order()
2
sage: CombinatorialClass.to_self_2.order() is None
True
"""
return self._order
def name(self):
"""
Returns the name of a combinatorial map.
This is used for the string representation of ``self``.
EXAMPLES::
sage: from sage.combinat.combinatorial_map import combinatorial_map
sage: class CombinatorialClass:
....: @combinatorial_map(name='map1')
....: def to_self_1(): pass
....: @combinatorial_map()
....: def to_self_2(): pass
sage: CombinatorialClass.to_self_1.name()
'map1'
sage: CombinatorialClass.to_self_2.name()
'to_self_2'
"""
if self._name is not None:
return self._name
else:
return self._f.__name__
def combinatorial_maps_in_class(cls):
"""
Return the combinatorial maps of the class as a list of combinatorial maps.
For further details and doctests, see
:ref:`sage.combinat.combinatorial_map` and
:func:`combinatorial_map_wrapper`.
EXAMPLES::
sage: sage.combinat.combinatorial_map.combinatorial_map = sage.combinat.combinatorial_map.combinatorial_map_wrapper
sage: from importlib import reload
sage: _ = reload(sage.combinat.permutation)
sage: from sage.combinat.combinatorial_map import combinatorial_maps_in_class
sage: p = Permutation([1,3,2,4])
sage: cmaps = combinatorial_maps_in_class(p)
sage: cmaps # random
[Combinatorial map: Robinson-Schensted insertion tableau,
Combinatorial map: Robinson-Schensted recording tableau,
Combinatorial map: Robinson-Schensted tableau shape,
Combinatorial map: complement,
Combinatorial map: descent composition,
Combinatorial map: inverse, ...]
sage: p.left_tableau in cmaps
True
sage: p.right_tableau in cmaps
True
sage: p.complement in cmaps
True
"""
result = set()
for method in dir(cls):
entry = getattr(cls, method)
if isinstance(entry, CombinatorialMap):
result.add(entry)
return list(result)
| 2.5 | 2 |
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py | samn/opencensus-python | 0 | 7556 | <filename>contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django middleware helper to capture and trace a request."""
import logging
from opencensus.ext.django.config import (settings, convert_to_import)
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace import utils
from opencensus.trace.samplers import probability
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # pragma: NO COVER
MiddlewareMixin = object
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
REQUEST_THREAD_LOCAL_KEY = 'django_request'
SPAN_THREAD_LOCAL_KEY = 'django_span'
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
GCP_EXPORTER_PROJECT = 'GCP_EXPORTER_PROJECT'
SAMPLING_RATE = 'SAMPLING_RATE'
TRANSPORT = 'TRANSPORT'
SERVICE_NAME = 'SERVICE_NAME'
ZIPKIN_EXPORTER_SERVICE_NAME = 'ZIPKIN_EXPORTER_SERVICE_NAME'
ZIPKIN_EXPORTER_HOST_NAME = 'ZIPKIN_EXPORTER_HOST_NAME'
ZIPKIN_EXPORTER_PORT = 'ZIPKIN_EXPORTER_PORT'
ZIPKIN_EXPORTER_PROTOCOL = 'ZIPKIN_EXPORTER_PROTOCOL'
JAEGER_EXPORTER_HOST_NAME = 'JAEGER_EXPORTER_HOST_NAME'
JAEGER_EXPORTER_PORT = 'JAEGER_EXPORTER_PORT'
JAEGER_EXPORTER_AGENT_HOST_NAME = 'JAEGER_EXPORTER_AGENT_HOST_NAME'
JAEGER_EXPORTER_AGENT_PORT = 'JAEGER_EXPORTER_AGENT_PORT'
JAEGER_EXPORTER_SERVICE_NAME = 'JAEGER_EXPORTER_SERVICE_NAME'
OCAGENT_TRACE_EXPORTER_ENDPOINT = 'OCAGENT_TRACE_EXPORTER_ENDPOINT'
BLACKLIST_HOSTNAMES = 'BLACKLIST_HOSTNAMES'
log = logging.getLogger(__name__)
class _DjangoMetaWrapper(object):
"""
    Wrapper class which takes an HTTP header name and retrieves the value
    from Django request.META
"""
def __init__(self, meta=None):
self.meta = meta or _get_django_request().META
def get(self, key):
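        # Django exposes HTTP headers in META as 'HTTP_' + upper-snake-case,
        # e.g. 'X-Cloud-Trace-Context' -> META['HTTP_X_CLOUD_TRACE_CONTEXT'].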
return self.meta.get('HTTP_' + key.upper().replace('-', '_'))
def _get_django_request():
"""Get Django request from thread local.
    :rtype: django.http.HttpRequest
    :returns: Django request.
"""
return execution_context.get_opencensus_attr(REQUEST_THREAD_LOCAL_KEY)
def _get_django_span():
"""Get Django span from thread local.
    :rtype: opencensus.trace.span.Span
    :returns: Django span.
"""
return execution_context.get_opencensus_attr(SPAN_THREAD_LOCAL_KEY)
def _get_current_tracer():
"""Get the current request tracer."""
return execution_context.get_opencensus_tracer()
def _set_django_attributes(span, request):
"""Set the django related attributes."""
django_user = getattr(request, 'user', None)
if django_user is None:
return
user_id = django_user.pk
try:
user_name = django_user.get_username()
except AttributeError:
# AnonymousUser in some older versions of Django doesn't implement
# get_username
return
# User id is the django autofield for User model as the primary key
if user_id is not None:
span.add_attribute('django.user.id', str(user_id))
if user_name is not None:
span.add_attribute('django.user.name', str(user_name))
class OpencensusMiddleware(MiddlewareMixin):
"""Saves the request in thread local"""
def __init__(self, get_response=None):
# One-time configuration and initialization.
self.get_response = get_response
self._sampler = settings.SAMPLER
self._exporter = settings.EXPORTER
self._propagator = settings.PROPAGATOR
self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)
# Initialize the sampler
if self._sampler.__name__ == 'ProbabilitySampler':
_rate = settings.params.get(
SAMPLING_RATE, probability.DEFAULT_SAMPLING_RATE)
self.sampler = self._sampler(_rate)
else:
self.sampler = self._sampler()
# Initialize the exporter
transport = convert_to_import(settings.params.get(TRANSPORT))
if self._exporter.__name__ == 'GoogleCloudExporter':
_project_id = settings.params.get(GCP_EXPORTER_PROJECT, None)
self.exporter = self._exporter(
project_id=_project_id,
transport=transport)
elif self._exporter.__name__ == 'ZipkinExporter':
_service_name = self._get_service_name(settings.params)
_zipkin_host_name = settings.params.get(
ZIPKIN_EXPORTER_HOST_NAME, 'localhost')
_zipkin_port = settings.params.get(
ZIPKIN_EXPORTER_PORT, 9411)
_zipkin_protocol = settings.params.get(
ZIPKIN_EXPORTER_PROTOCOL, 'http')
self.exporter = self._exporter(
service_name=_service_name,
host_name=_zipkin_host_name,
port=_zipkin_port,
protocol=_zipkin_protocol,
transport=transport)
elif self._exporter.__name__ == 'TraceExporter':
_service_name = self._get_service_name(settings.params)
_endpoint = settings.params.get(
OCAGENT_TRACE_EXPORTER_ENDPOINT, None)
self.exporter = self._exporter(
service_name=_service_name,
endpoint=_endpoint,
transport=transport)
elif self._exporter.__name__ == 'JaegerExporter':
_service_name = settings.params.get(
JAEGER_EXPORTER_SERVICE_NAME,
self._get_service_name(settings.params))
_jaeger_host_name = settings.params.get(
JAEGER_EXPORTER_HOST_NAME, None)
_jaeger_port = settings.params.get(
JAEGER_EXPORTER_PORT, None)
_jaeger_agent_host_name = settings.params.get(
JAEGER_EXPORTER_AGENT_HOST_NAME, 'localhost')
_jaeger_agent_port = settings.params.get(
JAEGER_EXPORTER_AGENT_PORT, 6831)
self.exporter = self._exporter(
service_name=_service_name,
host_name=_jaeger_host_name,
port=_jaeger_port,
agent_host_name=_jaeger_agent_host_name,
agent_port=_jaeger_agent_port,
transport=transport)
else:
self.exporter = self._exporter(transport=transport)
self.blacklist_hostnames = settings.params.get(
BLACKLIST_HOSTNAMES, None)
# Initialize the propagator
self.propagator = self._propagator()
def process_request(self, request):
"""Called on each request, before Django decides which view to execute.
:type request: :class:`~django.http.request.HttpRequest`
:param request: Django http request.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
# Add the request to thread local
execution_context.set_opencensus_attr(
REQUEST_THREAD_LOCAL_KEY,
request)
execution_context.set_opencensus_attr(
'blacklist_hostnames',
self.blacklist_hostnames)
try:
# Start tracing this request
span_context = self.propagator.from_headers(
_DjangoMetaWrapper(_get_django_request().META))
# Reload the tracer with the new span context
tracer = tracer_module.Tracer(
span_context=span_context,
sampler=self.sampler,
exporter=self.exporter,
propagator=self.propagator)
# Span name is being set at process_view
span = tracer.start_span()
span.span_kind = span_module.SpanKind.SERVER
tracer.add_attribute_to_current_span(
attribute_key=HTTP_METHOD,
attribute_value=request.method)
tracer.add_attribute_to_current_span(
attribute_key=HTTP_URL,
attribute_value=str(request.path))
# Add the span to thread local
# in some cases (exceptions, timeouts) currentspan in
# response event will be one of a child spans.
# let's keep reference to 'django' span and
# use it in response event
execution_context.set_opencensus_attr(
SPAN_THREAD_LOCAL_KEY,
span)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_view(self, request, view_func, *args, **kwargs):
"""Process view is executed before the view function, here we get the
function name add set it as the span name.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
try:
# Get the current span and set the span name to the current
# function name of the request.
tracer = _get_current_tracer()
span = tracer.current_span()
span.name = utils.get_func_name(view_func)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_response(self, request, response):
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return response
try:
span = _get_django_span()
span.add_attribute(
attribute_key=HTTP_STATUS_CODE,
attribute_value=str(response.status_code))
_set_django_attributes(span, request)
tracer = _get_current_tracer()
tracer.end_span()
tracer.finish()
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
finally:
return response
def _get_service_name(self, params):
_service_name = params.get(
SERVICE_NAME, None)
if _service_name is None:
_service_name = params.get(
ZIPKIN_EXPORTER_SERVICE_NAME, 'my_service')
return _service_name
| 1.117188 | 1 |
.virtual_documents/00_core.ipynb.py | AtomScott/image_folder_datasets | 0 | 7684 | <gh_stars>0
# default_exp core
#hide
from nbdev.showdoc import *
from fastcore.test import *
# export
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import warnings
import torchvision
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize
import pytorch_lightning as pl
# Restored: CNNModule.calculate_metrics below uses classification.accuracy and
# f1, which older pytorch_lightning releases expose under metrics.functional.
from pytorch_lightning.metrics.functional import classification, f1
from pytorch_lightning.loggers import TensorBoardLogger
import fastai.vision.augment
import fastai.vision.data
# from fastai.vision.data import ImageDataLoaders
# from fastai.vision.augment import Resize
#export
class ImageFolderDataModule(pl.LightningDataModule):
def __init__(self, data_dir, batch_size, transform):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.transform = transform
# Compose([
# Resize(256, interpolation=2),
# CenterCrop(224),
# ToTensor(),
# # TODO: check whether normalize is the same for imagenet and fractalDB
# Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
def prepare_data(self, stage=None):
pass
def setup(self, stage=None):
data_dir = self.data_dir
transform = self.transform
self.dls = fastai.vision.data.ImageDataLoaders.from_folder(data_dir, item_tfms=fastai.vision.augment.Resize(224))
self.trainset = ImageFolder(os.path.join(data_dir, 'train'), transform)
self.valset = ImageFolder(os.path.join(data_dir, 'valid'), transform)
def train_dataloader(self):
return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.valset, batch_size=self.batch_size, shuffle=False)
def test_dataloader(self):
pass
data_dir = 'Datasets/cifar10'
transform = Compose([
Resize(256, interpolation=2),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dm = ImageFolderDataModule(data_dir, 128, transform)
dm.setup()
for x,y in dm.train_dataloader():
test_eq(type(x), torch.Tensor)
test_eq(type(y), torch.Tensor)
break
#export
class CNNModule(pl.LightningModule):
def __init__(self, model=None, pretrained=False, freeze_extractor=False, log_level=10, num_classes=None, weight_path=None):
super().__init__()
self.num_classes = num_classes
self.pretrained = pretrained
self.freeze_extractor = freeze_extractor
assert model is not None, 'Select model from torchvision'
assert num_classes is not None, 'Must configure number of classes with num_classes'
if not model.startswith('resnet'):
warnings.warn('models other than resnet variants may need different setup for finetuning to work.')
# Prepare model for finetuning
if weight_path is not None:
param = torch.load(weight_path)
backbone = eval(f'torchvision.models.{model}(pretrained={False})')
backbone.load_state_dict(param)
else:
backbone = eval(f'torchvision.models.{model}(pretrained={pretrained})')
num_filters = backbone.fc.in_features
layers = list(backbone.children())[:-1]
self.feature_extractor = torch.nn.Sequential(*layers)
self.classifier = nn.Linear(num_filters, num_classes)
def forward(self, x):
if self.freeze_extractor:
self.feature_extractor.eval()
with torch.no_grad():
representations = self.feature_extractor(x).flatten(1)
else:
representations = self.feature_extractor(x).flatten(1)
y = self.classifier(representations)
return y
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def training_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/train", val, self.current_epoch)
avg_metrics[metric] = val
# epoch_dictionary = {'loss': avg_metrics['loss']}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def validation_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/validation", val, self.current_epoch)
avg_metrics[metric] = val
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02, weight_decay=1e-04)
        # Alternative: return torch.optim.SGD(self.parameters(), lr=self.lr, momentum=0.9)
def calculate_metrics(self, y, y_hat):
loss = F.cross_entropy(y_hat, y)
y_pred = y_hat.argmax(dim=1)
acc = classification.accuracy(y_pred, y)
f1_score = f1(y_pred, y, self.num_classes)
return {
"loss":loss,
"acc": acc,
"f1": f1_score
}
def on_sanity_check_start(self):
self.logger.disable()
def on_sanity_check_end(self):
self.logger.enable()
modelname = 'resnet18'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes))
test_eq(trainer.fit(model, dm), 1)
weight_path = 'FractalDB-1000_resnet50_epoch90.pth'
modelname = 'resnet50'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path)
test_eq(trainer.fit(model, dm), 1)
| 2.03125 | 2 |
instructions/instructions.py | fernandozanutto/PyNES | 0 | 7812 | <reponame>fernandozanutto/PyNES
from addressing import *
from instructions.base_instructions import SetBit, ClearBit
from instructions.generic_instructions import Instruction
from status import Status
# set status instructions
class Sec(SetBit):
identifier_byte = bytes([0x38])
bit = Status.StatusTypes.carry
class Sei(SetBit):
identifier_byte = bytes([0x78])
bit = Status.StatusTypes.interrupt
class Sed(SetBit):
identifier_byte = bytes([0xF8])
bit = Status.StatusTypes.decimal
# clear status instructions
class Cld(ClearBit):
identifier_byte = bytes([0xD8])
bit = Status.StatusTypes.decimal
class Clc(ClearBit):
identifier_byte = bytes([0x18])
bit = Status.StatusTypes.carry
class Clv(ClearBit):
identifier_byte = bytes([0xB8])
bit = Status.StatusTypes.overflow
class Cli(ClearBit):
identifier_byte = bytes([0x58])
bit = Status.StatusTypes.interrupt
class Bit(Instruction):
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return cpu.bus.read_memory(memory_address)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
and_result = cpu.a_reg & value
cpu.status_reg.bits[Status.StatusTypes.zero] = not and_result
cpu.status_reg.bits[Status.StatusTypes.overflow] = (
value & (1 << 6)) > 0
cpu.status_reg.bits[Status.StatusTypes.negative] = (
value & (1 << 7)) > 0
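# 6502 BIT semantics as implemented above: Z is set from (A AND M) == 0, while
# V and N are copied directly from bits 6 and 7 of the memory operand.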
class BitZeroPage(ZeroPageAddressing, Bit):
identifier_byte = bytes([0x24])
class BitAbsolute(AbsoluteAddressing, Bit):
identifier_byte = bytes([0x2C])
class Brk(ImplicitAddressing, Instruction):
identifier_byte = bytes([0x00])
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return super().get_data(cpu, memory_address, data_bytes)
@classmethod
def write(cls, cpu, memory_address, value):
cpu.push_to_stack(cpu.pc_reg + 1, 2)
cpu.push_to_stack(cpu.status_reg.to_int() | (1 << 4), 1)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
cpu.status_reg.bits[Status.StatusTypes.interrupt] = 1
cpu.running = False
@classmethod
def get_cycles(cls):
return 7
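# Illustrative sketch (not part of PyNES itself): instruction classes like
# the ones above are typically collected into an opcode dispatch table keyed
# by identifier_byte, e.g.
#
#     OPCODES = {cls.identifier_byte: cls
#                for cls in (Sec, Sei, Sed, Cld, Clc, Clv, Cli,
#                            BitZeroPage, BitAbsolute, Brk)}
#     OPCODES[bytes([0x38])]  # -> Sec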
| 1.835938 | 2 |
formfactor_AL.py | kirichoi/PolymerConnectome | 0 | 7940 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
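# Minimal single-process sketch of the same orientational average (toy
# coordinates, no shared-memory setup; purely illustrative):
#
#     pts = np.random.rand(50, 3)
#     q = 1.0
#     pq = 0.0
#     for i in range(len(pts)):
#         rvec = np.subtract(pts[i], pts[1+i:]).T
#         pq += np.mean([np.cos(np.dot(q * e, rvec)).sum()
#                        for e in np.eye(3)])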
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| 1.382813 | 1 |
code/config/imports.py | farioso-fernando/cover-meu-beat | 0 | 8068 | from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print()
def version():
kivy.require('2.0.0')
print()
global_info.py | AkagiYui/AzurLaneTool | 0 | 8196 | from time import sleep
debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0
def get_debug_mode():
return debug_mode
def trigger_exit(_exit_code):
global time_to_exit, exit_code
exit_code = _exit_code
time_to_exit = True
sleep(0.1)
| 1.28125 | 1 |
Python X/Dictionaries in python.py | nirobio/puzzles | 0 | 8324 | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# dictionaries, look-up tables & key-value pairs\n",
"# d = {} OR d = dict()\n",
"# e.g. d = {\"George\": 24, \"Tom\": 32}\n",
"\n",
"d = {}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"d[\"George\"] = 24"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"d[\"Tom\"] = 32\n",
"d[\"Jenny\"] = 16"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'George': 24, 'Tom': 32, 'Jenny': 16}\n"
]
}
],
"source": [
"print(d)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'Jenny' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-0bdfff196d23>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mJenny\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'Jenny' is not defined"
]
}
],
"source": [
"print(d[Jenny])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32\n"
]
}
],
"source": [
"print(d[\"Tom\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"d[\"Jenny\"] = 20"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20\n"
]
}
],
"source": [
"print(d[\"Jenny\"])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# keys are strings or numbers \n",
"\n",
"d[10] = 100"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100\n"
]
}
],
"source": [
"print(d[10])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# how to iterate over key-value pairs"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"key:\n",
"George\n",
"value:\n",
"24\n",
"\n",
"key:\n",
"Tom\n",
"value:\n",
"32\n",
"\n",
"key:\n",
"Jenny\n",
"value:\n",
"20\n",
"\n",
"key:\n",
"10\n",
"value:\n",
"100\n",
"\n"
]
}
],
"source": [
" for key, value in d.items():\n",
" print(\"key:\")\n",
" print(key)\n",
" print(\"value:\")\n",
" print(value)\n",
" print(\"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 1.945313 | 2 |
practice/4_tracking/tracker.py | OrangeRedeng/CV-SUMMER-CAMP-2021 | 13 | 8452 | <gh_stars>10-100
import numpy as np
import math
import logging as log
import sys
from tqdm import tqdm
from common.feature_distance import calc_features_similarity
from common.common_objects import DetectedObject, validate_detected_object, Bbox
from common.common_objects import get_bbox_center, get_dist, calc_bbox_area
from common.find_best_assignment import solve_assignment_problem
from common.annotation import AnnotationObject, AnnotationStorage
class Track:
__next_track_id = 0
def __init__(self, first_obj):
self.objects = []
self._track_id = Track.__next_track_id
Track.__next_track_id += 1
self.objects.append(first_obj)
def _validate(self):
assert len(self.objects) > 0
for o in self.objects:
validate_detected_object(o)
for i in range(len(self.objects) - 1):
            assert self.objects[i].frame_index < self.objects[i+1].frame_index
def add_object(self, o):
self._validate()
validate_detected_object(o)
last_frame_index = self.objects[-1].frame_index
if not last_frame_index < o.frame_index:
raise RuntimeError("Add object={} to track with the last_frame_index={}".format(o, last_frame_index))
self.objects.append(o)
def last(self):
return self.objects[-1]
def get_id(self):
return self._track_id
def get_bbox_for_frame(self, cur_frame_ind):
"""Finds bbox for frame index using linear approximation"""
self._validate()
i_found = None
for i, o in enumerate(self.objects):
if o.frame_index == cur_frame_ind:
return o.bbox
if o.frame_index > cur_frame_ind:
i_found = i
break
if i_found is None: # cur_frame_ind after the last frame_index in track
return None
if i_found == 0: # cur_frame_ind before the first frame_index in track
return None
log.debug("using linear approximation for track id={}, frame_index={}".format(self._track_id, cur_frame_ind))
o1 = self.objects[i_found-1]
o2 = self.objects[i_found]
assert o1.frame_index < cur_frame_ind < o2.frame_index
dindex = o2.frame_index - o1.frame_index
d_cur_index1 = cur_frame_ind - o1.frame_index
d_cur_index2 = o2.frame_index - cur_frame_ind
bbox1 = o1.bbox
bbox2 = o2.bbox
res_bbox = [None, None, None, None]
for k in range(4):
# linear approximation for all bbox fields
res_bbox[k] = (bbox1[k] * d_cur_index2 + bbox2[k] * d_cur_index1) / dindex
res_bbox = Bbox(res_bbox[0], res_bbox[1], res_bbox[2], res_bbox[3])
return res_bbox
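    # Worked illustration (assumed numbers): with objects at frame_index 10
    # (tl_x=0) and frame_index 20 (tl_x=10), get_bbox_for_frame(15) yields
    # tl_x = (0 * 5 + 10 * 5) / 10 = 5, i.e. the linear midpoint.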
class Tracker:
def __init__(self, num_frames_to_remove_track, num_objects_to_make_track_valid, affinity_threshold):
self.tracks = []
self.track_archive = []
self.num_frames_to_remove_track = num_frames_to_remove_track
self.num_objects_to_make_track_valid = num_objects_to_make_track_valid
self.affinity_threshold = affinity_threshold
def add_objects(self, det_objs):
log.debug("begin: handling {} objects".format(len(det_objs)))
if len(det_objs) == 0:
return
frame_index = det_objs[0].frame_index
assert all(o.frame_index == frame_index for o in det_objs), "All det_objs should have the same frame_index"
affinity_matrix = self._build_affinity_matrix(det_objs)
self._validate_affinity_matrix(affinity_matrix, len(self.tracks), len(det_objs))
self._log_affinity_matrix(affinity_matrix)
decision, best_affinity = self._solve_assignment_problem(affinity_matrix)
self._log_decision(decision, best_affinity, det_objs, frame_index)
self._apply_decision(decision, det_objs, frame_index)
self._move_obsolete_tracks_to_archive(frame_index)
log.debug("end: handling {} objects".format(len(det_objs)))
@staticmethod
def _validate_affinity_matrix(affinity_matrix, num_tracks, num_det_objs):
assert isinstance(affinity_matrix, list)
assert len(affinity_matrix) == num_tracks
for affinity_row in affinity_matrix:
assert isinstance(affinity_row, list)
assert len(affinity_row) == num_det_objs
assert all(isinstance(v, float) for v in affinity_row)
assert all(v >= 0 for v in affinity_row)
def _build_affinity_matrix(self, det_objs):
affinity_matrix = []
for t in self.tracks:
affinity_row = []
for o in det_objs:
cur_affinity = self._calc_affinity(t, o)
affinity_row.append(cur_affinity)
affinity_matrix.append(affinity_row)
return affinity_matrix
def _calc_affinity(self, track, obj):
affinity_appearance = self._calc_affinity_appearance(track, obj)
affinity_position = self._calc_affinity_position(track, obj)
affinity_shape = self._calc_affinity_shape(track, obj)
return affinity_appearance * affinity_position * affinity_shape
def _calc_affinity_appearance(self, track, obj):
raise NotImplementedError("The function _calc_affinity_appearance is not implemented -- implement it by yourself")
def _calc_affinity_position(self, track, obj):
raise NotImplementedError("The function _calc_affinity_position is not implemented -- implement it by yourself")
def _calc_affinity_shape(self, track, obj):
raise NotImplementedError("The function _calc_affinity_shape is not implemented -- implement it by yourself")
@staticmethod
def _log_affinity_matrix(affinity_matrix):
with np.printoptions(precision=2, suppress=True, threshold=sys.maxsize, linewidth=sys.maxsize):
log.debug("Affinity matrix =\n{}".format(np.array(affinity_matrix)))
def _solve_assignment_problem(self, affinity_matrix):
decision, best_affinity = solve_assignment_problem(affinity_matrix, self.affinity_threshold)
return decision, best_affinity
def _log_decision(self, decision, best_affinity, det_objs, frame_index):
log.debug("Logging decision for frame index={}".format(frame_index))
num_tracks = len(self.tracks)
for track_index in range(num_tracks):
assert track_index in decision
obj_index = decision[track_index] # index of the object assigned to the track
if obj_index is not None:
assert 0 <= obj_index < len(det_objs)
obj_bbox = det_objs[obj_index].bbox
else:
obj_bbox = None
cur_best_affinity = best_affinity[track_index]
if cur_best_affinity is not None:
best_affinity_str = "{:.3f}".format(cur_best_affinity)
else:
best_affinity_str = str(cur_best_affinity)
log.debug("track_index={}, track id={}, last_bbox={}, decision={}, best_affinity={} => {}".format(
track_index, self.tracks[track_index].get_id(),
self.tracks[track_index].last().bbox,
decision[track_index],
best_affinity_str,
obj_bbox))
def _apply_decision(self, decision, det_objs, frame_index):
set_updated_tracks_indexes = set()
num_det_objs = len(det_objs)
num_tracks = len(self.tracks)
object_indexes_not_mapped_to_tracks = set(range(num_det_objs)) # all indexes from 0 to num_det_objs-1
for track_index in range(num_tracks):
assert track_index in decision
obj_index = decision[track_index] # index of the object assigned to the track
if obj_index is None:
# no objects are mapped for this track
continue
assert 0 <= obj_index < num_det_objs
if obj_index not in object_indexes_not_mapped_to_tracks:
raise RuntimeError("ERROR: Algorithm assigned the object {} to several tracks".format(obj_index))
object_indexes_not_mapped_to_tracks.remove(obj_index)
o = det_objs[obj_index]
self.tracks[track_index].add_object(o)
# create new tracks for all the objects not mapped to tracks
for obj_index in object_indexes_not_mapped_to_tracks:
o = det_objs[obj_index]
self._create_new_track(o)
def _create_new_track(self, o):
new_track = Track(o)
self.tracks.append(new_track)
log.debug("created new track: id={} object: frame_index={}, {}".format(
new_track.get_id(), o.frame_index, o.bbox))
def _move_obsolete_tracks_to_archive(self, frame_index):
new_tracks = []
for t in self.tracks:
last_frame_index = t.last().frame_index
if frame_index - last_frame_index >= self.num_frames_to_remove_track:
log.debug("Move the track id={} to archive: the current frame_index={}, "
"the last frame_index in track={}".format(
t.get_id(), frame_index, last_frame_index))
self.track_archive.append(t)
else:
new_tracks.append(t)
self.tracks = new_tracks
def is_track_valid(self, track):
assert isinstance(track, Track)
return len(track.objects) > self.num_objects_to_make_track_valid
def get_all_valid_tracks(self):
res = []
for t in self.track_archive:
if self.is_track_valid(t):
res.append(t)
for t in self.tracks:
if self.is_track_valid(t):
res.append(t)
return res
def convert_tracks_to_annotation_storage(tracks):
ann_objects_by_frame_index = {}
for cur_track in tqdm(tracks, desc="Converting"):
track_id = cur_track.get_id()
first_frame_index = cur_track.objects[0].frame_index
last_frame_index = cur_track.objects[-1].frame_index
for frame_index in range(first_frame_index, last_frame_index+1):
bbox = cur_track.get_bbox_for_frame(frame_index)
tl_x = math.floor(bbox.tl_x)
tl_y = math.floor(bbox.tl_y)
br_x = math.ceil(bbox.br_x)
br_y = math.ceil(bbox.br_y)
detect_obj = DetectedObject(frame_index=frame_index,
bbox=Bbox(tl_x, tl_y, br_x, br_y),
appearance_feature=[])
ann_obj = AnnotationObject(detect_obj=detect_obj,
track_id=track_id)
if frame_index not in ann_objects_by_frame_index:
ann_objects_by_frame_index[frame_index] = {}
ann_objects_by_frame_index[frame_index][track_id] = ann_obj
annotation_objects = []
for frame_index in sorted(ann_objects_by_frame_index.keys()):
cur_ann_objects = ann_objects_by_frame_index[frame_index]
for track_id in sorted(cur_ann_objects.keys()):
annotation_objects.append(cur_ann_objects[track_id])
annotation_storage = AnnotationStorage.create_annotation_storage_from_list(annotation_objects)
return annotation_storage
| 2.34375 | 2 |
qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py | Zoufalc/qiskit-machine-learning | 1 | 8580 | <reponame>Zoufalc/qiskit-machine-learning<filename>qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Neural network regressor """
from typing import Union
import numpy as np
from qiskit.algorithms.optimizers import Optimizer
from ...exceptions import QiskitMachineLearningError
from ...neural_networks import NeuralNetwork
from ...utils.loss_functions import (Loss, L1Loss, L2Loss, CrossEntropyLoss,
CrossEntropySigmoidLoss)
class NeuralNetworkRegressor:
""" Quantum neural network regressor"""
def __init__(self, neural_network: NeuralNetwork,
loss: Union[str, Loss] = 'l2',
optimizer: Optimizer = None,
warm_start: bool = False):
"""
Args:
            neural_network: An instance of a quantum neural network. If the neural network has a
one-dimensional output, i.e., `neural_network.output_shape=(1,)`, then it is
expected to return values in [-1, +1] and it can only be used for binary
classification. If the output is multi-dimensional, it is assumed that the result
is a probability distribution, i.e., that the entries are non-negative and sum up
to one. Then there are two options, either one-hot encoding or not. In case of
                one-hot encoding, each probability vector resulting from the neural network is considered
as one sample and the loss function is applied to the whole vector. Otherwise, each
entry of the probability vector is considered as an individual sample and the loss
function is applied to the index and weighted with the corresponding probability.
loss: A target loss function to be used in training. Default is `l2`, i.e. L2 loss.
Can be given either as a string for 'l1', 'l2', 'cross_entropy',
'cross_entropy_sigmoid', or as a loss function implementing the Loss interface.
optimizer: An instance of an optimizer to be used in training.
warm_start: Use weights from previous fit to start next fit.
Raises:
QiskitMachineLearningError: unknown loss, invalid neural network
"""
self._neural_network = neural_network
if len(neural_network.output_shape) > 1:
raise QiskitMachineLearningError('Invalid neural network output shape!')
if isinstance(loss, Loss):
self._loss = loss
else:
if loss.lower() == 'l1':
self._loss = L1Loss()
elif loss.lower() == 'l2':
self._loss = L2Loss()
elif loss.lower() == 'cross_entropy':
self._loss = CrossEntropyLoss()
elif loss.lower() == 'cross_entropy_sigmoid':
self._loss = CrossEntropySigmoidLoss()
else:
raise QiskitMachineLearningError(f'Unknown loss {loss}!')
self._optimizer = optimizer
self._warm_start = warm_start
self._fit_result = None
@property
def neural_network(self):
""" Returns the underlying neural network."""
return self._neural_network
@property
def loss(self):
""" Returns the underlying neural network."""
return self._loss
@property
def warm_start(self) -> bool:
""" Returns the warm start flag."""
return self._warm_start
@warm_start.setter
def warm_start(self, warm_start: bool) -> None:
""" Sets the warm start flag."""
self._warm_start = warm_start
def fit(self, X: np.ndarray, y: np.ndarray): # pylint: disable=invalid-name
"""
Fit the model to data matrix X and target(s) y.
Args:
X: The input data.
y: The target values.
Returns:
self: returns a trained classifier.
Raises:
QiskitMachineLearningError: In case of invalid data (e.g. incompatible with network)
"""
if self._neural_network.output_shape == (1,):
# TODO: we should add some reasonable compatibility checks and raise meaningful errors.
def objective(w):
predict = self._neural_network.forward(X, w)
target = np.array(y).reshape(predict.shape)
value = np.sum(self._loss(predict, target))
return value
def objective_grad(w):
# TODO should store output from forward pass (implement loss interface?)
# TODO: need to be able to turn off input grads if not needed.
output = self._neural_network.forward(X, w)
_, weights_grad = self._neural_network.backward(X, w)
grad = np.zeros((1, self._neural_network.num_weights))
for i in range(len(X)):
grad += self._loss.gradient(output[i][0], y[i]) * weights_grad[i]
return grad
else:
def objective(w):
val = 0.0
probs = self._neural_network.forward(X, w)
for i in range(len(X)):
for y_predict, prob in enumerate(probs[i]):
val += prob * self._loss(y_predict, y[i])
return val
def objective_grad(w):
num_classes = self._neural_network.output_shape[0]
grad = np.zeros((1, self._neural_network.num_weights))
for x, y_target in zip(X, y):
# TODO: do batch eval
_, weight_prob_grad = self._neural_network.backward(x, w)
for i in range(num_classes):
grad += weight_prob_grad[
0, i, :].reshape(grad.shape) * self._loss(i, y_target)
return grad
if self._warm_start and self._fit_result is not None:
initial_point = self._fit_result[0]
else:
initial_point = np.random.rand(self._neural_network.num_weights)
self._fit_result = self._optimizer.optimize(self._neural_network.num_weights, objective,
objective_grad, initial_point=initial_point)
return self
def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name
"""
Predict using the network specified to the regression.
Args:
X: The input data.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
The predicted values.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
# TODO: proper handling of batching
return self._neural_network.forward(X, self._fit_result[0])
    def score(self, X: np.ndarray, y: np.ndarray) -> float:  # pylint: disable=invalid-name
"""
Return R-squared on the given test data and targeted values.
Args:
X: Test samples.
y: True target values given `X`.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
R-squared value.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
predict = self.predict(X)
# Compute R2 for score
ss_res = sum(map(lambda k: (k[0] - k[1]) ** 2, zip(y, predict)))
ss_tot = sum([(k - np.mean(y)) ** 2 for k in y])
score = 1 - (ss_res / ss_tot)
if len(np.array(score).shape) > 0:
return score[0]
else:
return score
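    # Worked illustration of the R^2 formula above (assumed numbers):
    # y = [1, 2, 3], predict = [1.1, 1.9, 3.2]
    # ss_res = 0.01 + 0.01 + 0.04 = 0.06, ss_tot = 1 + 0 + 1 = 2,
    # so score = 1 - 0.06 / 2 = 0.97.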
| 1.921875 | 2 |
experimental/attentive_uncertainty/toy_regression/datasets.py | miksu/edward2 | 0 | 8708 | # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
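  # Shape walk-through of the kernel above (illustrative): with B=2,
  # num_total_points=5 and x_size=y_size=1, `diff` has shape [2, 5, 5, 1],
  # `norm` reduces to [2, 1, 5, 5], and the result is
  # sigma_f^2 * exp(-0.5 * norm) + sigma_noise^2 * I per output dimension.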
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
      A `NPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
| 2.46875 | 2 |
heat/tests/convergence/framework/testutils.py | maestro-hybrid-cloud/heat | 0 | 8836 | <gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
def verify(test, reality, tmpl):
for name in tmpl.resources:
rsrc_count = len(reality.resources_by_logical_name(name))
test.assertEqual(1, rsrc_count,
'Found %d copies of resource "%s"' % (rsrc_count,
name))
all_rsrcs = reality.all_resources()
for name, defn in tmpl.resources.items():
phys_rsrc = reality.resources_by_logical_name(name)[0]
for prop_name, prop_def in defn.properties.items():
real_value = reality.resource_properties(phys_rsrc, prop_name)
if isinstance(prop_def, scenario_template.GetAtt):
targs = reality.resources_by_logical_name(prop_def.target_name)
att_value = targs[0].properties_data[prop_def.attr]
test.assertEqual(att_value, real_value)
elif isinstance(prop_def, scenario_template.GetRes):
targs = reality.resources_by_logical_name(prop_def.target_name)
test.assertEqual(targs[0].nova_instance, real_value)
else:
test.assertEqual(prop_def, real_value)
test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))
test.assertEqual(len(tmpl.resources), len(all_rsrcs))
def scenario_globals(procs, testcase):
return {
'test': testcase,
'reality': reality.reality,
'verify': functools.partial(verify,
testcase,
reality.reality),
'Template': scenario_template.Template,
'RsrcDef': scenario_template.RsrcDef,
'GetRes': scenario_template.GetRes,
'GetAtt': scenario_template.GetAtt,
'engine': procs.engine,
'worker': procs.worker,
}
| 1.367188 | 1 |
migrations/versions/e86dd3bc539c_change_admin_to_boolean.py | jonzxz/project-piscator | 0 | 8964 | """change admin to boolean
Revision ID: e86dd3bc539c
Revises: <KEY>
Create Date: 2020-11-11 22:32:00.707936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e86dd3bc539c'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('email_address', sa.Column('active', sa.Boolean(), nullable=False))
op.add_column('email_address', sa.Column('email_password', sa.String(length=255), nullable=False))
op.add_column('email_address', sa.Column('last_mailbox_size', sa.Integer(), nullable=True))
op.add_column('email_address', sa.Column('last_updated', sa.DateTime(), nullable=True))
op.add_column('email_address', sa.Column('phishing_mail_detected', sa.Integer(), nullable=True))
op.add_column('user', sa.Column('is_active', sa.Boolean(), nullable=False))
op.add_column('user', sa.Column('is_admin', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('last_logged_in', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_logged_in')
op.drop_column('user', 'is_admin')
op.drop_column('user', 'is_active')
op.drop_column('email_address', 'phishing_mail_detected')
op.drop_column('email_address', 'last_updated')
op.drop_column('email_address', 'last_mailbox_size')
op.drop_column('email_address', 'email_password')
op.drop_column('email_address', 'active')
# ### end Alembic commands ###
| 1.15625 | 1 |
Day24_Python/part1.py | Rog3rSm1th/PolyglotOfCode | 7 | 9092 | <reponame>Rog3rSm1th/PolyglotOfCode
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from functools import reduce
from itertools import combinations
def solve(packages, groups):
total = sum(packages)
result = 9999999999999999
# we should use `for i in range(1, len(packages) - 2)` but it would
# make the computation significantly slower
for i in range(1, 7):
for c in combinations(packages, i):
if sum(c) == total / groups:
quantum_entanglement = reduce(lambda a, b: a * b, list(c))
result = min(result, quantum_entanglement)
return result
packages = [int(num) for num in open('input.txt')]
print(solve(packages, 3)) | 1.820313 | 2 |
src/code/djangotest/migrations/0001_initial.py | jielyu/notebook | 2 | 9220 | # Generated by Django 2.2.5 on 2019-10-05 23:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Password',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('website', models.CharField(max_length=128)),
('username', models.CharField(max_length=128)),
('pwd', models.CharField(max_length=128)),
('time_add', models.DateTimeField(auto_now_add=True, null=True)),
('time_modify', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'password_tab',
},
),
]
| 0.753906 | 1 |
tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | 1,690 | 9348 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
REGION = "us-west-2"
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
@pytest.fixture()
def sagemaker_session():
return Mock(name="sagemaker_session", boto_region_name=REGION)
def _build_tf(sagemaker_session, **kwargs):
return TensorFlow(
sagemaker_session=sagemaker_session,
entry_point="dummy.py",
role="dummy-role",
instance_count=1,
instance_type="ml.c4.xlarge",
**kwargs,
)
@patch("sagemaker.fw_utils.python_deprecation_warning")
def test_estimator_py2_deprecation_warning(warning, sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2")
assert estimator.py_version == "py2"
warning.assert_called_with("tensorflow", "2.1.1")
def test_py2_version_deprecated(sagemaker_session):
with pytest.raises(AttributeError) as e:
_build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2")
msg = (
"Python 2 containers are only available with 2.1.1 and lower versions. "
"Please use a Python 3 container."
)
assert msg in str(e.value)
def test_py2_version_is_not_deprecated(sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
assert estimator.py_version == "py2"
estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
assert estimator.py_version == "py2"
def test_framework_name(sagemaker_session):
tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3")
assert tf._framework_name == "tensorflow"
def test_tf_add_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=ENV_INPUT,
)
assert tf.environment == ENV_INPUT
def test_tf_miss_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=None,
)
assert not tf.environment
def test_enable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=True,
)
assert tf.enable_sagemaker_metrics
def test_disable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=False,
)
assert not tf.enable_sagemaker_metrics
def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.14"):
pytest.skip("This test is for TF 1.14 and lower.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
image_uri="old-image",
)
assert tf.enable_sagemaker_metrics is None
def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) < version.Version("1.15"):
pytest.skip("This test is for TF 1.15 and higher.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
assert tf.enable_sagemaker_metrics
def test_require_image_uri_if_fw_ver_is_less_than_1_11(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.10"):
pytest.skip("This test is for TF 1.10 and lower.")
with pytest.raises(ValueError) as e:
_build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
expected_msg = (
"TF {version} supports only legacy mode. Please supply the image URI directly with "
"'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"
"sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "
"legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "
"make sure to pass them directly as hyperparameters instead."
).format(version=tensorflow_training_version, region=REGION)
assert expected_msg in str(e.value)
| 0.996094 | 1 |
output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_pattern_2_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 9476 | <filename>output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_pattern_2_xsd/__init__.py
from output.models.nist_data.list_pkg.decimal.schema_instance.nistschema_sv_iv_list_decimal_pattern_2_xsd.nistschema_sv_iv_list_decimal_pattern_2 import NistschemaSvIvListDecimalPattern2
__all__ = [
"NistschemaSvIvListDecimalPattern2",
]
| 0.380859 | 0 |
output/models/ms_data/regex/re_l32_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 9604 | <reponame>tefra/xsdata-w3c-tests<filename>output/models/ms_data/regex/re_l32_xsd/__init__.py<gh_stars>1-10
from output.models.ms_data.regex.re_l32_xsd.re_l32 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| 0.458984 | 0 |
divsum_stats.py | fjruizruano/SatIntExt | 0 | 9732 | <filename>divsum_stats.py<gh_stars>0
#!/usr/bin/python
import sys
from subprocess import call
print "divsum_count.py ListOfDivsumFiles\n"
try:
files = sys.argv[1]
except:
files = raw_input("Introduce RepeatMasker's list of Divsum files with library size (tab separated): ")
files = open(files).readlines()
to_join = []
header = "Coverage for each repeat class and divergence (Kimura)\n"
results = {}
for line in files:
line = line.split("\t")
file = line[0]
size = int(line[1])
data = open(file).readlines()
matrix_start = data.index(header)
matrix = data[matrix_start+1:]
li= []
names_line = matrix[0]
info = names_line.split()
for fam in info:
li.append([fam])
info_len = len(li)
for line in matrix[1:]:
info = line.split()
for i in range(0,info_len):
li[i].append(info[i])
out = open(file+".counts","w")
out.write("Sequence\tAbundance\n")
stats = open(file+".stats","w")
stats.write("Sequence\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n")
for el in li[1:]:
numbers = el[1:]
numbers = [int(x) for x in numbers]
numbers_prop = [1.0*x/size for x in numbers]
prop_dict = {}
prop_li = []
for prop in range(0,len(numbers_prop)):
prop_dict[prop] = numbers_prop[prop]
prop_li.append(numbers_prop[prop])
prop_dict_sorted = sorted(prop_dict.items(), key=lambda x: x[1], reverse=True)
total = sum(numbers_prop)
top = prop_dict_sorted[0]
top_div = top[0]
top_ab = top[1]
peak = []
if top_div >= 2:
for div in range(top_div-2,top_div+3):
peak.append(prop_dict[div])
else:
for div in range(0,5):
peak.append(prop_dict[div])
sum_peak = sum(peak)
rps = sum_peak/total
divpeak = top_div
out.write(el[0]+"\t"+str(sum(numbers))+"\n")
all_divs = []
for d in li[0][1:]:
all_divs.append(int(d)+0.5)
div_sumproduct = 0
for x,y in zip(all_divs,prop_li):
div_sumproduct += x * y
divergence = div_sumproduct/total
data = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (el[0],str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
stats.write(data)
data2 = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (file, str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
if el[0] in results:
results[el[0]].append(data2)
else:
results[el[0]] = [data2]
out.close()
stats.close()
to_join.append(file+".counts")
out = open("results.txt", "w")
for el in sorted(results):
info = results[el]
out.write("%s\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n" % (el))
for i in info:
out.write(i)
out.write("\n\n\n")
out.close()
call("join_multiple_lists.py %s" % (" ".join(to_join)), shell=True)
| 1.773438 | 2 |
tools/generate_serialization_header.py | StableCoder/vulkan-mini-libs-2 | 1 | 9860 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
import getopt
import xml.etree.ElementTree as ET
def processVendors(outFile, vendors):
outFile.writelines(["\nconstexpr std::array<std::string_view, ", str(
len(vendors)), "> vendors = {{\n"])
for vendor in vendors:
outFile.writelines([' \"', vendor.tag, '\",\n'])
outFile.write('}};\n')
def processEnumValue(outFile, enum, value):
if not value.get('value') is None:
# Spitting out plain values
outFile.write(value.get('value'))
elif not value.get('bitpos') is None:
# Bitflag
outFile.writelines(
['0x', format(1 << int(value.get('bitpos')), '08X')])
elif not value.get('alias') is None:
processEnumValue(outFile, enum, enum.find(value.get('alias')))
def processEnums(outFile, enums, vendors, first, last):
for enum in enums:
# Skip VkResult
if enum.tag == 'VkResult':
continue
# Skip if there's no values, MSVC can't do zero-sized arrays
if len(enum.findall('./')) == 0:
continue
outFile.writelines(
['\nconstexpr EnumValueSet ', enum.tag, 'Sets[] = {\n'])
# Determine how much to chop off the front
strName = enum.tag
typeDigit = ''
# Determine if type ends with vendor tag
vendorName = ''
for vendor in vendors:
if strName.endswith(vendor.tag):
vendorName = vendor.tag
strName = strName[:-len(vendorName)]
if strName[-1].isdigit():
typeDigit = strName[-1]
strName = strName[:-1]
if strName.endswith('FlagBits'):
strName = strName[:-8]
# Construct most likely enum prefix
mainPrefix = ''
for char in strName:
if mainPrefix == '':
mainPrefix += char
elif char.isupper():
mainPrefix += '_'
mainPrefix += char.upper()
else:
mainPrefix += char.upper()
mainPrefix += '_'
if typeDigit != '':
mainPrefix += typeDigit
mainPrefix += '_'
current = first
while current <= last:
for value in enum.findall('./'):
if int(value.get('first')) != current:
continue
outFile.write(" {\"")
valueStr = value.tag
if valueStr.startswith(mainPrefix):
valueStr = valueStr[len(mainPrefix):]
if vendorName != '' and valueStr.endswith(vendorName):
valueStr = valueStr[:-len(vendorName)-1]
if valueStr.endswith('_BIT'):
valueStr = valueStr[:-4]
outFile.write(valueStr)
outFile.write("\", ")
processEnumValue(outFile, enum, value)
outFile.write("},\n")
current += 1
outFile.write('};\n')
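# Example of the prefix/name handling above (illustrative):
# 'VkSampleCountFlagBits' -> strip 'FlagBits' -> 'VkSampleCount'
# -> mainPrefix 'VK_SAMPLE_COUNT_', so the value
# 'VK_SAMPLE_COUNT_1_BIT' is stored under the short name '1'.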
def main(argv):
inputFile = ''
outputFile = ''
try:
opts, args = getopt.getopt(argv, 'i:o:', [])
except getopt.GetoptError:
print('Error parsing options')
sys.exit(1)
for opt, arg in opts:
if opt == '-i':
inputFile = arg
elif opt == '-o':
outputFile = arg
if(inputFile == ''):
print("Error: No Vulkan XML file specified")
sys.exit(1)
if(outputFile == ''):
print("Error: No output file specified")
sys.exit(1)
try:
dataXml = ET.parse(inputFile)
dataRoot = dataXml.getroot()
except:
print("Error: Could not open input file: ", inputFile)
sys.exit(1)
firstVersion = int(dataRoot.get('first'))
lastVersion = int(dataRoot.get('last'))
outFile = open(outputFile, "w")
# Common Header
with open("common_header.txt") as fd:
outFile.write(fd.read())
outFile.write('\n')
#
outFile.write("""#ifndef VK_VALUE_SERIALIZATION_HPP
#define VK_VALUE_SERIALIZATION_HPP
/* USAGE:
To use, include this header where the declarations for the boolean checks are required.
On *ONE* compilation unit, include the definition of `#define VK_VALUE_SERIALIZATION_CONFIG_MAIN`
so that the definitions are compiled somewhere following the one definition rule.
*/
#include <vulkan/vulkan.h>
#include <string>
#include <string_view>
""")
# Static Asserts
outFile.writelines(["\nstatic_assert(VK_HEADER_VERSION >= ", str(
firstVersion), ", \"VK_HEADER_VERSION is from before the supported range.\");\n"])
outFile.writelines(["static_assert(VK_HEADER_VERSION <= ", str(
lastVersion), ", \"VK_HEADER_VERSION is from after the supported range.\");\n"])
# Function Declarataions
outFile.write("""
/**
* @brief Macro that automatically stringifies the given Vulkan type for serialization
* @param VKTYPE Actual Vulkan type
* @param VALUE Value to be serialized
* @param STRPTR Pointer to the string to store the serialization in. Only modified if true is
* returned.
* @return True if serialization was successful. False otherwise.
*/
#define VK_SERIALIZE(VKTYPE, VALUE, STRPTR) vk_serialize<VKTYPE>(#VKTYPE, VALUE, STRPTR)
/**
* @brief Macro that automatically stringifies the given Vulkan type for parsing
* @param VKTYPE Actual Vulkan type
* @param STRING String to be parsed
* @param VALPTR Pointer to the value to store the parsed value in. Only modified if true is
* returned.
* @return True if serialization was successful. False otherwise.
*/
#define VK_PARSE(VKTYPE, STRING, VALPTR) vk_parse<VKTYPE>(#VKTYPE, STRING, VALPTR)
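/* Illustrative usage (assumes a VkImageLayout variable; not generated from
 * the spec itself):
 *   VkImageLayout layout;
 *   if (VK_PARSE(VkImageLayout, "COLOR_ATTACHMENT_OPTIMAL", &layout)) {
 *     // layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
 *   }
 */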
/**
* @brief Serializes a Vulkan enumerator/flag type (32-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
bool vk_serialize(std::string_view vkType, uint32_t vkValue, std::string *pString);
/**
* @brief Parses a Vulkan enumerator/flag serialized string (32-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
bool vk_parse(std::string_view vkType, std::string vkString, uint32_t *pValue);
/**
* @brief Serializes a Vulkan enumerator/flag type (64-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
bool vk_serialize(std::string_view vkType, uint64_t vkValue, std::string *pString);
/**
* @brief Parses a Vulkan enumerator/flag serialized string (64-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
bool vk_parse(std::string_view vkType, std::string vkString, uint64_t *pValue);
/**
* @brief Serializes a Vulkan enumerator/flag type
* @tparam Vulkan type being serialized
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
template <typename T>
bool vk_serialize(std::string_view vkType, T vkValue, std::string *pString) {
return vk_serialize(vkType, static_cast<uint32_t>(vkValue), pString);
}
/**
* @brief Parses a Vulkan enumerator/flag serialized string
* @tparam Vulkan type being parsed
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
* @return True the value was successfully serialized. False otherwise.
*/
template <typename T>
bool vk_parse(std::string_view vkType, std::string vkString, T *pValue) {
uint32_t retVal = 0;
auto found = vk_parse(vkType, vkString, &retVal);
if (found) {
*pValue = static_cast<T>(retVal);
}
return found;
}
""")
# Definition Start
outFile.write("\n#ifdef VK_VALUE_SERIALIZATION_CONFIG_MAIN\n")
outFile.write("\n#include <algorithm>\n")
outFile.write("#include <array>\n")
outFile.write("#include <cstring>\n")
outFile.write("\nnamespace {\n")
# Vendors
vendors = dataRoot.findall('vendors/')
processVendors(outFile, vendors)
# EnumSet Declaration
outFile.write("\nstruct EnumValueSet {\n")
outFile.write(" std::string_view name;\n")
outFile.write(" int64_t value;\n")
outFile.write("};\n")
# Enums
enums = dataRoot.findall('enums/')
processEnums(outFile, enums, vendors, firstVersion, lastVersion)
# Enum Type Declaration
outFile.write("\nstruct EnumType {\n")
outFile.write(" std::string_view name;\n")
outFile.write(" EnumValueSet const* data;\n")
outFile.write(" uint32_t count;\n")
outFile.write(" bool allowEmpty;\n")
outFile.write("};\n")
# Enum Pointer Array
outFile.writelines(["\nconstexpr std::array<EnumType, ", str(
len(enums)-1), "> enumTypes = {{\n"]) # -1 for not doing VkResult
for enum in enums:
if enum.tag == 'VkResult':
continue
valueCount = len(enum.findall('./'))
if valueCount == 0:
outFile.writelines(
[" {\"", str(enum.tag), "\", nullptr, 0, true},\n"])
else:
allowEmpty = "true"
for enumVal in enum.findall('./'):
if enumVal.get('first') == enum.get('first'):
allowEmpty = "false"
outFile.writelines([" {\"", str(enum.tag), "\", ", str(
enum.tag), "Sets, ", str(valueCount), ", ", allowEmpty, "},\n"])
outFile.write('}};\n')
# Function definitions
outFile.write("""
/**
* @brief Removes a vendor tag from the end of the given string view
* @param view String view to remove the vendor tag from
* @return A string_view without the vendor tag, if it was suffixed
*/
std::string_view stripVendor(std::string_view view) {
for (auto const &it : vendors) {
// Don't strip if it's all that's left
if (view == it)
break;
if (strncmp(view.data() + view.size() - it.size(), it.data(), it.size()) == 0) {
view = view.substr(0, view.size() - it.size());
break;
}
}
return view;
}
/**
* @brief Strips '_BIT' from the end of a string, if there
*/
std::string_view stripBit(std::string_view view) {
if (view.size() > strlen("_BIT")) {
if (view.substr(view.size() - strlen("_BIT")) == "_BIT") {
return view.substr(0, view.size() - strlen("_BIT"));
}
}
return view;
}
bool getEnumType(std::string_view vkType,
EnumValueSet const **ppStart,
EnumValueSet const **ppEnd,
bool *pAllowEmpty) {
// Check for a conversion from Flags -> FlagBits
std::string localString;
if (vkType.rfind("Flags") != std::string::npos) {
localString = vkType;
auto it = localString.rfind("Flags");
localString = localString.replace(it, strlen("Flags"), "FlagBits");
vkType = localString;
}
// Try the original name
for (auto const &it : enumTypes) {
if (vkType == std::string_view{it.name}) {
*ppStart = it.data;
*ppEnd = it.data + it.count;
*pAllowEmpty = it.allowEmpty;
return true;
}
}
// Try a vendor-stripped name
vkType = stripVendor(vkType);
for (auto const &it : enumTypes) {
if (vkType == std::string_view{it.name}) {
*ppStart = it.data;
*ppEnd = it.data + it.count;
*pAllowEmpty = it.allowEmpty;
return true;
}
}
return false;
}
/**
* @brief Converts a Vulkan Flag typename into the prefix that is used for it's enums
* @param typeName Name of the type to generate the Vk enum prefix for
* @return Generated prefix string
*
* Any capitalized letters except for the first has an underscore inserted before it, an underscore
* is added to the end, and all characters are converted to upper case.
*
* It also removed the 'Flags' or 'FlagBits' suffixes.
*/
std::string processEnumPrefix(std::string_view typeName) {
// Flag Bits
std::size_t flagBitsSize = strlen("FlagBits");
if (typeName.size() > flagBitsSize) {
if (strncmp(typeName.data() + typeName.size() - flagBitsSize, "FlagBits", flagBitsSize) ==
0) {
typeName = typeName.substr(0, typeName.size() - strlen("FlagBits"));
}
}
// Flags
std::size_t flagsSize = strlen("Flags");
if (typeName.size() > flagsSize) {
if (strncmp(typeName.data() + typeName.size() - flagsSize, "Flags", flagsSize) == 0) {
typeName = typeName.substr(0, typeName.size() - strlen("Flags"));
}
}
std::string retStr;
for (auto it = typeName.begin(); it != typeName.end(); ++it) {
if (it == typeName.begin()) {
retStr += ::toupper(*it);
} else if (::isupper(*it)) {
retStr += '_';
retStr += *it;
} else {
retStr += toupper(*it);
}
}
retStr += '_';
return retStr;
}
bool findValue(std::string_view findValue,
std::string_view prefix,
uint64_t *pValue,
EnumValueSet const *start,
EnumValueSet const *end) {
// Remove the vendor tag suffix if it's on the value
findValue = stripVendor(findValue);
if (findValue[findValue.size() - 1] == '_')
findValue = findValue.substr(0, findValue.size() - 1);
// Remove '_BIT' if it's there
findValue = stripBit(findValue);
// Iterate until we find the value
while (start != end) {
if (findValue == start->name) {
*pValue |= start->value;
return true;
}
std::string prefixedName{prefix};
prefixedName += start->name;
if (findValue == prefixedName) {
*pValue |= start->value;
return true;
}
++start;
}
return false;
}
/**
* @brief Takes a given string and formats it for use with parsing
* @param str The string to format
* @return Formatted string
*
* First, any non alphanumeric characters are trimmed from both ends of the string.
* After than, any spaces are replaced with underscores, and finally all the characters are
* capitalized. This will generate the string closest to the original ones found in the XML spec.
*/
std::string formatString(std::string str) {
// Trim left
std::size_t cutOffset = 0;
for (auto c : str) {
if (::isalnum(c))
break;
else
++cutOffset;
}
str = str.substr(cutOffset);
// Trim right
cutOffset = 0;
for (std::size_t i = 0; i < str.size(); ++i) {
if (::isalnum(str[i]))
cutOffset = i + 1;
}
str = str.substr(0, cutOffset);
std::replace(str.begin(), str.end(), ' ', '_');
std::for_each(str.begin(), str.end(), [](char &c) { c = ::toupper(c); });
return str;
}
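// Illustrative example of formatString: "  front and back  " is trimmed to
// "front and back", spaces become underscores, and upper-casing yields "FRONT_AND_BACK".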
// NOTE: the parameters arrive intentionally swapped relative to the call sites,
// so the value set can be walked from its last element backwards.
bool serializeBitmask(EnumValueSet const *end,
                      EnumValueSet const *start,
                      bool allowEmpty,
                      uint64_t vkValue,
                      std::string *pString) {
    --end;
    --start;
    // After the decrements, `start` points at the last element and `end` sits
    // one before the first; equality therefore means an empty value set.
    if (start == end) {
        // If this is a non-existing bitmask, then return an empty string
        *pString = {};
        return true;
    }
std::string retStr;
while (start != end) {
        if (vkValue == 0 && !retStr.empty()) {
            // All requested bits have already been serialized; stop early.
            break;
        }
if ((start->value & vkValue) == start->value) {
// Found a compatible bit mask, add it
if (!retStr.empty()) {
retStr += " | ";
}
retStr += start->name;
vkValue = vkValue ^ start->value;
}
--start;
}
if (vkValue != 0 || (retStr.empty() && !allowEmpty)) {
// Failed to find a valid bitmask for the value
return false;
}
*pString = retStr;
return true;
}
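// Illustrative sketch with a hypothetical value set {A = 0x1, B = 0x2}: for
// vkValue = 0x3 the reverse scan emits "B | A" and clears vkValue to zero.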
bool serializeEnum(EnumValueSet const *start,
EnumValueSet const *end,
uint64_t vkValue,
std::string *pString) {
while (start != end) {
if (start->value == vkValue) {
*pString = start->name;
return true;
}
++start;
}
return false;
}
bool parseBitmask(std::string_view vkString,
EnumValueSet const *start,
EnumValueSet const *end,
std::string_view prefix,
uint64_t *pValue) {
uint64_t retVal = 0;
auto startCh = vkString.begin();
auto endCh = startCh;
for (; endCh != vkString.end(); ++endCh) {
if (*endCh == '|') {
std::string token(startCh, endCh);
token = formatString(token);
bool foundVal = findValue(token, prefix, &retVal, start, end);
if (!foundVal)
return false;
startCh = endCh + 1;
}
}
if (startCh != endCh) {
std::string token(startCh, endCh);
token = formatString(token);
bool foundVal = findValue(token, prefix, &retVal, start, end);
if (!foundVal)
return false;
}
*pValue = retVal;
return true;
}
bool parseEnum(std::string_view vkString,
EnumValueSet const *start,
EnumValueSet const *end,
std::string_view prefix,
uint64_t *pValue) {
uint64_t retVal = 0;
std::string token = formatString(std::string{vkString});
bool found = findValue(token, prefix, &retVal, start, end);
if (found) {
*pValue = retVal;
}
return found;
}
} // namespace
bool vk_serialize(std::string_view vkType, uint64_t vkValue, std::string *pString) {
if (vkType.empty()) {
return false;
}
EnumValueSet const *start, *end;
bool allowEmpty;
if (!getEnumType(vkType, &start, &end, &allowEmpty)) {
return false;
}
if (vkType.find("Flags") != std::string::npos || vkType.find("FlagBits") != std::string::npos) {
return serializeBitmask(start, end, allowEmpty, vkValue, pString);
}
return serializeEnum(start, end, vkValue, pString);
}
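// Example usage (illustrative; the exact output text depends on the generated
// EnumValueSet tables, and `cullModeValue` is a hypothetical variable):
//   std::string out;
//   bool ok = vk_serialize("VkCullModeFlags", cullModeValue, &out);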
bool vk_serialize(std::string_view vkType, uint32_t vkValue, std::string *pString) {
return vk_serialize(vkType, static_cast<uint64_t>(vkValue), pString);
}
bool vk_parse(std::string_view vkType, std::string vkString, uint64_t *pValue) {
if (vkType.empty()) {
return false;
}
EnumValueSet const *start, *end;
bool allowEmpty;
if (!getEnumType(vkType, &start, &end, &allowEmpty)) {
return false;
}
if (vkString.empty()) {
if (allowEmpty) {
*pValue = 0;
return true;
} else {
return false;
}
}
std::string prefix = processEnumPrefix(stripVendor(vkType));
if (vkType.find("Flags") != std::string::npos || vkType.find("FlagBits") != std::string::npos) {
return parseBitmask(vkString, start, end, prefix, pValue);
}
return parseEnum(vkString, start, end, prefix, pValue);
}
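// Illustrative round-trip: vk_parse("VkCullModeFlags", "FRONT | BACK", &value)
// formats each '|'-separated token and ORs the matching bits into `value`.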
bool vk_parse(std::string_view vkType, std::string vkString, uint32_t *pValue) {
uint64_t tempValue;
if (vk_parse(vkType, vkString, &tempValue)) {
*pValue = static_cast<uint32_t>(tempValue);
return true;
}
return false;
}
""")
# endif
outFile.write("\n#endif // VK_VALUE_SERIALIZATION_CONFIG_MAIN\n")
outFile.write("#endif // VK_VALUE_SERIALIZATION_HPP\n")
outFile.close()
if __name__ == "__main__":
main(sys.argv[1:])
| 1.632813 | 2 |
util/n_download_util.py | TwrFyr/n-hen.py | 0 | 9988 | <reponame>TwrFyr/n-hen.py
import urllib.request
import os
from typing import List
from util.n_util import NUser
from util.n_util import get_n_entry
import time
import threading
from util.array_util import slice_array
delay: float = 2.5
class ProgressWrapper:
"""The progress wrapper keeps track of the progress of a operation by wrapping a current number and a total number.
It also wraps an optional function, which uses the current values and has to have the form 'func(current, total)'."""
def __init__(self, start, total, update):
self.current = start
self.total = total
self.update_callback = update
def update(self):
if self.update_callback is not None:
self.update_callback(self.current, self.total)
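# Minimal sketch of ProgressWrapper in use (the print callback is hypothetical):
#
#   progress = ProgressWrapper(0, 10, lambda cur, tot: print('{}/{}'.format(cur, tot)))
#   progress.current += 1
#   progress.update()  # prints "1/10"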
def download_images(lock, file_url_list: List[str], path: str, progress=None):
    """Downloads every url in `file_url_list` into `path`, updating `progress` under `lock`."""
for file_url in file_url_list:
filename = os.path.join(path, file_url.split('/')[-1])
print('writing {} to {}'.format(file_url, filename))
urllib.request.urlretrieve(file_url, filename)
if progress is not None:
with lock:
progress.current += 1
progress.update()
def save_files_to_dir(file_url_list: List[str], path: str, update=None, thread_count: int = 1) -> None:
"""Saves all files represented by a list of url resources to the folder specified.
The files are being named after the last part of the url.
The number of threads can be increased to use more threads for the downloading of the images."""
# pretend to be normal user
# opener=urllib.request.build_opener()
# opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
# urllib.request.install_opener(opener)
progress = ProgressWrapper(0, len(file_url_list), update)
progress.update()
if thread_count < 1 or thread_count > 16:
print(f'invalid thread count: {thread_count} not in [1, 16]')
return
else:
lock = threading.Lock()
threads = []
        slices = slice_array(file_url_list, thread_count)  # split the work once, outside the loop
        for i in range(thread_count):
t = threading.Thread(target=download_images, kwargs=dict(lock=lock, file_url_list=slices[i], path=path,
progress=progress),
daemon=True)
threads.append(t)
t.start()
for t in threads:
t.join()
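# Example usage (a minimal sketch; the URLs and target path are hypothetical):
#
#   urls = ['https://example.com/a.jpg', 'https://example.com/b.jpg']
#   save_files_to_dir(urls, '/tmp/downloads',
#                     update=lambda cur, tot: print('{}/{}'.format(cur, tot)),
#                     thread_count=2)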
def download_all_favorites(n_user: NUser, base_dir: str, update_entry=None, update_page=None, thread_count=1) -> None:
"""Downloads all entries favorited by `n_user` using the number of `thread_count` threads."""
print('downloading {}\'s {} favorites...'.format(n_user.username, n_user.fav_count))
current_entry = 1
total_entries = n_user.fav_count
for min_entry in n_user.favorite_list:
if update_entry is not None:
update_entry(current_entry=min_entry, current=current_entry, total=total_entries)
# get entry data
print('downloading entry with id {}'.format(min_entry.n_id))
entry = get_n_entry(min_entry.n_id)
if entry is None:
print('no connection possible, skipping...')
current_entry += 1
continue
# check directory is valid
if not os.path.exists(base_dir):
print('base directory does not exist, aborting...')
break
save_dir = os.path.join(base_dir, entry.digits)
if os.path.exists(save_dir):
print('entry already exists, skipping...')
current_entry += 1
continue
else:
os.mkdir(save_dir)
# download images
save_files_to_dir(entry.image_url_list, save_dir, update=update_page, thread_count=thread_count)
print('waiting for {} seconds...'.format(delay))
time.sleep(delay)
current_entry += 1
if update_entry is not None:
update_entry(current_entry=None, current=current_entry, total=total_entries)
print('download finished')
| 2.46875 | 2 |
wificontrol/utils/networkstranslate.py | patrislav1/pywificontrol | 1 | 10116 | # Written by <NAME> and <NAME> <<EMAIL>>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def create_security(proto, key_mgmt, group):
if not proto:
return 'open'
if not key_mgmt:
if "wep" in group:
return 'wep'
else:
return None
else:
if "wpa-psk" in key_mgmt:
if proto == "WPA":
return "wpapsk"
elif proto == "RSN":
return "wpa2psk"
else:
return None
elif "wpa-eap" in key_mgmt:
return 'wpaeap'
else:
return None
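# Illustrative mappings, derived directly from the branches above:
#   create_security(None, None, '')            -> 'open'
#   create_security('WPA', None, 'wep104')     -> 'wep'
#   create_security('WPA', 'wpa-psk', 'ccmp')  -> 'wpapsk'
#   create_security('RSN', 'wpa-psk', 'ccmp')  -> 'wpa2psk'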
def convert_to_wpas_network(network):
return dict(WpasNetworkConverter(network))
def convert_to_wificontrol_network(network, current_network):
wifinetwork = dict(WifiControlNetworkConverter(network))
try:
if wifinetwork['ssid'] == current_network['ssid']:
wifinetwork.update(current_network)
wifinetwork["connected"] = True
except TypeError:
pass
finally:
return wifinetwork
class WpasNetworkConverter(object):
    """Converts a wificontrol network dict into (key, value) pairs in wpa_supplicant format."""
def __init__(self, network_dict):
def rawUtf8(s):
return "{}".format(s.encode('utf-8'))[2:-1]
self.security = network_dict.get('security')
self.name = rawUtf8(network_dict.get('ssid', ''))
self.password = rawUtf8(network_dict.get('password', ''))
self.identity = rawUtf8(network_dict.get('identity', ''))
def __iter__(self):
if (self.security == 'open'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
elif (self.security == 'wep'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
yield "group", "WEP104 WEP40"
yield "wep_key0", "{}".format(self.password)
elif (self.security == 'wpapsk'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpa2psk'):
yield "ssid", "{}".format(self.name)
yield "proto", "RSN"
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpaeap'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-EAP"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "identity", "{}".format(self.identity)
yield "password", <PASSWORD>(self.password)
yield "phase1", "peaplable=0"
else:
yield "ssid", "{}".format(self.name)
yield "psk", "{}".format(self.password)
class WifiControlNetworkConverter(object):
    """Converts a wpa_supplicant network dict back into (key, value) pairs in wificontrol format."""
def __init__(self, network_dict):
self.name = network_dict.get('ssid')
self.key_mgmt = network_dict.get('key_mgmt')
self.proto = network_dict.get('proto')
self.group = network_dict.get('group')
def __iter__(self):
if (self.key_mgmt == 'NONE'):
if not self.group:
yield "ssid", self.name
yield "security", "Open"
else:
yield "ssid", self.name
yield "security", "WEP"
elif (self.key_mgmt == 'WPA-PSK'):
if not self.proto:
yield "ssid", self.name
yield "security", "WPA-PSK"
else:
yield "ssid", self.name
yield "security", "WPA2-PSK"
elif (self.key_mgmt == 'WPA-EAP'):
yield "ssid", self.name
yield "security", "WPA-EAP"
else:
yield "ssid", self.name
yield "security", "NONE"
yield "connected", False
if __name__ == '__main__':
network = {'ssid': "MySSID", 'password': "<PASSWORD>", 'security': "wpaeap", 'identity': "<EMAIL>"}
conv = convert_to_wpas_network(network)
    reconv = convert_to_wificontrol_network(conv, None)
print(conv, reconv)
| 1.28125 | 1 |
poi_mining/biz/LSA/logEntropy.py | yummydeli/machine_learning | 1 | 10244 | <filename>poi_mining/biz/LSA/logEntropy.py
#!/usr/bin/env python
# encoding:utf-8
# ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) [2015] [baidu.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ##############################################################################
"""
Generate the log-entropy matrix and select suitable keywords from it
"""
import glob
import collections
import pandas
from sklearn.feature_extraction.text import CountVectorizer
import math
class LogEntropy(object):
"""计算logentropy, 得到类别关键字"""
def __init__(self):
self.fnames = glob.glob('data/segs/names.*')
def extract_segs(self):
"""分词文件中获取分词结果"""
idx = []
words = []
for f in self.fnames:
lines = []
for i, line in enumerate(open(f)):
if i % 2 == 1:
non_int = '\t'.join([e for e in line.decode('GBK').rstrip('\n').split('\t') \
if not e.isdigit()])
lines.append(non_int)
words.append('\t'.join(lines))
idx.append(f.split('.')[1][1:])
return words, idx
def mk_document_term_matrix(self):
"""生成TDM矩阵"""
words, idx = self.extract_segs()
countvec = CountVectorizer()
dtm = pandas.DataFrame(countvec.fit_transform(words).toarray(),
columns=countvec.get_feature_names(),
index=idx)
"""
canting faguo riben zhongwen
1001 1 0 0 1
991 1 0 1 0
203 1 1 0 0
"""
return dtm
def global_weighting(self, dtm):
""" 1 - Entropy(words) / log(N) """
# normalized entropy for word
pdtm = (dtm / dtm.sum(axis=0))
ndocs = pdtm.shape[0]
gw = 1 + (pdtm.applymap(lambda x: x * math.log(x) if x != 0 else 0).sum() / math.log(ndocs))
"""
canting 2.220446e-16
faguo 1.000000e+00
riben 1.000000e+00
zhongwen 1.000000e+00
"""
return gw
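    # Illustrative reading of the example above: 'canting' occurs uniformly in
    # every document, so its normalized entropy is maximal and its global weight
    # collapses to ~0, while document-discriminating words keep weight 1.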
def local_weighting(self, dtm):
""" math.log(freq + 1)"""
lw = dtm.applymap(lambda freq: math.log(freq + 1))
"""
canting faguo riben zhongwen
1001 0.693147 0.000000 0.000000 0.693147
991 0.693147 0.000000 0.693147 0.000000
203 0.693147 0.693147 0.000000 0.000000
"""
return lw
def logEntropyWeighting(self):
"""计算最终的logentropy得分"""
dtm = self.mk_document_term_matrix()
"""
canting faguo riben zhongwen
1001 1.539096e-16 0.000000 0.000000 0.693147
991 1.539096e-16 0.000000 0.693147 0.000000
203 1.539096e-16 0.693147 0.000000 0.000000
"""
logEntro = (self.global_weighting(dtm.copy()) *
self.local_weighting(dtm)).applymap(
lambda x: 0 if x < 0.001 else x
)
logEntro.T.to_csv('data/keyWords.cates', sep='\t', encoding='UTF-8')
if __name__ == '__main__':
lsaEntropy = LogEntropy()
lsaEntropy.logEntropyWeighting()
| 1.460938 | 1 |
mercury_ml/keras/containers.py | gabrieloexle/mercury-ml | 0 | 10372 | """
Simple IoC containers that provide direct access to various Keras providers
"""
class ModelSavers:
from mercury_ml.keras.providers import model_saving
save_hdf5 = model_saving.save_keras_hdf5
save_tensorflow_graph = model_saving.save_tensorflow_graph
save_tensorrt_pbtxt_config = model_saving.save_tensorrt_pbtxt_config
save_tensorrt_json_config = model_saving.save_tensorrt_json_config
save_labels_txt = model_saving.save_labels_txt
save_tensorflow_serving_predict_signature_def = model_saving.save_tensorflow_serving_predict_signature_def
class ModelLoaders:
from mercury_ml.keras.providers import model_loading
load_hdf5 = model_loading.load_hdf5_model
class LossFunctionFetchers:
from mercury_ml.keras.providers import loss_function_fetching
get_keras_loss = loss_function_fetching.get_keras_loss
get_custom_loss = loss_function_fetching.get_custom_loss
class OptimizerFetchers:
from mercury_ml.keras.providers import optimizer_fetching
get_keras_optimizer = optimizer_fetching.get_keras_optimizer
class ModelCompilers:
from mercury_ml.keras.providers import model_compilation
compile_model = model_compilation.compile_model
class ModelFitters:
from mercury_ml.keras.providers import model_fitting
fit = model_fitting.fit
fit_generator = model_fitting.fit_generator
class ModelDefinitions:
from mercury_ml.keras.providers.model_definition import conv_simple, mlp_simple
# these are just two small example model definitions. Users should define their own models
# to use as follows:
# >>> ModelDefinitions.my_model = my_model_module.define_model
define_conv_simple = conv_simple.define_model
define_mlp_simple = mlp_simple.define_model
class GeneratorPreprocessingFunctionGetters:
from mercury_ml.keras.providers.generator_preprocessors import get_random_eraser
get_random_eraser = get_random_eraser
class CallBacks:
from mercury_ml.keras.providers.model_callbacks import TensorBoardProvider, \
BaseLoggerProvider, EarlyStoppingProvider, ModelCheckpointProvider, TerminateOnNaNProvider, \
ProgbarLoggerProvider, RemoteMonitorProvider, LearningRateSchedulerProvider, ReduceLROnPlateauProvider, \
CSVLoggerProvider
tensorboard = TensorBoardProvider
base_logger = BaseLoggerProvider
terminate_on_nan = TerminateOnNaNProvider
progbar_logger = ProgbarLoggerProvider
model_checkpoint = ModelCheckpointProvider
early_stopping = EarlyStoppingProvider
remote_monitor = RemoteMonitorProvider
learning_rate_scheduler = LearningRateSchedulerProvider
reduce_lr_on_plateau = ReduceLROnPlateauProvider
csv_logger = CSVLoggerProvider
class ModelEvaluators:
from mercury_ml.keras.providers import model_evaluation
evaluate = model_evaluation.evaluate
evaluate_generator = model_evaluation.evaluate_generator
class PredictionFunctions:
from mercury_ml.keras.providers import prediction
predict = prediction.predict
predict_generator = prediction.predict_generator
| 1.335938 | 1 |
city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | 2 | 10500 | """
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
if os.path.exists(env_file):
env.read_env(env_file)
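# Example .env for local development (values are purely illustrative):
#   DEBUG=True
#   DATABASE_URL=postgis://user:password@localhost/city-infrastructure-platform
#   ALLOWED_HOSTS=localhost,127.0.0.1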
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# e.g. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
| 1.03125 | 1 |
cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 117 | 10628 | <gh_stars>100-1000
"""Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""
| 0.261719 | 0 |
run.py | romeroyakovlev/ii | 1 | 10756 | # -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
lst = api.load_echo(False)[1:]
if request.query.n:
return '\n'.join([t[0] for t in lst])
else:
return '\n'.join(['%s:%s:%s' % t for t in lst])
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return api.ru('blacklist.txt')
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=iso-8859-1')
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = api.load_echo(False)
mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
    # better yet, this should be moved into api.toss
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_get():
return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
| 1.4375 | 1 |
src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | 0 | 10884 | import csv
import joblib
from sklearn.metrics import accuracy_score
data = []
features = []
targets = []
feature_names = []
users = []
with open('satisfaction_feature_names.csv') as name_file:
column_name_file = csv.reader(name_file)
feature_names = next(column_name_file)[2:394]
with open('cza_satisfaction_train_0922.csv') as data_file:
csv_file = csv.reader(data_file)
idx = 0
for content in csv_file:
idx = idx + 1
if idx <= 10000:
continue
if idx > 50000:
break
content = content[:2] + list(map(float, content[2:]))
if len(content) != 0:
data.append(content)
features.append(content[2:394])
targets.append(content[-1])
users.append(content[1])
clf, sorted_feature_scores = joblib.load("cza_rf.pkl")
predict_result = clf.predict(features)
print(sorted_feature_scores)
print(accuracy_score(predict_result, targets))
result = list(zip(users, predict_result))
print(result[:10])
print(sum(predict_result))
print(sum([flag[1] for flag in result]))
with open("rf_predict_result.csv", "w", encoding="UTF-8") as w_file:
result_file = csv.writer(w_file)
for idx, row in enumerate(result):
if idx > 10:
break
row = list(row)
row.insert(0, 20200928)
result_file.writerow(row)
| 2.203125 | 2 |
tests/plot_profile/test_utils.py | mizeller/plot_profile | 0 | 11012 | """Test module ``plot_profile/utils.py``."""
# Standard library
import logging
# First-party
from plot_profile.utils import count_to_log_level
def test_count_to_log_level():
assert count_to_log_level(0) == logging.ERROR
assert count_to_log_level(1) == logging.WARNING
assert count_to_log_level(2) == logging.INFO
assert count_to_log_level(3) == logging.DEBUG
| 1.289063 | 1 |
233_number_of_digt_one.py | gengwg/leetcode | 2 | 11140 | <gh_stars>1-10
# Given an integer n, count the total number of digit 1 appearing
# in all non-negative integers less than or equal to n.
#
# For example:
# Given n = 13,
# Return 6, because digit 1 occurred in the following numbers:
# 1, 10, 11, 12, 13.
#
class Solution:
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
# sum all the '1's inside the n numbers
count = 0
for i in range(1, n+1): # count including n
count += self.numberOfDigitOne(i)
return count
def numberOfDigitOne(self, n):
"""
function to count number of digit ones in a number n.
mod by 10 to test if 1st digit is 1;
then divide by 10 to get next digit;
next test if next digit is 1.
"""
result = 0
while n:
if n % 10 == 1:
result += 1
            n //= 10  # floor division keeps this correct under both Python 2 and 3
return result
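# Worked example (illustrative): numberOfDigitOne(11) sees two '1' digits -> 2,
# so countDigitOne(13) = 1 + 1 + 2 + 1 + 1 (for 1, 10, 11, 12, 13) = 6.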
if __name__ == "__main__":
print Solution().countDigitOne(13)
| 3.21875 | 3 |
core/migrations/0009_measurement.py | Potanist/Potanist | 0 | 11268 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_grow_owner'),
]
operations = [
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('air_temperature', models.IntegerField(null=True, blank=True)),
('water_temperature', models.IntegerField(null=True, blank=True)),
('humidity', models.IntegerField(null=True, blank=True)),
('co2', models.IntegerField(null=True, blank=True)),
('ppm', models.IntegerField(null=True, blank=True)),
('tds', models.IntegerField(null=True, blank=True)),
('ec', models.IntegerField(null=True, blank=True)),
('ph', models.IntegerField(null=True, blank=True)),
('lumen', models.IntegerField(null=True, blank=True)),
('plant', models.ForeignKey(to='core.Plant')),
],
),
]
| 0.785156 | 1 |
05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 0 | 11396 | a = int(input())
for i in range(a):
print('* '*(a-a//2))
print(' *'*(a//2)) | 1.617188 | 2 |
simple_ddl_parser/tokens.py | burakuyar/simple-ddl-parser | 46 | 11524 | # statements that used at the start of defenition or in statements without columns
defenition_statements = {
"DROP": "DROP",
"CREATE": "CREATE",
"TABLE": "TABLE",
"DATABASE": "DATABASE",
"SCHEMA": "SCHEMA",
"ALTER": "ALTER",
"TYPE": "TYPE",
"DOMAIN": "DOMAIN",
"REPLACE": "REPLACE",
"OR": "OR",
"CLUSTERED": "CLUSTERED",
"SEQUENCE": "SEQUENCE",
"TABLESPACE": "TABLESPACE",
}
common_statements = {
"INDEX": "INDEX",
"REFERENCES": "REFERENCES",
"KEY": "KEY",
"ADD": "ADD",
"AS": "AS",
"CLONE": "CLONE",
"DEFERRABLE": "DEFERRABLE",
"INITIALLY": "INITIALLY",
"IF": "IF",
"NOT": "NOT",
"EXISTS": "EXISTS",
"ON": "ON",
"FOR": "FOR",
"ENCRYPT": "ENCRYPT",
"SALT": "SALT",
"NO": "NO",
"USING": "USING",
# bigquery
"OPTIONS": "OPTIONS",
}
columns_defenition = {
"DELETE": "DELETE",
"UPDATE": "UPDATE",
"NULL": "NULL",
"ARRAY": "ARRAY",
",": "COMMA",
"DEFAULT": "DEFAULT",
"COLLATE": "COLLATE",
"ENFORCED": "ENFORCED",
"ENCODE": "ENCODE",
"GENERATED": "GENERATED",
"COMMENT": "COMMENT",
}
first_liners = {
"LIKE": "LIKE",
"CONSTRAINT": "CONSTRAINT",
"FOREIGN": "FOREIGN",
"PRIMARY": "PRIMARY",
"UNIQUE": "UNIQUE",
"CHECK": "CHECK",
"WITH": "WITH",
}
common_statements.update(first_liners)
defenition_statements.update(common_statements)
after_columns_tokens = {
"PARTITIONED": "PARTITIONED",
"PARTITION": "PARTITION",
"BY": "BY",
# hql
"INTO": "INTO",
"STORED": "STORED",
"LOCATION": "LOCATION",
"ROW": "ROW",
"FORMAT": "FORMAT",
"TERMINATED": "TERMINATED",
"COLLECTION": "COLLECTION",
"ITEMS": "ITEMS",
"MAP": "MAP",
"KEYS": "KEYS",
"SERDE": "SERDE",
"CLUSTER": "CLUSTER",
"SERDEPROPERTIES": "SERDEPROPERTIES",
"TBLPROPERTIES": "TBLPROPERTIES",
"SKEWED": "SKEWED",
# oracle
"STORAGE": "STORAGE",
"TABLESPACE": "TABLESPACE",
# mssql
"TEXTIMAGE_ON": "TEXTIMAGE_ON",
}
sequence_reserved = {
"INCREMENT": "INCREMENT",
"START": "START",
"MINVALUE": "MINVALUE",
"MAXVALUE": "MAXVALUE",
"CACHE": "CACHE",
"NO": "NO",
}
tokens = tuple(
set(
["ID", "DOT", "STRING", "DQ_STRING", "LP", "RP", "LT", "RT", "COMMAT"]
+ list(defenition_statements.values())
+ list(common_statements.values())
+ list(columns_defenition.values())
+ list(sequence_reserved.values())
+ list(after_columns_tokens.values())
)
)
symbol_tokens = {
")": "RP",
"(": "LP",
}
symbol_tokens_no_check = {"<": "LT", ">": "RT"}
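# A minimal sketch of how these tables are typically consumed by a PLY-style
# lexer (illustrative only; the actual lexer lives elsewhere in the package):
#
#   def t_ID(t):
#       r'[a-zA-Z_][a-zA-Z0-9_]*'
#       t.type = defenition_statements.get(t.value.upper(), "ID")
#       return t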
| 1.625 | 2 |
jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py | Taekyoon/executors | 29 | 11652 | <filename>jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py
import os
import time
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.logging.profile import TimeContext
from jina_commons.indexers.dump import import_metas, import_vectors
from ..postgres_indexer import PostgreSQLStorage
from ..postgreshandler import doc_without_embedding
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d "
f"--remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down "
f"--remove-orphans"
)
d_embedding = np.array([1, 1, 1, 1, 1, 1, 1])
c_embedding = np.array([2, 2, 2, 2, 2, 2, 2])
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise Exception('no port available')
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
def get_documents(chunks, same_content, nr=10, index_start=0, same_tag_content=None):
next_chunk_id = nr + index_start
for i in range(index_start, nr + index_start):
d = Document()
d.id = i
if same_content:
d.text = 'hello world'
d.embedding = np.random.random(d_embedding.shape)
else:
d.text = f'hello world {i}'
d.embedding = np.random.random(d_embedding.shape)
if same_tag_content:
d.tags['field'] = 'tag data'
elif same_tag_content is False:
d.tags['field'] = f'tag data {i}'
for j in range(chunks):
c = Document()
c.id = next_chunk_id
if same_content:
c.text = 'hello world from chunk'
c.embedding = np.random.random(c_embedding.shape)
else:
c.text = f'hello world from chunk {j}'
c.embedding = np.random.random(c_embedding.shape)
if same_tag_content:
c.tags['field'] = 'tag data'
elif same_tag_content is False:
c.tags['field'] = f'tag data {next_chunk_id}'
next_chunk_id += 1
d.chunks.append(c)
yield d
def validate_db_side(postgres_indexer, expected_data):
ids, vecs, metas = zip(*expected_data)
with postgres_indexer.handler as handler:
cursor = handler.connection.cursor()
cursor.execute(
f'SELECT doc_id, embedding, doc from {postgres_indexer.table} ORDER BY '
f'doc_id::int'
)
record = cursor.fetchall()
for i in range(len(expected_data)):
np.testing.assert_equal(ids[i], str(record[i][0]))
embedding = np.frombuffer(record[i][1], dtype=postgres_indexer.dump_dtype)
np.testing.assert_equal(vecs[i], embedding)
np.testing.assert_equal(metas[i], bytes(record[i][2]))
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[1] / 'config.yml'), override_with={'dry_run': True}
)
assert ex.username == 'postgres'
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_postgres(tmpdir, docker_compose):
postgres_indexer = PostgreSQLStorage()
NR_DOCS = 10000
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
postgres_indexer.delete(original_docs, {})
with TimeContext(f'### indexing {len(original_docs)} docs'):
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
info_original_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in original_docs
]
validate_db_side(postgres_indexer, info_original_docs)
new_docs = DocumentArray(
list(get_documents(chunks=False, nr=10, same_content=True))
)
postgres_indexer.update(new_docs, {})
info_new_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in new_docs
]
ids, vecs, metas = zip(*info_new_docs)
expected_info = [(ids[0], vecs[0], metas[0])]
validate_db_side(postgres_indexer, expected_info)
postgres_indexer.delete(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, len(original_docs) - len(new_docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mwu_empty_dump(tmpdir, docker_compose):
f = Flow().add(uses=PostgreSQLStorage)
with f:
resp = f.post(
on='/index', inputs=DocumentArray([Document()]), return_results=True
)
print(f'{resp}')
dump_path = os.path.join(tmpdir, 'dump')
with f:
f.post(
on='/dump',
parameters={'dump_path': os.path.join(tmpdir, 'dump'), 'shards': 1},
)
# assert dump contents
ids, vecs = import_vectors(dump_path, pea_id='0')
assert ids is not None
ids, metas = import_metas(dump_path, pea_id='0')
assert vecs is not None
assert metas is not None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(docker_compose):
indexer = PostgreSQLStorage()
doc = Document(embedding=np.random.random(10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (10,)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_get_documents(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.delete(docs)
assert len(list(indexer.get_document_iterator())) == 0
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_clear(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.clear()
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
@pytest.mark.parametrize('psql_virtual_shards', [44, 128])
@pytest.mark.parametrize('real_shards', [1, 5])
def test_snapshot(docker_compose, psql_virtual_shards, real_shards):
postgres_indexer = PostgreSQLStorage(virtual_shards=psql_virtual_shards)
def _assert_snapshot_shard_distribution(func, nr_shards, total_docs_expected):
total_docs = 0
for i in range(nr_shards):
data = func(shard_id=i, total_shards=nr_shards)
docs_this_shard = len(list(data))
assert docs_this_shard >= postgres_indexer.virtual_shards // real_shards
total_docs += docs_this_shard
np.testing.assert_equal(total_docs, total_docs_expected)
NR_SHARDS = real_shards
NR_DOCS = postgres_indexer.virtual_shards * 2 + 3
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
NR_NEW_DOCS = 30
new_docs = DocumentArray(
list(
get_documents(
nr=NR_NEW_DOCS, index_start=NR_DOCS, chunks=0, same_content=False
)
)
)
    # make sure to clean up if the PSQL instance is kept running
postgres_indexer.delete(original_docs, {})
postgres_indexer.delete(new_docs, {})
# indexing the documents
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
# create a snapshot
postgres_indexer.snapshot()
# data added the snapshot will not be part of the export
postgres_indexer.add(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS + NR_NEW_DOCS)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS)
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot, NR_SHARDS, NR_DOCS
)
# create another snapshot
postgres_indexer.snapshot()
timestamp = postgres_indexer.last_snapshot_timestamp
    # docs used to resolve the delta
NR_DOCS_DELTA = 33
docs_delta = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA,
index_start=NR_DOCS + NR_NEW_DOCS,
chunks=0,
same_content=False,
)
)
)
time.sleep(3)
postgres_indexer.add(docs_delta, {})
np.testing.assert_equal(
postgres_indexer.size, NR_DOCS + NR_NEW_DOCS + NR_DOCS_DELTA
)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS + NR_NEW_DOCS)
NR_DOCS_DELTA_DELETED = 10
docs_delta_deleted = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA_DELETED, index_start=0, chunks=0, same_content=False
)
)
)
postgres_indexer.delete(docs_delta_deleted, {'soft_delete': True})
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot,
NR_SHARDS,
NR_DOCS + NR_NEW_DOCS,
)
# we use total_shards=1 in order to guarantee getting all the data in the delta
deltas = postgres_indexer.get_delta_updates(
shard_id=0, total_shards=1, timestamp=timestamp
)
deltas = list(deltas)
np.testing.assert_equal(len(deltas), NR_DOCS_DELTA + NR_DOCS_DELTA_DELETED)
def test_postgres_shard_distribution():
assert ['0'] == PostgreSQLStorage._vshards_to_get(0, 3, 5)
assert ['1'] == PostgreSQLStorage._vshards_to_get(1, 3, 5)
assert ['2', '3', '4'] == PostgreSQLStorage._vshards_to_get(2, 3, 5)
assert [str(s) for s in range(5)] == PostgreSQLStorage._vshards_to_get(0, 1, 5)
with pytest.raises(ValueError):
PostgreSQLStorage._vshards_to_get(1, 1, 5)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_save_get_trained_model(docker_compose):
postgres_indexer = PostgreSQLStorage()
model = np.random.random((100, 5)).tobytes()
postgres_indexer.save_trained_model(model, None)
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum is None
postgres_indexer.save_trained_model(model, 'sha256:hello')
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum == 'sha256:hello'
| 1.351563 | 1 |
tensornetwork/backends/backend_test.py | ashoknar/TensorNetwork | 0 | 11780 | <gh_stars>0
"""Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
from tensornetwork import connect, contract, Node
from tensornetwork.backends.base_backend import BaseBackend
from tensornetwork.backends import backend_factory
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
    # never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
# Nuke the cache.
backend_factory._INSTANTIATED_BACKENDS = dict()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
#not sure why config is imported here?
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
#pylint: disable=reimported
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
a = Node(np.ones((10,)), backend="numpy")
b = Node(np.ones((10,)), backend="numpy")
edge = connect(a[0], b[0])
final_node = contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="jax")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="tensorflow")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="pytorch")
def test_base_backend_name():
backend = BaseBackend()
assert backend.name == "base backend"
def test_base_backend_tensordot_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]])
def test_base_backend_reshape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.reshape(np.ones((2, 2)), (4, 1))
def test_base_backend_transpose_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.transpose(np.ones((2, 2)), [0, 1])
def test_base_backend_slice_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.slice(np.ones((2, 2)), (0, 1), (1, 1))
def test_base_backend_svd_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.svd_decomposition(np.ones((2, 2)), 0)
def test_base_backend_qr_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.qr_decomposition(np.ones((2, 2)), 0)
def test_base_backend_rq_decompositon_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.rq_decomposition(np.ones((2, 2)), 0)
def test_base_backend_shape_concat_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0)
def test_base_backend_shape_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tensor(np.ones((2, 2)))
def test_base_backend_shape_tuple_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_tuple(np.ones((2, 2)))
def test_base_backend_shape_prod_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.shape_prod(np.ones((2, 2)))
def test_base_backend_sqrt_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sqrt(np.ones((2, 2)))
def test_base_backend_diag_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.diag(np.ones((2, 2)))
def test_base_backend_convert_to_tensor_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.convert_to_tensor(np.ones((2, 2)))
def test_base_backend_trace_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.trace(np.ones((2, 2)))
def test_base_backend_outer_product_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.outer_product(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_einsum_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.einsum("ii", np.ones((2, 2)))
def test_base_backend_norm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.norm(np.ones((2, 2)))
def test_base_backend_eye_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eye(2, dtype=np.float64)
def test_base_backend_ones_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.ones((2, 2), dtype=np.float64)
def test_base_backend_zeros_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.zeros((2, 2), dtype=np.float64)
def test_base_backend_randn_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.randn((2, 2))
def test_base_backend_random_uniform_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.random_uniform((2, 2))
def test_base_backend_conj_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.conj(np.ones((2, 2)))
def test_base_backend_eigh_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigh(np.ones((2, 2)))
def test_base_backend_eigs_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_base_backend_eigsh_lanczos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.eigsh_lanczos(lambda x: x, np.ones((2)))
def test_base_backend_addition_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.addition(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_subtraction_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.subtraction(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_multiply_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.multiply(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_divide_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.divide(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_index_update_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_inv_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.inv(np.ones((2, 2)))
def test_base_backend_sin_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sin(np.ones((2, 2)))
def test_base_backend_cos_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.cos(np.ones((2, 2)))
def test_base_backend_exp_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.exp(np.ones((2, 2)))
def test_base_backend_log_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.log(np.ones((2, 2)))
def test_base_backend_expm_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.expm(np.ones((2, 2)))
def test_base_backend_sparse_shape_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.sparse_shape(np.ones((2, 2)))
def test_base_backend_broadcast_right_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_right_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_base_backend_broadcast_left_multiplication_not_implemented():
backend = BaseBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_left_multiplication(np.ones((2, 2)), np.ones((2, 2)))
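# The factory is expected to memoize backend instances per name, so two
# lookups with the same backend name must return the identical object
# (asserted with `is` below).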
def test_backend_instantiation(backend):
backend1 = backend_factory.get_backend(backend)
backend2 = backend_factory.get_backend(backend)
assert backend1 is backend2
| 1.25 | 1 |
tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 0 | 11908 | <reponame>aaron-parsons/pymalcolm
from mock import Mock
from malcolm.testutil import ChildTestCase
from malcolm.modules.ADPandABlocks.blocks import pandablocks_runnable_block
class TestADPandABlocksBlocks(ChildTestCase):
def test_pandablocks_runnable_block(self):
self.create_child_block(
pandablocks_runnable_block, Mock(),
mri_prefix="mri_prefix", pv_prefix="pv_prefix", config_dir="/tmp")
| 1.085938 | 1 |
validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | 136 | 12036 | <filename>validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py
"""Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import myid, finalize, distribute
from anuga import Domain as Domain
from math import cos
from numpy import zeros, ones, array, interp, polyval, ones_like, zeros_like
from numpy import where, logical_and
from time import localtime, strftime, gmtime
from scipy.interpolate import interp1d
from anuga.geometry.polygon import inside_polygon, is_inside_triangle
#from balanced_dev import *
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time_stamp = strftime('%Y%m%d_%H%M%S', localtime())
#output_dir = 'varying_width' + time_stamp  # renamed to avoid shadowing the time module imported later
output_dir = '.'
output_file = 'varying_width'
#anuga.copy_code_files(output_dir,__file__)
#start_screen_catcher(output_dir+'_')
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 1500.
W = 60.
#===============================================================================
# Create sequential domain
#===============================================================================
if myid == 0:
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.,-W/2.))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
#------------------------------------------------------------------------------
# Setup Algorithm, either using command line arguments
# or override manually yourself
#------------------------------------------------------------------------------
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 12.0)
XX = array([0.,50.,100.,150.,250.,300.,350.,400.,425.,435.,450.,470.,475.,500.,
505.,530.,550.,565.,575.,600.,650.,700.,750.,800.,820.,900.,950.,
1000.,1500.])
ZZ = array([0.,0.,2.5,5.,5.,3.,5.,5.,7.5,8.,9.,9.,9.,9.1,9.,9.,6.,5.5,5.5,5.,
4.,3.,3.,2.3,2.,1.2,0.4,0.,0.])
WW = array([40.,40.,30.,30.,30.,30.,25.,25.,30.,35.,35.,40.,40.,40.,45.,45.,50.,
45.,40.,40.,30.,40.,40.,5.,40.,35.,25.,40.,40.])/2.
depth = interp1d(XX, ZZ)
width = interp1d(XX, WW)
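    # The bed is built from the tabulated cross-sections above: outside the
    # channel (|y| >= local half-width) the banks sit at a constant 25 m,
    # while inside the channel the bed follows the interpolated depth profile.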
def bed_elevation(x,y):
z = 25.0*ones_like(x)
wid = width(x)
dep = depth(x)
z = where( logical_and(y < wid, y>-wid), dep, z)
return z
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#===========================================================================
# Create Parallel domain
#===========================================================================
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.0):
#print(domain.timestepping_statistics(track_speeds=True))
if myid == 0 and verbose: print(domain.timestepping_statistics())
#vis.update()
if myid == 0 and verbose: print('That took %s sec' % str(time.time()-t0))
domain.sww_merge(delete_old=True)
finalize()
| 2.265625 | 2 |
1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py | jiadaizhao/LeetCode | 49 | 12164 | <filename>1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py
import collections
from itertools import combinations
from collections import Counter
from typing import List  # needed for the List type hints in the signature below
class Solution:
def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]:
visit = collections.defaultdict(list)
for t, u, w in sorted(zip(timestamp, username, website)):
visit[u].append(w)
table = sum([Counter(set(combinations(w, 3))) for w in visit.values()], Counter())
return list(min(table, key=lambda k: (-table[k], k)))
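# Quick sanity check with the example from the problem statement; the
# expected 3-sequence is ["home", "about", "career"] (visited by 2 users).
if __name__ == "__main__":
    username = ["joe", "joe", "joe", "james", "james", "james", "james",
                "mary", "mary", "mary"]
    timestamp = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    website = ["home", "about", "career", "home", "cart", "maps", "home",
               "home", "about", "career"]
    print(Solution().mostVisitedPattern(username, timestamp, website))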
| 2.515625 | 3 |
src/python/deepseq2.py | yotamfr/prot2vec | 8 | 12292 | import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from src.python.baselines import *
from pymongo import MongoClient
from tqdm import tqdm
import tensorflow as tf
### Keras
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Activation
from keras.layers import Conv2D, Conv1D
from keras.layers import Dropout, BatchNormalization
from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Concatenate, Flatten, Reshape
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler
# from keras.losses import hinge, binary_crossentropy
from keras import backend as K
from sklearn.metrics import log_loss
import math
import argparse
sess = tf.Session()
K.set_session(sess)
LR = 0.001
BATCH_SIZE = 32
LONG_EXPOSURE = True
t0 = datetime(2014, 1, 1, 0, 0)
t1 = datetime(2014, 9, 1, 0, 0)
MAX_LENGTH = 2000
MIN_LENGTH = 30
def get_classes(db, onto, start=t0, end=t1):
q1 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": start},
'Aspect': ASPECT}
q2 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": start, "$lte": end},
'Aspect': ASPECT}
def helper(q):
seq2go, _ = GoAnnotationCollectionLoader(
db.goa_uniprot.find(q), db.goa_uniprot.count(q), ASPECT).load()
for i, (k, v) in enumerate(seq2go.items()):
sys.stdout.write("\r{0:.0f}%".format(100.0 * i / len(seq2go)))
seq2go[k] = onto.propagate(v)
return reduce(lambda x, y: set(x) | set(y), seq2go.values(), set())
return onto.sort(helper(q1) | helper(q2))
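# Temporal hold-out split: training uses annotations dated on or before t0
# (2014-01-01), validation uses annotations from (t0, t1] (through
# 2014-09-01), in the style of the CAFA evaluation protocol.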
def get_training_and_validation_streams(db, limit=None):
q_train = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": t0},
'Aspect': ASPECT}
seq2go_trn, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_train), db.goa_uniprot.count(q_train), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_trn.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_trn = DataStream(source, count, seq2go_trn)
q_valid = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": t0, "$lte": t1},
'Aspect': ASPECT}
seq2go_tst, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_valid), db.goa_uniprot.count(q_valid), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_tst.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_tst = DataStream(source, count, seq2go_tst)
return stream_trn, stream_tst
class DataStream(object):
def __init__(self, source, count, seq2go):
self._count = count
self._source = source
self._seq2go = seq2go
def __iter__(self):
count = self._count
source = self._source
seq2go = self._seq2go
for k, seq in UniprotCollectionLoader(source, count):
if not MIN_LENGTH <= len(seq) <= MAX_LENGTH:
continue
x = [AA.aa2index[aa] for aa in seq]
yield k, x, seq2go[k]
def __len__(self):
return self._count
def step_decay(epoch):
initial_lrate = LR
drop = 0.5
epochs_drop = 1.0
lrate = max(0.0001, initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop)))
return lrate
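# For reference, with LR = 0.001 this halves the rate once per epoch:
# epoch 0 -> 5e-4, epoch 1 -> 2.5e-4, epoch 2 -> 1.25e-4, and from
# epoch 3 onwards the rate is clamped at the 1e-4 floor.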
def OriginalInception(inpt, num_channels=64):
# tower_0 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 3, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, padding='same')(inpt)
# tower_3 = Conv1D(num_channels, 1, padding='same')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2,])
def LargeInception(inpt, num_channels=64):
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(inpt)
tower_1 = BatchNormalization()(tower_1)
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(inpt)
tower_2 = BatchNormalization()(tower_2)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def SmallInception(inpt, num_channels=150):
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_1)
# tower_1 = BatchNormalization()(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 15, padding='same', activation='relu')(tower_2)
# tower_2 = BatchNormalization()(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def Classifier(inp1d, classes):
out = Dense(len(classes))(inp1d)
out = BatchNormalization()(out)
out = Activation('sigmoid')(out)
return out
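# MotifNet stacks wide 1-D convolutions with two inception-style blocks
# (parallel 5- and 15-wide towers concatenated along the channel axis) and
# a global max pool, so informative motifs are detected position-independently.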
def MotifNet(classes, opt):
inpt = Input(shape=(None,))
out = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
out = Conv1D(250, 15, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Conv1D(100, 15, activation='relu', padding='valid')(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = Conv1D(250, 5, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Classifier(GlobalMaxPooling1D()(out), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Inception(inpt, tower1=6, tower2=10):
tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, strides=1, padding='same')(inpt)
# tower_3 = Conv1D(64, 1, padding='same', activation='relu')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2])
def ProteinInception(classes, opt):
inpt = Input(shape=(None,))
img = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Inception(Inception(img))
out = Classifier(GlobalMaxPooling1D()(feats), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Features(inpt):
feats = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = GlobalMaxPooling1D()(feats)
return feats
def DeeperSeq(classes, opt):
inp = Input(shape=(None,))
out = Classifier(Features(inp), classes)
model = Model(inputs=[inp], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def batch_generator(stream, onto, classes):
s_cls = set(classes)
data = dict()
def labels2vec(lbl):
y = np.zeros(len(classes))
for go in onto.propagate(lbl, include_root=False):
if go not in s_cls:
continue
y[classes.index(go)] = 1
return y
def pad_seq(seq, max_length=MAX_LENGTH):
delta = max_length - len(seq)
left = [PAD for _ in range(delta // 2)]
right = [PAD for _ in range(delta - delta // 2)]
seq = left + seq + right
return np.asarray(seq)
def prepare_batch(sequences, labels):
b = max(map(len, sequences)) + 100
Y = np.asarray([labels2vec(lbl) for lbl in labels])
X = np.asarray([pad_seq(seq, b) for seq in sequences])
return X, Y
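    # Bucket incoming sequences by exact length so that each batch needs
    # almost no padding; a bucket is flushed once it reaches BATCH_SIZE and
    # any partially filled buckets are flushed after the stream is exhausted.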
for k, x, y in stream:
lx = len(x)
if lx in data:
data[lx].append([k, x, y])
ids, seqs, lbls = zip(*data[lx])
if len(seqs) == BATCH_SIZE:
yield ids, prepare_batch(seqs, lbls)
del data[lx]
else:
data[lx] = [[k, x, y]]
for packet in data.values():
ids, seqs, lbls = zip(*packet)
yield ids, prepare_batch(seqs, lbls)
class LossHistory(Callback):
def __init__(self):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def train(model, gen_xy, length_xy, epoch, num_epochs,
history=LossHistory(), lrate=LearningRateScheduler(step_decay)):
pbar = tqdm(total=length_xy)
for _, (X, Y) in gen_xy:
model.fit(x=X, y=Y,
batch_size=BATCH_SIZE,
epochs=num_epochs if LONG_EXPOSURE else epoch + 1,
verbose=0,
validation_data=None,
initial_epoch=epoch,
callbacks=[history])
pbar.set_description("Training Loss:%.5f" % np.mean(history.losses))
pbar.update(len(Y))
pbar.close()
def zeroone2oneminusone(vec):
return np.add(np.multiply(np.array(vec), 2), -1)
def oneminusone2zeroone(vec):
return np.divide(np.add(np.array(vec), 1), 2)
def calc_loss(y_true, y_pred):
return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)])
def predict(model, gen_xy, length_xy, classes):
pbar = tqdm(total=length_xy, desc="Predicting...")
i, m, n = 0, length_xy, len(classes)
ids = list()
y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
    for keys, (X, Y) in gen_xy:
        k = len(Y)
        ids.extend(keys)
        y_hat, y = model.predict(X), Y
        # Advance the row cursor by the batch size; the previous enumerate
        # index only moved one row per batch and overwrote predictions.
        y_pred[i:i + k, :], y_true[i:i + k, :] = y_hat, y
        i += k
        pbar.update(k)
pbar.close()
return ids, y_true, y_pred
def evaluate(y_true, y_pred, classes):
y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
y_true = y_true[~np.all(y_true == 0, axis=1)]
prs, rcs, f1s = performance(y_pred, y_true, classes)
return calc_loss(y_true, y_pred), prs, rcs, f1s
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB"),
parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'],
default="F", help="Specify the ontology aspect.")
parser.add_argument("--init_epoch", type=int, default=0,
help="Which epoch to start training the model?")
parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'],
default="deepseq", help="Specify the model arch.")
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
ASPECT = args.aspect # default: Molecular Function
client = MongoClient(args.mongo_url)
db = client['prot2vec']
print("Loading Ontology...")
onto = get_ontology(ASPECT)
# classes = get_classes(db, onto)
classes = onto.classes
classes.remove(onto.root)
assert onto.root not in classes
opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
if args.arch == 'inception':
model = ProteinInception(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
elif args.arch == 'deepseq':
model = DeeperSeq(classes, opt)
LONG_EXPOSURE = True
num_epochs = 20
elif args.arch == 'motifnet':
model = MotifNet(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
else:
        print('Unknown model arch')
        exit(1)
if args.resume:
model.load_weights(args.resume)
print("Loaded model from disk")
model.summary()
for epoch in range(args.init_epoch, num_epochs):
trn_stream, tst_stream = get_training_and_validation_streams(db)
train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs)
_, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes)
loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes)
i = np.argmax(f1s)
f_max = f1s[i]
print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)"
% (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i]))
model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max)
model.save_weights("checkpoints/%s.hdf5" % model_str)
with open("checkpoints/%s.json" % model_str, "w+") as f:
f.write(model.to_json())
np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
| 1.539063 | 2 |
lambda.py | deepanshu-yadav/NSFW-Classifier | 13 | 12420 | <filename>lambda.py
import base64
import json
import boto3
import numpy as np
endpoint = 'myprojectcapstone'
def format_response(message, status_code):
return {
'statusCode': str(status_code),
'body': json.dumps(message),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
}
def lambda_handler(event, context):
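    # The request body is expected to carry a browser-style data URL, e.g.
    #     {"data": "data:image/png;base64,<encoded image>"}
    # the prefix is stripped before base64-decoding the image bytes.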
try :
body = json.loads(event['body'])
image = base64.b64decode(body['data'].replace('data:image/png;base64,', ''))
try :
runtime = boto3.Session().client(service_name='sagemaker-runtime', region_name='us-east-2')
response = runtime.invoke_endpoint(EndpointName=endpoint, ContentType='application/x-image', Body=image)
print(response)
try:
probs = response['Body'].read()
probs = json.loads(probs)
#probs = ast.literal_eval(probs)
#pred = probs.index(max(probs))
pred = np.argmax( np.array( probs ) )
                if pred == 0:
                    resp = 'Animated NSFW'
                elif pred == 1:
                    resp = 'Contains Nudity'
                elif pred == 2:
                    resp = 'Contains Porn'
                elif pred == 4:
                    resp = 'Contains semi Nudity'
                else:
                    resp = 'Safe for viewing'
return format_response(resp, 200)
except:
                return format_response('Ouch! Something went wrong with loading json data from endpoint: ' + str(probs), 200)
except :
return format_response('Ouch! Something went wrong with endpoint' , 200)
except :
return format_response('Ouch! Something went wrong with decoding' , 200)
| 1.507813 | 2 |
python/svm.py | mwalton/em-machineLearning | 0 | 12548 | import numpy as np
import argparse
import os.path
import plots as plot
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
import time
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import StratifiedKFold
def loadData(XPath, yPath):
X = np.genfromtxt(XPath, delimiter=",", dtype="float32")
y = np.genfromtxt(yPath, delimiter=",", dtype="float32")
return (X, y)
def convertToClasses(targetVector):
return np.argmax(targetVector[:,1:5], axis=1)
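# Note: convertToClasses assumes columns 1-4 of the target matrix hold a
# one-hot encoding of four classes and collapses each row to an integer label.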
def standardize(trainFeatures, testFeatures):
    # Fit the scaler on the training set only and apply the same transform
    # to the test set, so no test-set statistics leak into preprocessing.
    scaler = StandardScaler()
    return scaler.fit_transform(trainFeatures), scaler.transform(testFeatures)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-x", "--xTrain", required = True,
help = "path to training feature set")
ap.add_argument("-y", "--yTrain", required = True,
help = "path to training target set")
ap.add_argument("-X", "--xTest", required = True,
help = "path to testing feature set")
ap.add_argument("-Y", "--yTest", required = True,
help = "path to testing target set")
ap.add_argument("-o", "--optimize", type = int, default = 0,
help = "optomization mode: 0 use default, 1 optomize, 2 use pkl model if possible")
ap.add_argument("-m", "--multiClass", type = int, default=1,
help = "exclusive multi class or regression")
ap.add_argument("-p", "--pickle", default="models/svmModel.pkl",
help = "pickle dump of model (output if optomize = 1, input if optomize = 0)")
ap.add_argument("-v", "--visualize", type=int, default=0,
help = "whether or not to show visualizations after a run")
args = vars(ap.parse_args())
(trainX, trainY) = loadData(args["xTrain"], args["yTrain"])
(testX, testY) = loadData(args["xTest"], args["yTest"])
# required scaling for SVM; the scaler is fit on the training data only
(trainX, testX) = standardize(trainX, testX)
if (args["multiClass"] == 1):
trainY = convertToClasses(trainY)
testY = convertToClasses(testY)
# check to see if a grid search should be done
if args["optimize"] == 1:
#configure stratified k-fold cross validation
cv = StratifiedKFold(y=trainY, n_folds=4, shuffle=True)
    # perform a grid search on the 'C' and 'gamma' parameters
    # of the SVM
print "SEARCHING SVM"
C_range = 2. ** np.arange(-15, 15, step=1)
gamma_range = 2. ** np.arange(-15, 15, step=1)
param_grid = dict(gamma=gamma_range, C=C_range)
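    # The grid above covers 2^-15 .. 2^14 for both C and gamma, the usual
    # coarse base-2 logarithmic search recommended for RBF-kernel SVMs.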
start = time.time()
gs = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv, n_jobs = -1, verbose = 2)
gs.fit(trainX, trainY)
# print diagnostic information to the user and grab the
# best model
print "done in %0.3fs" % (time.time() - start)
print "best score: %0.3f" % (gs.best_score_)
print "SVM PARAMETERS"
bestParams = gs.best_estimator_.get_params()
# loop over the parameters and print each of them out
# so they can be manually set
print("Best Estimator: %s" % gs.best_estimator_)
#for p in sorted(params.keys()):
# print "\t %s: %f" % (p, bestParams[p])
print("Accuracy Score On Validation Set: %s\n" % accuracy_score(testY, gs.predict(testX)))
# show a reminder message
print "\nIMPORTANT"
print "Now that your parameters have been searched, manually set"
print "them and re-run this script with --optomize 0"
joblib.dump(gs.best_estimator_, args["pickle"])
# otherwise, use the manually specified parameters
else:
# evaluate using SVM
if (os.path.isfile(args["pickle"]) and args["optimize"] == 2):
clf = joblib.load(args["pickle"])
else:
clf = svm.SVC()
clf.fit(trainX, trainY)
print "SVM PERFORMANCE"
pred = clf.predict(testX)
print classification_report(testY, pred)
print("Accuracy Score: %s\n" % accuracy_score(testY, pred))
if (args["visualize"] == 1):
plot.accuracy(testY, pred, "SVM")
| 2.0625 | 2 |
src/jellyroll/managers.py | jacobian-archive/jellyroll | 3 | 12676 | import datetime
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from tagging.fields import TagField
class ItemManager(models.Manager):
def __init__(self):
super(ItemManager, self).__init__()
self.models_by_name = {}
def create_or_update(self, instance, timestamp=None, url=None, tags="", source="INTERACTIVE", source_id="", **kwargs):
"""
        Create or update an Item from some instance.
"""
# If the instance hasn't already been saved, save it first. This
# requires disconnecting the post-save signal that might be sent to
# this function (otherwise we could get an infinite loop).
if instance._get_pk_val() is None:
try:
signals.post_save.disconnect(self.create_or_update, sender=type(instance))
            except Exception:
reconnect = False
else:
reconnect = True
instance.save()
if reconnect:
signals.post_save.connect(self.create_or_update, sender=type(instance))
# Make sure the item "should" be registered.
if not getattr(instance, "jellyrollable", True):
return
# Check to see if the timestamp is being updated, possibly pulling
# the timestamp from the instance.
if hasattr(instance, "timestamp"):
timestamp = instance.timestamp
if timestamp is None:
update_timestamp = False
timestamp = datetime.datetime.now()
else:
update_timestamp = True
# Ditto for tags.
if not tags:
for f in instance._meta.fields:
if isinstance(f, TagField):
tags = getattr(instance, f.attname)
break
if not url:
if hasattr(instance,'url'):
url = instance.url
# Create the Item object.
ctype = ContentType.objects.get_for_model(instance)
item, created = self.get_or_create(
content_type = ctype,
object_id = force_unicode(instance._get_pk_val()),
defaults = dict(
timestamp = timestamp,
source = source,
source_id = source_id,
tags = tags,
url = url,
)
)
item.tags = tags
item.source = source
item.source_id = source_id
if update_timestamp:
item.timestamp = timestamp
# Save and return the item.
item.save()
return item
def follow_model(self, model):
"""
Follow a particular model class, updating associated Items automatically.
"""
self.models_by_name[model.__name__.lower()] = model
signals.post_save.connect(self.create_or_update, sender=model)
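    # Hypothetical usage (the model name is illustrative only):
    #     Item.objects.follow_model(Bookmark)
    # After registration, saving a Bookmark transparently creates or updates
    # its corresponding Item via the post_save signal.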
def get_for_model(self, model):
"""
Return a QuerySet of only items of a certain type.
"""
return self.filter(content_type=ContentType.objects.get_for_model(model))
def get_last_update_of_model(self, model, **kwargs):
"""
Return the last time a given model's items were updated. Returns the
epoch if the items were never updated.
"""
qs = self.get_for_model(model)
if kwargs:
qs = qs.filter(**kwargs)
try:
return qs.order_by('-timestamp')[0].timestamp
except IndexError:
return datetime.datetime.fromtimestamp(0)
| 1.710938 | 2 |