{
"source": "01dkg/googleAdsAutomations",
"score": 2
} |
#### File: googleAdsAutomations/googleAds/last_7_days.py
```python
import logging
import sys
from googleads import adwords
import pandas as pd
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds.transport').setLevel(logging.DEBUG)
def main(client):
f = open("random.csv","w")
report_downloader = client.GetReportDownloader(version='v201705')
report = {
'reportName': 'Last 30 days CRITERIA_PERFORMANCE_REPORT',
'dateRangeType': 'LAST_30_DAYS',
'reportType': 'PLACEHOLDER_REPORT',
'downloadFormat': 'CSV',
'selector': {
'fields': ['Date','CampaignId','AdGroupId','ClickType','Conversions']
}
}
    # DownloadReport streams the CSV rows straight into the open file handle,
    # so there is nothing useful to assign from its return value.
    report_downloader.DownloadReport(
        report, f, skip_report_header=True, skip_column_header=False,
        skip_report_summary=True, include_zero_impressions=False)
f.close()
if __name__ == '__main__':
adwords_client = adwords.AdWordsClient.LoadFromStorage()
adwords_client.SetClientCustomerId('XXX-XXX-XXXX')
main(adwords_client)
``` |
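The script above imports pandas but never uses it. As a purely hypothetical follow-up (not part of the original repo), the downloaded CSV can be inspected with it; note that the column headers AdWords writes into the report may differ from the selector field names.
```python
# Hypothetical follow-up: load the report written by main() into pandas.
import pandas as pd
df = pd.read_csv("random.csv")  # file name taken from the script above
print(df.head())
```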
{
"source": "01FinTech/pyalgotrade-cn",
"score": 2
} |
#### File: pyalgotrade-cn/dataframefeed/data_sql.py
```python
from itertools import izip
#import sys
import constant as ct
import pylab as plt
import pandas as pd
import tushare as ts
import numpy as np
import time,os
import sqlalchemy as sa
from pandas import DataFrame
from sqlalchemy import create_engine
from datetime import datetime, timedelta
#reload(sys)
#sys.setdefaultencoding('utf-8')
def set_h_data(start = ct._START_,middle = ct._MIDDLE_,autype="qfq",index=False,retry_count = 3,pause=0):
"""
获取历史交易信息存入数据库中,默认从1994-2015年。若不设定默认取最近一年,其他参数与tushare相同
指数行情tushare其实可以查询,但未提供列表,因此自行构造
Parameters
------
return
"""
_CODE_INDEX = pd.DataFrame({'code':['000001','399001','399006'],'name':['上证指数','深证指数','创业板指数'],'c_name':['指数','指数','指数']})
code_index = _CODE_INDEX.set_index('code')
dat = ts.get_industry_classified()
dat = dat.drop_duplicates('code')
engine = create_engine(ct._ENGINE_)
    dat.to_sql('code',engine,if_exists ='replace') # overwrite the table if it already exists
dat = dat.append(code_index)
    _time_= pd.period_range(start,middle,freq='Y') # _time_[0] is 1994-12-31
_start_ = start
i = 0
for code in dat['code'].values:
i+= 1
        if (dat[dat['code']==code]['c_name'] == "指数").any(): # index rows (SSE/SZSE): query them with index=True
            index = True
for _end_ in _time_:
_end_ = _end_.strftime('%Y-%m-%d')
print i,code,_end_
try:
                _data_ = ts.get_h_data(code,start=_start_,end=_end_,index=index,autype=autype,retry_count=retry_count,pause=pause) # forward-adjusted (qfq) data between the two dates
                #_iterables_ = [[code],_data_.index] # workaround: a MultiIndex would be needed and has to be rebuilt
                #_index_ = pd.MultiIndex.from_product(_iterables_, names=['code', 'date'])
                #_data_ = DataFrame(_data_, index= _index_,columns =_data_.columns)
if _data_ is not None:
_data_['code'] =code
_data_.to_sql('h_data',engine,if_exists='append')
            except Exception,e:
                print e.args[0]
                pass # on failure, move on to the next range (effectively a continue)
_start_ = _end_
def get_h_data(code):
engine = create_engine(ct._ENGINE_)
return pd.read_sql(sa.text('SELECT * FROM h_data where code=:col1'), engine, params={'col1': code},parse_dates=['date'],index_col=['date'])
def set_hist_data(start = None,end = None,ktype = None,retry_count = 3,pause=0):
"""
获取近三年交易信息存入数据库中,不同的ktype存入不同的表,参数与tushare相同
None 即为取近三年
若ktype = None ,则设定为全部
Parameters
------
return
"""
engine = create_engine(ct._ENGINE_)
dat =pd.read_sql_table('code', engine)
dat =dat[dat['c_name']!='指数']['code'].values
dat = dat.tolist()
dat += ['sh','sz','hs300','sz50','zxb','cyb']
i = 0
if ktype is None:
ktype = ['D','W','M','5','15','30','60']
else:
ktype = [ktype]
for key_item in ktype:
i+= 1
for code in dat:
print i,code,key_item
try:
                _data_ = ts.get_hist_data(code,start=start,end=end,ktype=key_item,retry_count=retry_count,pause=pause) # bar data between the two dates
if _data_ is not None:
_data_['code'] =code
_data_.to_sql('hist_data_%s'%key_item,engine,if_exists='append')
            except Exception,e:
                print e.args[0]
                pass # on failure, move on to the next code (effectively a continue)
def get_hist_data(code,ktype="D"):
"""
获取数据库中全部的(hist)交易信息,默认取日线
Parameters
------
return
"""
engine = create_engine(ct._ENGINE_)
return pd.read_sql(sa.text('SELECT * FROM "hist_data_%s" where code=:col1'%ktype), engine, params={'col1': code},parse_dates=['date'],index_col=['date'])
def set_realtime_quotes(code=['sh'],pause = 10):
"""
获取当日所选股票代码的实时数据,code为股票代码列表,pause为每隔多少秒请求一次.从当前时间开始,未测试
将数据存储到数据库中,若当前时间在9:00--15:00之间则实时获取并存入dic{code:dataFrame}中,否则进入睡眠状态
目前睡眠,未考虑是否为交易日
Parameters
------
return list[DataFrame]
"""
engine = create_engine(ct._ENGINE_)
curTime = datetime.now()
startTime = curTime.replace(hour=9, minute=0, second=0, microsecond=0)
endTime = curTime.replace(hour=15, minute=0, second=0, microsecond=0)
delta_s = startTime - curTime
delta_e = endTime - startTime
if delta_s > timedelta(0, 0, 0):
time.sleep(delta_s.total_seconds())
elif delta_e <timedelta(0, 0, 0):
time.sleep(delta_s.total_seconds()+86400)
_data_ = {}
for items in code:
_data_[items] = DataFrame()
while(curTime<endTime):
for item in code:
df = ts.get_realtime_quotes(item) #Single stock symbol
            _data_[item] = _data_[item].append(df) # DataFrame.append returns a new frame; keep the result
time.sleep(pause)
curTime = datetime.now()
for ite in code:
_data_[ite].to_sql('realtime_data',engine,if_exists='append')
return _data_
print get_hist_data('600051')
def set_stock_basics():
"""
获取股本信息存入数据库中
Parameters
------
return
"""
dat = ts.get_stock_basics()
engine = create_engine(ct._ENGINE_)
dat.to_sql('stock_basics',engine,if_exists ='replace')
```
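A minimal usage sketch of the module above (Python 2, like the module itself), assuming the `constant` module defines `_ENGINE_`, `_START_` and `_MIDDLE_` and that the configured database is reachable:
```python
# Hedged usage sketch for data_sql.py.
import data_sql
data_sql.set_stock_basics()        # share-structure basics -> table 'stock_basics'
data_sql.set_h_data()              # history by industry code -> tables 'code' and 'h_data'
data_sql.set_hist_data(ktype='D')  # daily bars (needs the 'code' table created above)
print data_sql.get_hist_data('600051', ktype='D').head()
```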
#### File: cn/tushare/test_tuSharePollingThread.py
```python
from unittest import TestCase
from pandas import DataFrame
from pyalgotrade.tushare.barfeed import TuSharePollingThread
class TestTuSharePollingThread(TestCase):
def test_get_tushare_tick_data(self):
self.fail()
class TestTuSharePollingThread(TestCase):
def test_valid_tick_data_with_right_timestamp(self):
stock_id = '000001'
thread = TuSharePollingThread([stock_id])
data_list = [[u'10.00', u'9.00', u'1000', u'2000', u'14:00:01']]
columns = ['pre_close', 'price', 'volume', 'amount', 'time']
df = DataFrame(data_list, columns=columns)
self.assertTrue(thread.valid_tick_data(stock_id, df.ix[0]))
df.ix[0].time = u'14:00:02'
self.assertTrue(thread.valid_tick_data(stock_id, df.ix[0]))
df.ix[0].time = u'14:00:00'
self.assertFalse(thread.valid_tick_data(stock_id, df.ix[0]))
def test_valid_tick_data_with_right_price(self):
stock_id = '000001'
thread = TuSharePollingThread([stock_id])
data_list = [[u'10.00', u'10.00', u'1000', u'2000', u'14:00:01']]
columns = ['pre_close', 'price', 'volume', 'amount', 'time']
df = DataFrame(data_list, columns=columns)
self.assertTrue(thread.valid_tick_data(stock_id, df.ix[0]))
# price > pre_close * 1.1
df.ix[0].price = u'11.01'
df.ix[0].time = '14:00:03'
self.assertFalse(thread.valid_tick_data(stock_id, df.ix[0]))
# price < pre_close * 0.9
df.ix[0].price = u'8.90'
df.ix[0].time = '14:00:04'
self.assertFalse(thread.valid_tick_data(stock_id, df.ix[0]))
``` |
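These tests rely on `DataFrame.ix`, which was deprecated in pandas 0.20 and removed in 1.0, so they no longer run on a current pandas. A minimal sketch of the equivalent access, leaving everything else in the tests untouched:
```python
# .ix-free equivalents of the row access used in the tests above.
from pandas import DataFrame
df = DataFrame([[u'10.00', u'9.00', u'1000', u'2000', u'14:00:01']],
               columns=['pre_close', 'price', 'volume', 'amount', 'time'])
row = df.iloc[0]                 # positional row lookup replaces df.ix[0]
df.loc[0, 'time'] = u'14:00:02'  # label-based assignment replaces df.ix[0].time = ...
```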
{
"source": "01kazu/tongue",
"score": 2
} |
#### File: tongue/reports/models.py
```python
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.urls import reverse  # needed by Report.get_absolute_url below
# User = settings.AUTH_USER_MODEL
# Create your models here.
class Report(models.Model):
user = models.CharField(max_length=30)
title = models.CharField(max_length=50)
post = models.TextField()
date = models.DateTimeField(auto_now=True)
def get_absolute_url(self):
return reverse("reports:all_posts_detail", kwargs={'pk': self.pk})
# class Profile(models.Model):
# user = models.OneToOneField(User, on_delete=models.CASCADE)
# matric_number = models.CharField(max_length=17)
# @receiver(post_save, sender=User)
# def create_user_profile(sender, instance, created, **kwargs):
# if created:
# Profile.objects.create(user=instance)
# @receiver(post_save, sender=User)
# def save_user_profile(sender, instance, **kwargs):
# instance.profile.save()
``` |
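`get_absolute_url` reverses `reports:all_posts_detail` with a `pk` kwarg, so the project needs a matching named URL. A hypothetical `reports/urls.py` consistent with that call; the actual view name and pattern in the repo may differ:
```python
# Hypothetical reports/urls.py matching the reverse() call above.
from django.urls import path
from . import views
app_name = "reports"
urlpatterns = [
    path("posts/<int:pk>/", views.all_posts_detail, name="all_posts_detail"),
]
```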
{
"source": "01king-ori/password",
"score": 4
} |
#### File: 01king-ori/password/user.py
```python
class Users:
"""
This creates new instances of a user
"""
users_list = []
def __init__(self, name, username, login_password):
'''
__init__ method that helps us save properties of the user
'''
self.name = name
self.username = username
self.login_password = login_password
def save_user(self):
'''
this saves the user info
'''
Users.users_list.append(self)
def delete_user(self):
'''
delete user details
'''
Users.users_list.remove(self)
@classmethod
def find_by_username(cls, username):
'''
Searching for username
'''
        for user in Users.users_list:
            if user.username == username:
                return user
        print("You have entered an invalid username")
@classmethod
def user_exists(cls,username,login_password):
'''
authenticate user username and password
'''
        for user in Users.users_list:
            if user.username == username and user.login_password == login_password:
                return True
        return False
``` |
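A short walk-through of the class above; the account values are illustrative:
```python
# Illustrative use of the Users class.
alice = Users("Alice", "alice01", "s3cret")
alice.save_user()
print(Users.user_exists("alice01", "s3cret"))   # True
print(Users.find_by_username("alice01").name)   # Alice
alice.delete_user()
print(Users.user_exists("alice01", "s3cret"))   # False
```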
{
"source": "01king-ori/pitch-project",
"score": 2
} |
#### File: app/main/views.py
```python
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import User,Pitch,Comment
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db,photos
from flask_login import login_required,current_user
from datetime import datetime
@main.route('/')
def index():
pitches = Pitch.query.order_by(Pitch.time.desc()).all()
return render_template('index.html', pitches = pitches)
@main.route('/add',methods = ['GET','POST'])
@login_required
def add_pitch():
form = PitchForm()
if form.validate_on_submit():
pitch = Pitch(title = form.title.data, pitch = form.pitch.data,user=current_user)
db.session.add(pitch)
db.session.commit()
return redirect(url_for('main.index'))
return render_template('add.html',pitch_form=form)
@main.route('/pitch/<int:id>')
def pitch(id):
pitch = Pitch.query.filter_by(id=id).first()
comments = Comment.get_comments(pitch.id)
return render_template('pitch.html',comments = comments, pitch = pitch)
@main.route('/pitch/comment/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_comment(id):
form = CommentForm()
pitch = Pitch.query.filter_by(id=id).first()
if form.validate_on_submit():
comment = form.comment.data
new_comment = Comment(pitch_comment=comment,pitch_id = pitch.id,user=current_user)
new_comment.save_comment()
return redirect(url_for('.pitch',id = pitch.id))
return render_template('new_comment.html',comment_form=form, pitch=pitch)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET', 'POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form=form)
@main.route('/user/<uname>/update/pic',methods=['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
    return redirect(url_for('main.profile', uname=uname))
```
#### File: migrations/versions/f6893ba0692f_update_pitch_and_user_classes.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6893ba0692f'
down_revision = '0000b85fa4c9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('time', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('bio', sa.String(length=255), nullable=True))
op.add_column('users', sa.Column('email', sa.String(length=255), nullable=True))
op.add_column('users', sa.Column('pass_code', sa.String(length=255), nullable=True))
op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'profile_pic_path')
op.drop_column('users', 'pass_code')
op.drop_column('users', 'email')
op.drop_column('users', 'bio')
op.drop_column('pitches', 'time')
# ### end Alembic commands ###
``` |
{
"source": "01ly/Amipy",
"score": 2
} |
#### File: amipy/commands/runspider.py
```python
import re,string
from amipy.BaseClass import Command
from amipy.exceptions import CommandUsageError
from amipy.core.workstation import WorkStation
class AnyNameYouWant(Command):
requires_project = True
def handle(self, settings, opts, args):
if not all(args):
raise CommandUsageError(self,self.parser)
spider_names = [re.sub('[%s ]'%string.punctuation,'',i) for i in args]
works = WorkStation(settings)
works.work(spider_names=spider_names)
@classmethod
def short_desc(self):
return 'run a specified spider by a given name.'
def help(self):
pass
def syntax(self):
return ' <spider name> [options] args '
```
#### File: amipy/core/crawler.py
```python
import asyncio
from amipy.BaseClass import Crawler,CrawlRequester
from amipy.cmd import _iter_specify_classes
from amipy.middlewares import MiddleWareManager
from amipy.log import getLogger
class WebCrawler(Crawler):
tasks = []
mw_manager = None
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
cls._instance = super(WebCrawler, cls).__new__(cls)
return cls._instance
def __init__(self,settings):
super(WebCrawler,self).__init__()
self.settings = settings
self.logger = getLogger(__name__)
self.semaphore = asyncio.Semaphore(
self.settings['project'].CONCURRENCY)
self._install_requester()
def _install_requester(self):
_cls = {}
_module = self.settings['project'].CRAWLING_REQUESTER_MODULE
for cls in _iter_specify_classes(_module,CrawlRequester):
cls._crawler = self
_cls[cls._down_type]=cls()
self.logger.debug(f'Installed requester "{cls.__name__}".')
self.requesters = _cls
@MiddleWareManager.handle_req
def convert(self,requests):
self.logger.debug(f'Received {len(requests)} Requests.')
tasks = []
for req in requests:
coro = self.requesters[req.down_type].crawl(req)
task = asyncio.ensure_future(coro)
task.add_done_callback(req.delegate_func)
tasks.append(task)
self.tasks.extend(tasks)
self.logger.debug(f'Converted {len(tasks)} Tasks.')
return tasks
@property
def runing_tasks(self):
return [i for i in self.tasks if not i.done()]
@property
def finished_tasks(self):
return [i for i in self.tasks if i.done()]
```
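`WebCrawler.__new__` caches the first instance on the class, so every construction hands back the same object while `__init__` still re-runs each time. A standalone sketch of that pattern, independent of amipy:
```python
class Singleton:
    """Same caching trick as WebCrawler.__new__."""
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_instance'):
            cls._instance = super().__new__(cls)
        return cls._instance
a, b = Singleton(), Singleton()
assert a is b  # both names point at the single cached instance
```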
#### File: amipy/core/looper.py
```python
import asyncio
from inspect import iscoroutine
from amipy.log import getLogger
class Looper(object):
def __init__(self):
self.loop = asyncio.get_event_loop()
self.logger = getLogger(__name__)
def run_coroutine(self,coroutines):
if isinstance(coroutines,list):
tasks = [asyncio.ensure_future(i) for i in coroutines if iscoroutine(i)]
_coroutine = asyncio.gather(*tasks)
elif iscoroutine(coroutines):
_coroutine = coroutines
else:
raise TypeError('Not a coroutine or coroutine list to run with,got "%s".'%type(coroutines).__name__)
try:
results = self.loop.run_until_complete(_coroutine)
except Exception as e:
_coroutine.cancel()
else:
return results
def run_forever(self):
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.stop()
def run_tasks(self,tasks):
if tasks is None:
return
try:
res = self.loop.run_until_complete(asyncio.gather(*tasks))
return res
except KeyboardInterrupt:
self.logger.info('Shutting down Amipy.')
self.stop()
raise StopAsyncIteration
def stop(self):
tasks = asyncio.Task.all_tasks(loop=self.loop)
group = asyncio.gather(*tasks, return_exceptions=True)
group.cancel()
self.loop.stop()
self.logger.debug(f'Gathered {len(tasks)} Tasks.')
self.logger.debug(f'Task Group {group} has been canceled')
self.logger.debug('Stopped looper.')
```
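A minimal sketch of driving `Looper.run_coroutine` with plain coroutines, assuming amipy is importable; the `asyncio.get_event_loop()` call in `__init__` is deprecated outside a running loop on recent Python versions, so this is purely illustrative:
```python
import asyncio
from amipy.core.looper import Looper  # module path inferred from the file location above
async def fetch(n):
    await asyncio.sleep(0.01)
    return n * 2
looper = Looper()
print(looper.run_coroutine([fetch(1), fetch(2)]))  # gathered results: [2, 4]
print(looper.run_coroutine(fetch(5)))              # 10
```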
#### File: amipy/core/scheduler.py
```python
from amipy.log import getLogger
class Scheduler(object):
def __init__(self,settings):
self.req_limits = settings.gets('CONCURRENCY')
self.recv_req = []
self.waiting = False
self.spiders = None
self.logger = getLogger(__name__)
self.logger.debug('Loaded scheduler.')
def receive(self,req_queue):
def any_daemon():
return any(i.status in ['PAUSE','STOP'] for i in self.spiders)
if not self.waiting:
self.logger.debug(f'Requests Queue Size:{req_queue.qsize()}')
if not req_queue.empty():
self.waiting = False
for _ in range(min(self.req_limits,req_queue.qsize())):
self.recv_req.append(req_queue.get_nowait())
req_queue.task_done()
self.logger.debug(f'Left Requests:{req_queue.qsize()}')
else:
self.waiting = True
if all(i.status in ['RUNNING','CLOSE'] for i in self.spiders):
if self._gather_retry():
self.logger.info(f'Start to retry {len(self.recv_req)}'
f' Error and Exception pages.')
return
print('\n* [Done] No Requests to start the crawling.\n')
raise StopAsyncIteration
if any_daemon():
return
def export(self):
_export = []
while self.recv_req:
_export.append(self.recv_req.pop(0))
if not self.waiting:
self.logger.debug(f'Exported {len(_export)} Requests.')
return _export
def spiders_monitor(self,spiders):
self.spiders = spiders
def not_running():
return all([i.status in ['STOP','PAUSE'] for i in spiders])
while not_running():
for i in spiders:
if i.status=='STOP' and not i.stopped:
self.logger.debug(f'Stopping spider {i.name}.')
for req in i.binding_hub.requests._queue:
if req.spider.name == i.name:
i.binding_hub.requests._queue.remove(req)
self.logger.debug(f'Removing request {req}.')
i.stopped = True
continue
if all(i.status=='CLOSE' for i in spiders):
self.logger.info('* All spiders closed.')
raise StopAsyncIteration
for i in spiders:
if i.status == 'RESUME':
self.logger.debug(f'Resuming spider {i.name}.')
i.resume()
if i.status == 'RESTART':
self.logger.debug(f'Restarting spider {i.name}.')
i.restart()
if i.status == 'CLOSE':
self.logger.debug(f'Closing spider {i.name}.')
i.close(True)
def _gather_retry(self):
for i in self.spiders:
if any(i._retries):
while i._retries:
_req = i._retries.pop(0)
self.recv_req.append(_req)
self.logger.info(f'Got {len(self.recv_req)} retry Requests of {i.name}.')
return bool(self.recv_req)
```
#### File: amipy/core/serverthread.py
```python
import asyncio
import time
from threading import Thread
from amipy.BaseClass import SpiderClientCommand
from amipy.cmd import _iter_specify_classes
class SpiderServer(Thread):
def __init__(self,settings,spiders):
super(SpiderServer,self).__init__()
self.spiders = spiders
self.settings = settings
self.loop = asyncio.new_event_loop()
self.host = settings['project'].SPIDER_SERVER_HOST
self.port = settings['project'].SPIDER_SERVER_PORT
self.tool_module = settings['project'].SPIDER_SERVER_COMMANDS_MODULE
self.setDaemon(True)
self.prompt = b"""* Spider-Client commands tool:
\r\n* use "help" to see all the commands usage."""
async def _handle_request(self,reader:asyncio.StreamReader,writer:asyncio.StreamWriter):
writer.write(self.prompt)
while 1:
writer.write(b'\r\n$amipy> ')
client = writer.get_extra_info('peername')
_c = ':'.join(str(i) for i in client)
try:
await writer.drain()
data = await reader.readline()
msg = data.decode().strip()
if msg == 'quit':
print(f'*[Server] {time.ctime()} Connection closed at {_c}')
writer.close()
return
elif msg:
resp = self.parse_opt(msg)
print(f'*[Server] {time.ctime()} Received "{msg}" from {_c}.')
writer.write(resp.encode('latin-1'))
except Exception as e:
print(f'*[Server] {time.ctime()} {e} at {_c}')
writer.close()
if not writer.is_closing():
await writer.drain()
else:
writer.close()
return
def _pop_cmdname(self,msg):
args = [i for i in msg.strip().split(' ') if i]
import string
for _,v in enumerate(args):
if v and v[0] not in string.punctuation:
args.pop(_)
if v in ['help','list'] or(args and args[0][0] not in string.punctuation):
return v,args
return None,None
def _get_all_cmds(self,module):
cmds = {}
for cmd in _iter_specify_classes(module, cmdcls=SpiderClientCommand):
cmdname = cmd.__module__.split('.')[-1]
cmds[cmdname] = cmd
return cmds
def parse_opt(self,msg):
cmdname,args = self._pop_cmdname(msg)
cmds = self._get_all_cmds(self.tool_module)
if not cmds.get(cmdname):
return """\r\n* Command Usage:
\r\n <option> [spider name]
\r\n or: show spiders
"""
data = cmds[cmdname]().parse(cmdname,args,self.spiders)
return data
def serve(self):
coro = asyncio.start_server(self._handle_request,self.host,self.port,loop=self.loop)
server = self.loop.run_until_complete(coro)
addr,port = server.sockets[0].getsockname()
print(f'* Spider server serving on {addr}:{port}.')
print('* Press Ctrl+C to stop the crawling.\n')
try:
self.loop.run_forever()
except (KeyboardInterrupt,StopAsyncIteration):
print('Shutting down spider server.')
server.close()
self.loop.run_until_complete(server.wait_closed())
self.loop.close()
def run(self):
asyncio.set_event_loop(self.loop)
self.serve()
```
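The server speaks a plain line-based text protocol, so `telnet <host> <port>` already works as a client. Below is a hedged asyncio sketch of the same exchange; the host and port are placeholders for whatever `SPIDER_SERVER_HOST`/`SPIDER_SERVER_PORT` are set to in the project settings:
```python
# Minimal client for the spider server above (host/port are placeholders).
import asyncio
async def send_command(cmd, host='127.0.0.1', port=8282):
    reader, writer = await asyncio.open_connection(host, port)
    print((await reader.readuntil(b'$amipy> ')).decode())  # banner + first prompt
    writer.write(cmd.encode() + b'\r\n')
    await writer.drain()
    print((await reader.readuntil(b'$amipy> ')).decode())  # command output + next prompt
    writer.write(b'quit\r\n')
    await writer.drain()
    writer.close()
asyncio.run(send_command('help'))
```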
#### File: amipy/core/spiderhub.py
```python
import amipy
from amipy.BaseClass import Hub
from amipy.middlewares import MiddleWareManager
from amipy.util.load import load_py
from amipy.log import getLogger
class SpiderHub(Hub):
def __new__(cls, *args, **kwargs):
if not hasattr(cls,'_instance'):
cls._instance = super(SpiderHub, cls).__new__(cls)
return cls._instance
def __init__(self,settings,crawler):
super(SpiderHub, self).__init__()
self.settings = settings
self._success_counter = 0
self._failed_counter = 0
self._exception_counter = 0
self.active = False
self.looper = None
self._crawler = crawler
self.logger = getLogger(__name__)
self._set_queue()
def _set_queue(self):
_queue = self.settings.gets('PROJECT_REQUESTS_QUEUE')
self.requests = load_py(_queue)()
self.logger.debug(f'Loaded Requests Queue:{type(self.requests).__name__}')
def start(self,looper):
self.looper = looper
self.active = True
for i in self.spiders:
for seed in i.start_requests():
i.status = 'RUNNING'
if isinstance(seed, amipy.Request):
self.requests.put_nowait(seed)
if self.requests.empty():
            print(f'* No start requests. Shutting down Amipy.\r\n')
raise StopAsyncIteration
self.logger.info(f'Got {self.requests.qsize()} start requests.')
def takeover(self,spiders):
self.spiders =spiders
self.logger.debug(f'Takeover:{[i.name+":"+i.__class__.__name__ for i in spiders]}')
self._binding()
def _binding(self):
for spider in self.spiders:
spider.binding_hub = self
spider.status = 'BOUND'
self.priorities += spider.priority
def accept(self,request):
_all_req = []
if isinstance(request,list):
for req in request:
if not isinstance(req, amipy.Request):
continue
else:
_all_req.append(req)
elif isinstance(request, amipy.Request):
_all_req.append(request)
return _all_req
@MiddleWareManager.handle_resp
def delegate(self,response):
_res = []
req = response.request
spider = response.spider
if response.status == 200:
self._success_counter += 1
spider._success += 1
self.logger.info(f'[Success]{spider.name} {req.method}-{req.url}')
a = self.accept(response.callback(response))
elif response.status == -1:
self._exception_counter += 1
spider._exc +=1
self.logger.info(f'[{response.exception.__class__.__name__}] {spider.name}'
f' {req.method}-{req.url} ')
a = self.accept(response.excback(response))
else:
self._failed_counter += 1
spider._fail += 1
self.logger.info(f'[{response.status} Error]{spider.name} {req.method}-{req.url}')
a = self.accept(response.errback(response))
_res.extend(a)
[self.requests.put_nowait(i) for i in _res if i]
def __str__(self):
return f'<SpiderHub obj at {hex(id(self))} active:{self.active}' \
f' [spiders:{len(self.spiders)} success:{self._success_counter} ' \
f'fail:{self._failed_counter} exc:{self._exception_counter}]>'
```
#### File: crawl/requester/common.py
```python
import asyncio
from amipy import Response
from amipy.BaseClass import CrawlRequester
from amipy.util.http import send_async_http
class CommonRequester(CrawlRequester):
_down_type = 'text/html'
async def crawl(self,request):
delay = request.delay
url = request.url
session = request.session
try:
async with self._crawler.semaphore:
resp = await send_async_http(session,
request.method,
url,
timeout=request.timeout,
headers=request.headers,
params=request.params,
data=request.data,
retries=request.retry,
proxy=request.proxy)
_resp = resp['resp']
body = resp['body']
exception = resp['exception']
if exception:
return Response(url, status=-1, request=request, exc=exception)
await asyncio.sleep(delay)
return Response(url,request=request,body=body,_resp=_resp)
except asyncio.CancelledError:
print(f'Task "{request}" canceled.')
return Response(url, status=0, request=request)
except Exception as e:
return Response(url, status=-1, request=request, exc=e.__class__())
```
#### File: crawl/requester/media.py
```python
import os
import asyncio
from amipy import Response
from amipy.BaseClass import CrawlRequester
from amipy.util.http import send_async_http
from amipy.util.file import get_file_size
from amipy.log import getLogger
class MediaRequester(CrawlRequester):
_down_type = 'media'
logger = getLogger(__name__)
async def crawl(self,request):
delay = request.delay
url = request.url
session = request.spider.session
proxy = request.proxy
buffer = request.spider.settings.DEFAULT_DOWNLOAD_BUFFER
path = os.path.normpath(request.save_path)
if not os.path.exists(os.path.dirname(path)):
self.logger.error(f'No path:{os.path.dirname(path)}.')
return
name = os.path.basename(path)
try:
self.logger.info(f'Downloading {name}.')
async with self._crawler.semaphore:
resp = await send_async_http( session,
request.method,
url,
path=path,
retries=request.retry,
timeout=request.timeout,
proxies=proxy,
buffer=buffer
)
if resp is None:
return
body = resp['body']
exception = resp['exception']
if exception and body != True:
return Response(url, status=-1, request=request, exc=exception)
await asyncio.sleep(delay)
size = get_file_size(size=int(resp['size']))
self.logger.info(f'Finished downloading:[{name} {size}]')
return
except asyncio.CancelledError:
print(f'Task "{request}" canceled.')
return Response(url, status=0, request=request)
except Exception as e:
return Response(url, status=-1, request=request, exc=e.__class__())
```
#### File: amipy/middlewares/CrawlFilter.py
```python
import re
import os
from amipy.util.filter import _to_md5,_to_feature
from amipy.middlewares import Middleware
from amipy.exceptions import DropRequest,DropResponse
from amipy.BaseClass import Fingerprint
from bs4 import BeautifulSoup as bs
class CrawlFilterMiddleware(Middleware):
def process_request(self,request):
spider = request.spider
url = request.url
_flag = self._rules_effect(url,spider)
if _flag is None:
if not request.filter:
return request
elif not _flag:
return request
_filter = spider.urlfilter
_feature = _to_feature(request)
if _feature in _filter:
raise DropRequest
else:
_filter.add(_feature)
return request
def _rules_effect(self,url,spider,mode=0):
rules = spider.rules
for URL in rules:
if URL.match(url):
if mode==0:
if URL.filter != None:
return bool(URL.filter)
else:
URL_FP = URL.fingerprint
if isinstance(URL_FP,bool):
return URL_FP
if URL_FP != None and \
callable(getattr(spider,URL_FP,False)):
return getattr(spider,URL_FP)
return None
def process_response(self,response):
url = response.url
spider = response.spider
if response.status != 200 and spider.urlfilter:
spider.urlfilter.delete(_to_feature(response.request))
if response.status == -1:
return response
self.spider = spider
_flag = self._rules_effect(url,spider,1)
if _flag is None:
if not response.resp_filter:
return response
elif not _flag:
return response
if callable(response.fingerprint):
_func = response.fingerprint if \
not callable(_flag) else _flag
elif isinstance(response.fingerprint,str) and \
callable(getattr(spider,response.fingerprint)):
_func = getattr(spider,response.fingerprint)
else:
raise ValueError('Not a valid fingerprint.')
fingerprint = _func(response)
if fingerprint.text is None or \
not isinstance(fingerprint,Fingerprint):
_fingerprint = response.read()
else:
_fingerprint = fingerprint.text
if fingerprint.precise:
_feature = _to_md5(_fingerprint)
else:
_feature = self._to_analyse(_fingerprint,
spider.settings.BLOOMFILTER_HTML_EXTRACTS)
if _feature in spider.respfilter:
raise DropResponse
else:
spider.respfilter.add(_feature)
return response
def _to_analyse(self,fingerprint,extract_list):
if fingerprint is None:
raise DropResponse
if len(fingerprint)<180:
return _to_md5(fingerprint)
html = bs(fingerprint,'lxml')
[i.extract() for j in extract_list for i in html(j) ]
_text = html.body.text
text = re.sub('\n','',_text).replace('\r','')
if len(text)<180:
return _to_md5(fingerprint)
lines_content = _text.splitlines()
res_dict = self._extract_content(lines_content)
if not res_dict:
return _to_md5(text)
else:
keys = sorted(res_dict.keys(),key=lambda x:-x)[:2]
texts =''.join([res_dict[i] for i in keys])
return _to_md5(texts)
def _extract_content(self,lines_content):
gap = self.spider.settings.BLOOMFILTER_HTML_GAP
threshold = self.spider.settings.BLOOMFILTER_HTML_THRESHOLD
density = self.spider.settings.BLOOMFILTER_HTML_DENSITY
results = {}
comobo_num = 0
combo_len = 0
combo_null = 0
combo_text = ''
pre_len = 0
for i in lines_content:
if i.strip():
pre_len = len(i)
comobo_num += 1
combo_null = 0
combo_len += pre_len
combo_text = combo_text + i + os.linesep
if len(lines_content) == 1 and pre_len >= density * threshold:
results[pre_len] = combo_text
else:
combo_null += 1
if pre_len:
if combo_null > gap:
if combo_len >= density * threshold \
and comobo_num >= threshold:
results[combo_len] = combo_text
else:
continue
comobo_num = 0
combo_len = 0 if combo_null > gap else combo_len
pre_len = 0
combo_text = '' if combo_null > gap else combo_text
return results
```
#### File: middlewares/requestwares/ListHandle.py
```python
import w3lib.url as urltool
from amipy.middlewares import Middleware
from amipy.exceptions import DropRequest
from amipy import Url
class UrlListHandleMiddleware(Middleware):
def process_request(self,request):
spider = request.spider
url = request.url
blacklist = spider.blacklist
whitelist = spider.whitelist
if self._is_in(url,blacklist):
raise DropRequest
if any(whitelist) and not \
self._is_in(url,whitelist):
raise DropRequest
return request
def _is_in(self,_url,_list):
def _tran(url):
return urltool.canonicalize_url(
urltool.safe_download_url(url), encoding='utf-8')
for url in _list:
if isinstance(url,Url):
if url.match(_url):
return True
elif _tran(url) == _url:
return True
```
#### File: middlewares/requestwares/Rules.py
```python
from amipy.middlewares import Middleware
from amipy.exceptions import DropRequest
from amipy import Url
class RulesHandleMiddleware(Middleware):
def process_request(self,request):
spider = request.spider
url = request.url
rules = [i for i in spider.rules if isinstance(i,Url)]
for U in rules:
if U.match(url):
if U.drop:
raise DropRequest
if U.cookies:
request._load_cookies(U.cookies)
if U.cb:
cb = getattr(spider,U.cb,None)
if callable(cb):
request.callback = cb
if bool(U.obey_robots_txt):
request.obey_robots_txt = True
elif U.obey_robots_txt == False:
request.obey_robots_txt = False
if U.down_type != None:
request.down_type = U.down_type
if U.proxy != None:
request.proxy = U.proxy
if U.proxy_auth != None:
request.proxy_auth = U.proxy_auth
break
else:
if U.unmatch is None:
continue
cb = getattr(spider,U.unmatch,None)
if callable(cb):
request.callback = cb
return request
```
#### File: Amipy/amipy/response.py
```python
import amipy
import json
import aiohttp
import w3lib.url as urltool
from amipy.exceptions import NotValidJsonContent
class mdict(dict):
def __getattr__(self, item):
try:
a = self[item]
except:
return None
return a
class Response(object):
def __init__(self,url,
status=None,
headers=None,
request=None,
priority=0,
encoding='utf-8',
body=None,
exc = None,
cookies=None,
_resp=None
):
assert isinstance(_resp,aiohttp.ClientResponse) or _resp is None,\
f'_resp of a Response must be a aiohttp.ClientResponse,' \
f'got {type(_resp).__name__}.'
assert isinstance(request, amipy.Request),\
'not a valid Request for Response,got "%s".'%type(request).__name__
if exc is not None:
if not isinstance(exc, Exception):
raise TypeError('Not an valid Exception for Response,got "%s".'
% type(exc).__name__)
self.request = request
self.spider = request.spider
self.callback = request.callback
self.errback = request.errback
self.excback = request.excback
self.priority = priority
self._body = body
self.exception = exc
self.resp = _resp
self.msg = _resp.reason if _resp else None
self.headers = _resp.headers if _resp and headers is None else headers
self.content_type = _resp.content_type if _resp else None
self.history = _resp.history if _resp else None
self.encoding = _resp.charset if _resp else encoding
self.__cookies = _resp.cookies if _resp else None
self.status = _resp.status if _resp and status is None else status
self.http_ver = _resp.version if _resp else None
self.request_info = _resp.request_info if _resp else None
self.cookies = cookies if cookies else self.__cookies
fingerprint = request.fingerprint
self.resp_filter = bool(fingerprint) if fingerprint != None \
else self.spider.settings.BLOOMFILTER_HTML_ON
self.fingerprint = fingerprint if fingerprint != None and \
not isinstance(fingerprint,bool) \
else self.spider.fingerprint
self.meta =mdict(request.kwargs_cb)
self._set_url(url)
def _set_url(self,url):
self.url = urltool.canonicalize_url(
urltool.safe_download_url(url),encoding=self.encoding)
def text(self,encoding=None):
encoding = encoding if encoding else self.encoding
if encoding is None:
encoding='utf-8'
if isinstance(self._body,bytes):
return str(self._body,encoding=encoding)
return self._body
def json(self):
try:
res = json.loads(self._body)
return res
except json.decoder.JSONDecodeError as e:
raise NotValidJsonContent(e)
def read(self,encoding=None):
encoding = encoding if encoding else self.encoding
if encoding is None:
encoding='utf-8'
if isinstance(self._body,str):
return bytes(self._body,encoding=encoding)
return self._body
@property
def url(self):
return self.__url
@url.setter
def url(self,url):
#only starts with schema:file,http,https allowed to be a valid url
if not urltool.is_url(url):
raise ValueError('Not a valid url for Request.')
else:
self.__url = urltool.safe_download_url(url)
def __str__(self):
return '<Response obj at %s [status=%d url=%s] >'\
%(hex(id(self)),self.status,self.url)
```
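The small `mdict` helper turns missing attribute lookups into `None` instead of an exception, which is how `response.meta` tolerates absent keys. A quick sketch, assuming `amipy.response` is importable:
```python
from amipy.response import mdict
meta = mdict({'page': 2})
print(meta.page)     # 2
print(meta.missing)  # None rather than an AttributeError
```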
#### File: amipy/subcmd/restart.py
```python
import threading, time
from amipy.BaseClass import SpiderClientCommand
class SCommand(SpiderClientCommand):
def parse(self,cmdname,args,spiders):
argv = args[0]
urls = args[1:]
if argv == 'spiders':
return '\r\n Not a valid usage of command: restart <spider name> [restart url] [restart url]'
d = {i.name:i for i in spiders}
if argv in d:
spider = d[argv]
if spider.status == 'RUNNING':
return f"* Spider {argv} is running,it can not restart. "
elif spider.status == 'STOP':
lock = threading.Lock()
lock.acquire()
spider.status = 'RESTART'
spider._meta['restart_urls'] = urls
spider._restart_at = time.ctime()
lock.release()
return f'\r\n* Spider {argv} restarted at {spider._restart_at} successfully.'
elif spider.status == 'CLOSE':
return f'* Spider {argv} is closed.'
elif spider.status == 'PAUSE':
return f'* Spider {argv} is paused.Using "resume" command to resume it.'
else:
return f'* Invalid resuming status "{spider.status}" for a spider.'
else:
return f'* No spider "{argv}" in the project.'
```
#### File: amipy/util/proxy.py
```python
import re
def gen_proxy(proxy,kind='text/html'):
auth = extract_auth(proxy)
_proxy = extract_ip_port(proxy)
proxy = _proxy.strip('\n')
if kind == 'text/html':
return f'http://{auth}{proxy}'
if kind == 'media':
return {
'http' : f'http://{auth}{proxy}',
'https' : f'https://{auth}{proxy}',
}
def is_proxy_valid(proxy):
return re.findall(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b\:\d+',proxy)
def extract_auth(proxy):
res = re.findall(r'//(.+)@',proxy)
if res:
return res[0]+'@'
return ''
def extract_ip_port(proxy):
if isinstance(proxy,dict):
http_proxy = proxy['http']
return is_proxy_valid(http_proxy)[0]
else:
http_proxy = is_proxy_valid(proxy)
if http_proxy:
return http_proxy[0]
else:
return proxy
``` |
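These helpers are pure functions over proxy strings, so they can be exercised in isolation; the endpoints below are made up:
```python
# Exercising the proxy helpers above with made-up endpoints.
from amipy.util.proxy import gen_proxy
print(gen_proxy('http://user:pass@10.0.0.1:8080'))
# -> 'http://user:pass@10.0.0.1:8080'
print(gen_proxy('10.0.0.1:8080', kind='media'))
# -> {'http': 'http://10.0.0.1:8080', 'https': 'https://10.0.0.1:8080'}
```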
{
"source": "01ly/InsBot",
"score": 2
} |
#### File: InsBot/core/instagram.py
```python
import time
import copy
from config import *
from settings import *
from util.instagram import *
from core.user import User
from core.login import Login
from core.log import getLogger
from core.dbhelper import Database
from core.obj import BinaryImage,ImageHandler
from decorator import login_required,force_type
from decorator.instagram import check,switch,choose
from util.http import send_request
from util import md5
from urllib3 import encode_multipart_formdata
logger = getLogger(__name__)
class Instagram:
def __init__(self,user=None,pwd=<PASSWORD>):
self.user = user
self.pwd = <PASSWORD>
self.session = None
self.Login = None
self.current_user = None
self.logined = False
self.official = None
self.user_id = None
self._app_id = None
self._query_id = None
self._marked_id = None
self._page_hash = None
self._tv_hash = None
self._fans_hash = None
self._tag_hash = None
self._liker_hash = None
self._comment_hash = None
self._channel_hash = None
self._following_hash = None
self._following_tag_hash = None
self._comment_liker_hash = None
self._web_app_id = None
self._rhx_gis = None
self.__opt__ = None
self._info = None
self.db = Database(MONGODB)
def login(self,user=USERNAME,pwd=PASSWORD):
if not (self.user and self.pwd ):
self.user = user
self.pwd = <PASSWORD>
self.Login = Login(self.user,self.pwd)
self.logined = self.Login.login()
self.session = self.Login.session
self.user_id = self.Login.Id
def logout(self):
if self.logined:
send_request(API_LOGOUT,
method='get',
session=self.session,
headers=self.logined_headers,
proxy=PROXY_GLOBAL)
if not self.session.cookies.get_dict().get('sessionid'):
logger.info('Logout successfully~')
@choose(None,api=1,method='get',out=True)
def info(self,details=False):
if not details:
url = API_USER_JSON_INFO.format(userId=self.user_id)
else:
url = API_USER_INFO.format(username=self.user)
tips = {
'ok':f'Get account infos successfully!',
'failed':f'Get account infos failed.',
}
return {'url':url,},tips
@property
def app_id(self):
return self._app_id if self._app_id else self.__get_attr('_app_id')
@property
def web_app_id(self):
return self._web_app_id if self._web_app_id else self.__get_attr('_web_app_id')
@property
def query_id(self):
return self._query_id if self._query_id else self.__get_attr('_query_id')
@property
def marked_id(self):
return self._marked_id if self._marked_id else self.__get_attr('_marked_id')
@property
def page_hash(self):
return self._page_hash if self._page_hash else self.__get_attr('_page_hash')
@property
def channel_hash(self):
return self._channel_hash if self._channel_hash else self.__get_attr('_channel_hash')
@property
def tv_hash(self):
return self._tv_hash if self._tv_hash else self.__get_attr('_tv_hash')
@property
def tag_hash(self):
return self._tag_hash if self._tag_hash else self.__get_attr('_tag_hash')
@property
def liker_hash(self):
return self._liker_hash if self._liker_hash else self.__get_attr('_liker_hash')
@property
def comment_liker_hash(self):
return self._comment_liker_hash if self._comment_liker_hash else self.__get_attr('_comment_liker_hash')
@property
def following_tag_hash(self):
return self._following_tag_hash if self._following_tag_hash else self.__get_attr('_following_tag_hash')
@property
def rhx_gis(self):
return self._rhx_gis if self._rhx_gis else self.__get_attr('_rhx_gis')
@property
def comment_hash(self):
return self._comment_hash if self._comment_hash else self.__get_attr('_comment_hash')
@property
def fans_hash(self):
return self._fans_hash if self._fans_hash else self.__get_attr('_fans_hash')
@property
def following_hash(self):
return self._following_hash if self._following_hash else self.__get_attr('_following_hash')
@property
def keyparams(self):
_ = ['app_id','query_id','marked_id','following_hash','tv_hash','channel_hash',
'fans_hash','page_hash','comment_hash','rhx_gis','web_app_id','tag_hash',
'following_tag_hash','liker_hash','comment_liker_hash']
return {i:getattr(self,i) for i in _}
@property
def logined_headers(self):
if not self.logined:
self.login()
headers = self.Login.headers
headers['x-ig-app-id'] = self.app_id
return headers
def get_user(self,username):
return User(username,self)
@choose(None,api=1,method='get',login=False,out=True)
def get_user_info_by_id(self,user_id):
url = API_USER_JSON_INFO.format(userId=user_id)
tips = {
'ok':f'Get user (id:{user_id}) infos successfully!',
'failed':f'Get user (id:{user_id}) infos failed.',
}
return {'url':url},tips
@check()
@login_required
def get_user_fans(self,username,delay=DELAY,count=-1,save=False,path=None,tname=None):
_user = User(username,self)
fans = _user.get_fans(delay=delay,count=count,save=save,path=path,tname=tname)
return fans
@check()
@login_required
def get_user_followings(self,username,delay=DELAY,count=-1,save=False,path=None,tname=None):
_user = User(username,self)
followings = _user.get_followings(delay=delay,count=count,save=save,path=path,tname=tname)
return followings
@choose('query',method='get',out=True)
def get_following_tags(self):
params = {
'query_hash':self.following_tag_hash,
'variables':'{"id":%s}'%self.user_id,
}
tips={
'ok':f'Get self following tags successfully!',
'failed':f'Get self following tags failed.'
}
return {'params':params},tips
@choose('query',method='get',out=True)
def get_user_following_tags(self,username):
_user = self.get_user(username)
params = {
'query_hash':self.following_tag_hash,
'variables':'{"id":%s}'%_user.Id,
}
tips={
'ok':f'Get user "{username}" following tags successfully!',
'failed':f'Get user "{username}" following tags failed.'
}
return {'params':params},tips
@check()
@login_required
def get_user_channel_posts(self,username,delay=DELAY,count=-1,save=False,path=None,tname=None):
_user = User(username,self)
posts = _user.get_channel_posts(delay=delay,count=count,save=save,path=path,tname=tname)
return posts
@check()
def get_user_posts(self,username,delay=DELAY,count=-1,save=False,path=None,tname=None):
_user = User(username,self)
posts = _user.get_posts(delay=delay,count=count,save=save,path=path,tname=tname)
return posts
@check()
def get_user_tagged_posts(self,username,delay=DELAY,count=-1,save=False,path=None,tname=None):
_user = User(username,self)
posts = _user.get_tagged_posts(delay=delay,count=count,save=save,path=path,tname=tname)
return posts
@check(MONGODB['comments'])
def get_page_comments(self,shortcode,delay=DELAY,count=-1,save=False,path=None,tname=None):
results = []
_count = 0
page = self.get_page_info(shortcode)
comment_card = page['graphql']['shortcode_media']['edge_media_to_comment']
total = comment_card['count']
page_info = comment_card['page_info']
top_comments = comment_card['edges']
end_cursor = page_info['end_cursor']
has_next = page_info['has_next_page']
headers = COMMON_HEADERS
headers['x-ig-app-id']=self.app_id
headers['referer'] = API_PICTURE_PAGE.format(shortcode=shortcode)
_check = count if count > 0 else total
for i in top_comments:
if save:
self.db.save(i,tname=tname)
results.append(i)
_count += 1
if (_count >= count or _count >= total) and (count > 0):
logger.info(f'[Done]Get crawled comments of page:"{shortcode}":{len(results)}.[Total({total})]')
return results
if not has_next:
logger.info(f'[Done]Get crawled comments of page:"{shortcode}":{len(results)}.[Total({total})]')
return results
while 1:
if not end_cursor:
logger.info(f'[Done]Get crawled comments of page:"{shortcode}":{len(results)}.[Total({total})]')
break
params = copy.deepcopy(COMMENTS_PARAMS)
params['query_hash']=self.comment_hash
params['variables']=params['variables'].replace('$',end_cursor).replace('%',shortcode)
md5ed = md5(self.rhx_gis + ":" + params['variables'])
headers['x-instagram-gis']=md5ed
response = send_request(API_USER_POSTS,
params=params,
headers=headers,
delay=delay,
proxy=PROXY_GLOBAL,
json=True)
json_data = response.json()
data = json_data['data']['shortcode_media']['edge_media_to_comment']['edges']
page_info = json_data['data']['shortcode_media']['edge_media_to_comment']['page_info']
for i in data:
if save:
self.db.save(i,tname=tname)
results.append(i)
_count += 1
if (_count >= count or _count >= total) and (count > 0):
logger.info(f'[Done]Get crawled comments of page:"{shortcode}"'
f':{len(results)}.[Total({total})]')
return results
logger.info(f'Current crawled comments of page "{shortcode}"'
f':{len(results)}.[{round(len(results)/_check,4)*100 if _check else 0}%]')
end_cursor = page_info['end_cursor']
if not page_info['has_next_page']:
logger.info(f'[Done]Get crawled comments of page:"{shortcode}"'
f':{len(results)}.[Total({total})]')
break
return results
@choose(None,api=1,method='get',login=False,out=True)
def get_page_info(self,shortcode):
url = API_ACCESS_PAGE.format(shortcode=shortcode)
tips = {
'ok':f'Get media page which shortcode is "{shortcode}" successfully!',
'failed':f'Get media page which shortcode is "{shortcode}" failed.',
}
return {'url':url},tips
@choose('get_recommends',method='get',login=False,out=True)
def get_recommends_by_keyword(self,keyword):
params = copy.deepcopy(SEARCH_PARAMS)
params['rank_token']=random.randrange(1,10**18)/10**18
params['query']=keyword
tips = {
'ok':f'Get recommends of "{keyword}" successfully!',
'failed':f'Get recommends of "{keyword}" failed.',
}
return {'params':params},tips
@check(MONGODB['under_tag'])
def get_posts_by_tag(self,tag,delay=DELAY,top_only=True,count=-1,save=False,tname=None,path=None):
url = API_TAG_POSTS.format(tag=tag)
response = send_request(url,json=True)
data = response.json()
hashtags = data['graphql']['hashtag']
media_posts = hashtags['edge_hashtag_to_media']
top_posts = hashtags['edge_hashtag_to_top_posts']['edges']
total = media_posts['count']
current_posts = media_posts['edges']
page_info = media_posts['page_info']
end_cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
results = []
_count = 0
_check = count if count > 0 else total
headers=COMMON_HEADERS
headers['x-ig-app-id']=self.app_id
logger.info(f'Total posts of tag "{tag}":{total}')
if top_only:
for i in top_posts:
if save:
self.db.save(i,tname=tname)
return top_posts
else:
for i in current_posts:
_count+=1
results.append(i)
if (_count>=count or _count>=total) and (count>0):
logger.info(f'[Done]Total crawled posts of tag "{tag}":{len(results)}')
return results
if save:
self.db.save(i,tname=tname)
while 1:
if not has_next_page:
return results
params = copy.deepcopy(TAG_PARAMS)
params['query_hash']=self.tag_hash
params['variables']=params['variables'].replace('$',tag).replace('%',end_cursor)
md5ed = md5(self.rhx_gis + ":" + params['variables'])
headers['x-instagram-gis']=md5ed
response = send_request(API_USER_POSTS,
params=params,
delay=delay,
headers=headers,
json=True)
data = response.json()
hashtags = data['data']['hashtag']
media_posts = hashtags['edge_hashtag_to_media']
current_posts = media_posts['edges']
page_info = media_posts['page_info']
end_cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
logger.info(f'Amount of current crawled posts of tag "{tag}"'
f':{len(results)}.[{round(len(results)/_check,4)*100 if _check else 0}%]')
for i in current_posts:
_count+=1
results.append(i)
if (_count>=count or _count>=total) and (count>0):
logger.info(f'[Done]Total crawled posts of tag "{tag}":{len(results)}')
return results
if save:
self.db.save(i,tname=tname)
@switch('follow_tag',mode='tag')
def follow_tag(self,tag):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Follow tag "{tag}" successfully!')
else:
logger.info(f'Follow tag "{tag}" failed!')
@switch('unfollow_tag',mode='tag')
def unfollow_tag(self,tag):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Unfollow tag "{tag}" successfully!')
else:
logger.info(f'Unfollow tag "{tag}" failed!')
@switch('follow')
def follow(self,username):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Follow user "{username}" successfully!')
else:
logger.info(f'Follow user "{username}" failed!')
@switch('unfollow')
def unfollow(self,username):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Unfollow user "{username}" successfully!')
else:
logger.info(f'Unfollow user "{username}" failed!')
@switch('block')
def block(self,username):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Block user "{username}" successfully!')
else:
logger.info(f'Block user "{username}" failed!')
@switch('unblock')
def unblock(self,username):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Unblock user "{username}" successfully!')
else:
logger.info(f'Unblock user "{username}" failed!')
@choose('like_media',out=True)
def like(self,media_id=None,short_code=None):
if not any([media_id,short_code]):
raise Exception(f'there must be one param not None at least.')
if not media_id:
info = self.get_page_info(short_code)
media_id = info.get('graphql')['shortcode_media']['id']
url = API_PAGE_LIKE.format(pid=media_id)
tips = {
'ok':f'Like media (id:{media_id}) successfully!',
'failed': f'Like media (id:{media_id}) failed!'
}
return {'url':url},tips
@choose('like_media', out=True)
def unlike(self, media_id=None, short_code=None):
if not any([media_id, short_code]):
raise Exception(f'there must be one param not None at least.')
if not media_id:
info = self.get_page_info(short_code)
media_id = info.get('graphql')['shortcode_media']['id']
url = API_PAGE_UNLIKE.format(pid=media_id)
tips = {
'ok': f'Unlike media (id:{media_id}) successfully!',
            'failed': f'Unlike media (id:{media_id}) failed!'
}
return {'url': url}, tips
@choose(None,api=1)
def like_comment(self,comment_id):
url = API_COMMENT_LIKE.format(cid=comment_id)
tips = {
'ok': f'Like a comment (id:{comment_id}) successfully!',
'failed': f'Like a comment (id:{comment_id}) failed.'
}
return {'url': url}, tips
@choose(None,api=1)
def unlike_comment(self,comment_id):
url = API_COMMENT_UNLIKE.format(cid=comment_id)
tips = {
'ok': f'Unlike a comment (id:{comment_id}) successfully!',
'failed': f'Unlike a comment (id:{comment_id}) failed.'
}
return {'url': url}, tips
@force_type({1:str,'to':str})
@choose(None,api=1,out=True)
def add_comment(self,text,media_id=None,short_code=None,to=None):
if not any([media_id, short_code]):
raise Exception(f'there must be one param not None in (media_id,short_code) at least.')
if not media_id:
info = self.get_page_info(short_code)
media_id = info.get('graphql')['shortcode_media']['id']
data = {
'comment_text':text,
}
if to:
data['replied_to_comment_id']=to
url = API_ADD_COMMENT.format(mediaId=media_id)
tips = {
'ok':f'Add comment for media(id:{media_id}) {"to "+to if to else ""} successfully!',
'failed': f'Add comment for media(id:{media_id}) {"to " + to if to else ""} failed!',
}
return {'url':url,'data':data},tips
@switch('set_private',{'is_private':'true'})
def set_private(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Set your account as a private account successfully!')
else:
logger.info(f'Set your account as a private account failed!')
@switch('set_private',{'is_private':'false'})
def unset_private(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Unset your account as a private account successfully!')
else:
logger.info(f'Unset your account as a private account failed!')
@switch('set_presence',{'presence_disabled':'true'})
def disable_presence(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Disable your presence successfully!')
else:
            logger.info(f'Disable your presence failed!')
@switch('set_presence',{'presence_disabled':'false'})
def enable_presence(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Enable your presence successfully!')
else:
            logger.info(f'Enable your presence failed!')
@switch('set_reshare',{'disabled':'1'})
def disable_share(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Disable people share your story as messages successfully!')
else:
logger.info(f'Disable people share your story as messages failed!')
@switch('set_reshare',{'disabled':'0'})
def enable_share(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Enable people share your story as messages successfully!')
else:
logger.info(f'Enable people share your story as messages failed!')
@force_type({'keywords':list,'default':bool})
@switch('set_filter')
def set_comment_filter_keywords(self,keywords=[],default=False):
res = self.__opt__
if default:
show = 'by default'
else:
show = f'not in default mode'
if keywords:
show = f':{keywords}'
if res and res['status'] == 'ok':
logger.info(f'Set your comments filter keywords {show} successfully!')
else:
logger.info(f'Set your comments filter keywords failed!')
@force_type({1:str})
@choose('upload_pic')
def upload_profile_picture(self,path_or_url):
pic = BinaryImage(path_or_url).to_binary()
data = {'profile_pic':('profilepic.jpg',pic,'image/jpeg')}
encode_data = encode_multipart_formdata(data)
headers = {
'content-length':str(len(pic)),
'Content-Type':encode_data[1],
}
tips = {
'ok':'Upload your profile picture successfully!',
'failed':'Upload your profile picture failed!',
}
return {'data':encode_data[0],'headers':headers},tips
@force_type({1:str})
@choose('upload_pic',api=API_UPLOAD_PHOTO)
def upload_picture(self,path_or_url):
pic = BinaryImage(path_or_url).to_binary()
data = {
'upload_id':str(int(time.time() * 1000)),
'photo':('photo.jpg',pic,'image/jpeg'),
}
encode_data = encode_multipart_formdata(data)
headers = {
'content-length':str(len(pic)),
'Content-Type':encode_data[1],
}
tips = {
'ok':'Upload your photo successfully!',
'failed':'Upload your photo failed!',
}
return {'data':encode_data[0],'headers':headers},tips
@choose('reset_password')
def reset_password(self,new_pwd):
data = copy.deepcopy(PASSWORD_PARAMS)
data['old_password']=<PASSWORD>
data['new_password1']=<PASSWORD>
data['new_password2']=<PASSWORD>
tips = {
'ok':'Reset your password successfully!',
'failed':'Reset your password failed!',
}
return {'data':data},tips
@choose('upload_pic')
def remove_profile_picture(self):
tips = {
'ok':'Remove your profile picture successfully!',
'failed':'Remove your profile picture failed!',
}
return {'data':None,'headers':{},},tips
@choose('get_push_info',method='get',produce=False)
def get_push_info(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'Get push info successfully:{res["body"] if res["body"] else "0 push."}')
else:
logger.info(f'Get push info failed!')
return res
@choose('get_activity',method='get',produce=False)
def get_activity_notify(self):
return self.__opt__
@choose('mark_checked',data={'timestamp':str(time.time())},produce=False)
def mark_checked(self):
res = self.__opt__
if res and res['status'] == 'ok':
logger.info(f'mark checked successfully.')
else:
logger.info(f'mark checked failed!')
@choose('location_search',method='get',callback=get_location_params)
def search_location(self,location,latitude=None,longitude=None,fuzzy=False):
params = copy.deepcopy(LOCATION_PARAMS)
params['rank_token']=str(random.randrange(1,10**18)/10**18)
params['search_query'] = location
if latitude and longitude:
params['latitude']=latitude
params['longitude']=longitude
return {'params':params,'cb_kwargs':{'fuzzy':fuzzy,'location':location}},None
@force_type({'path_or_url':str,'caption':str})
@choose('create_post',out=True)
def post_photo(self,path_or_url=None,upload_id=None,caption=None):
data = copy.deepcopy(CONFIG_PHOTO_PARAMS)
if caption:
data['caption']=caption
if upload_id:
data['upload_id']=upload_id
else:
img = ImageHandler(path_or_url)
valid_path = img.to_valid_post_image_path()
data['upload_id']=self.upload_picture(valid_path)
tips = {
'ok':f'Post your photo Ins successfully!',
'failed':f'Post your photo Ins failed.',
}
logger.info('Posting your photo Ins...')
return {'data':data,'http_kwargs':{'delay':DELAY}},tips
@force_type({'path_or_url':str,'caption':str})
@choose('create_post',api=API_CREATE_STORY,out=True)
def post_story(self,path_or_url=None,upload_id=None,caption=None):
data = copy.deepcopy(CONFIG_PHOTO_PARAMS)
if caption:
data['caption'] = caption
if upload_id:
data['upload_id'] = upload_id
else:
img = ImageHandler(path_or_url)
valid_path = img.to_valid_post_image_path()
data['upload_id'] = self.upload_picture(valid_path)
tips = {
'ok': f'Post your photo story successfully!',
'failed': f'Post your photo story failed.',
}
logger.info('Posting your photo story...')
return {'data': data, 'http_kwargs': {'delay': DELAY}}, tips
@choose('delete',api=1)
def delete_media(self,media_id=None,short_code=None):
if not any([media_id, short_code]):
            raise Exception('At least one of media_id or short_code must not be None.')
if not media_id:
info = self.get_page_info(short_code)
media_id = info.get('graphql')['shortcode_media']['id']
url = API_DELETE_POSTED_MEDIA.format(mediaId=media_id)
tips = {
'ok':f'Delete media (id:{media_id}) successfully!',
            'failed':f'Delete media (id:{media_id}) failed. Maybe the media doesn\'t exist.',
}
return {'url':url},tips
@choose('delete',api=1)
def delete_comment(self,comment_id,media_id=None,short_code=None):
if not any([media_id, short_code]):
            raise Exception('At least one of (media_id, short_code) must not be None.')
if not media_id:
info = self.get_page_info(short_code)
media_id = info.get('graphql')['shortcode_media']['id']
url = API_DELETE_COMMENT.format(mediaId=media_id,commentId=comment_id)
tips = {
'ok':f'Delete a comment of media(id:{media_id}) successfully!',
'failed':f'Delete a comment of media(id:{media_id}) failed.'
}
return {'url':url},tips
@check(MONGODB['media_liker'])
def get_media_likers(self,short_code,save=False,count=-1,delay=DELAY,tname=None,path=None):
_count = 0
results = []
end_cursor = ''
total = 0
_check = 0
while 1:
params = copy.deepcopy(MEDIA_LIKER_PARAMS)
headers = copy.deepcopy(COMMON_HEADERS)
params['query_hash']=self.liker_hash
params['variables'] = params['variables'].replace('$', short_code).replace('%', end_cursor)
md5ed = md5(self.rhx_gis + ":" + params['variables'])
headers['x-instagram-gis']=md5ed
response = send_request(API_USER_POSTS,
json=True,
delay=delay,
headers=headers,
params=params)
data = response.json()
liker_card = data['data']['shortcode_media']['edge_liked_by']
if _count==0:
total = liker_card['count']
_check = count if count >0 else total
logger.info(f'Total amount of users who liked media({short_code}) : {total}')
likers = liker_card['edges']
page_info = liker_card['page_info']
end_cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
logger.info(f'Current grabbed users who liked media({short_code}):{len(likers)}.[{round(len(results)/_check,4)*100}%]')
for i in likers:
_count += 1
results.append(i)
if (_count >= count or _count >= total) and (count > 0):
logger.info(f'[Done]Total crawled users who liked media({short_code}) :{len(results)}')
return results
if save:
self.db.save(i, tname=tname)
if not has_next_page:
logger.info(f'[Done]Total crawled users who liked media({short_code}) :{len(results)}')
return results
@check(MONGODB['comment_liker'])
@login_required
def get_comment_likers(self,comment_id,save=False,count=-1,delay=DELAY,tname=None,path=None):
_count = 0
results = []
end_cursor = ''
while 1:
params = copy.deepcopy(COMMENT_LIKER_PARAMS)
headers = copy.deepcopy(COMMON_HEADERS)
params['query_hash'] = self.comment_liker_hash
params['variables'] = params['variables'].replace('$', comment_id).replace('%', end_cursor)
md5ed = md5(self.rhx_gis + ":" + params['variables'])
headers['x-instagram-gis'] = md5ed
response = send_request(API_USER_POSTS,
session=self.session,
json=True,
delay=delay,
headers=headers,
params=params)
data = response.json()
liker_card = data['data']['comment']['edge_liked_by']
likers = liker_card['edges']
page_info = liker_card['page_info']
end_cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
logger.info(
f'Current grabbed users who liked comment({comment_id}):{len(likers)}.')
for i in likers:
_count += 1
results.append(i)
if _count >= count and (count > 0):
logger.info(f'[Done]Total crawled users who liked comment({comment_id}) :{len(results)}')
return results
if save:
self.db.save(i, tname=tname)
if not has_next_page:
logger.info(f'[Done]Total crawled users who liked comment({comment_id}) :{len(results)}')
return results
def discover_posts(self,save=False,delay=DELAY,count=-1,tname=None,path=None):
pass
def get_posts_of_location(self,location,save=False,count=-1,delay=DELAY,tname=None,path=None):
pass
def __get_attr(self,_property):
if not getattr(self,_property):
if self.official is None:
self.official = User(instagram=self)
setattr(self,_property,getattr(self.official,_property.lstrip('_')))
return getattr(self,_property)
a = Instagram()
logger.info(a.info(details=True))
# d = a.get_comment_likers('17967358843242707',save=True)
# d=a.get_media_likers('BuxpYvdFtfP',count=10)
# d = a.add_comment('This is posted by InsBot.GitHub:01ly/InsBot.',short_code='BuxpYvdFtfP')
# print(d)
# a.unlike_comment('17949756184248517')
# a.delete_media(short_code='ButZGujBt8')
# a.delete_comment('18042937942030371',short_code='ButuTpXhWDQ')
# print(a.get_page_info('Bux8m0DhQms'))
# print(a.unlike(short_code='ButuTpXhWDQ'))
# print(a.post_story(r'https://b-ssl.duitang.com/uploads/item/201608/13/20160813004256_ArnKB.png',caption='Posted by InsBot'))
# print(a.get_push_info())
# print(a.delete_media('1994833336505674466'))
# a.get_user_fans('linkinpark',save=True,delay=3)
# print(a.delete_media('1994419799547189563'))
# print(a.get_user_info_by_id('7022182631'))
# a.get_posts_by_tag('linkinpark',save=True,top_only=False)
# print(a.get_user_following_tags('stevenfurtick'))
# a.follow_tag('anglebaby')
# print(a.info())
# a.enable_presence()
# a.login()
# a.logout()
# c = a.post_photo(r'https://images.unsplash.com/photo-1506794778202-cad84cf45f1d?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1000&q=80',caption=f'Posted by InsBot at {time.ctime()}')
# print(c)
# dd = a.get_activity_notify()
# a.mark_checked()
# dd = a.search_location('NewYork')
# dd = a.upload_picture(r'D:\projects\1.jpeg')
# print(dd)
# dd=a.get_push_info()
# dd = a.get_recommends_by_keyword('nba')
# a.mark_checked()
# a.get_push_info()
# print(dd['places'])
# a.remove_profile_picture()
# a.upload_profile_picture(r'https://images.unsplash.com/photo-1506794778202-cad84cf45f1d?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1000&q=80')
# a.reset_password(',,')
# a.set_comment_filter_keywords(keywords=['makelove','fuckyou','gay','fuck'],default=True)
# a.unblock('kobe')
# a.follow('cba')
# cc = a.get_user('linkin')
# print(cc.following_hash,cc.following_count)
# a.get_user_fans('linkinpark',save=True)
# cc.get_followings(save=True)
# cc.get_posts(save=True)
# print(a.keyparams)
# f = a.get_page_info('But53OPBwcp')
# print(a.get_page_comments('BuMkvw2gREs'))
# aa= a.get_page_comments('O_WT0Ry2LU',count=125,save=True)
# # print(aa)
# c = a.get_user('instagram')
# c.get_channel_posts(count=105)
```
#### File: InsBot/decorator/instagram.py
```python
import copy
from importlib import import_module
while 1:
try:
import_module('settings')
import_module('util')
import_module('log')
import_module('config')
except Exception as e:
import os,sys
sys.path.append(os.sep.join(os.getcwd().split(os.sep)[:-1]))
continue
break
from config import *
from settings import *
from util.http import send_request
from core.log import getLogger
logger = getLogger(__name__)
APIS = {
'follow' :API_FOLLOW,
'unfollow' :API_UNFOLLOW,
'block' :API_BLOCK,
'unblock' :API_UNBLOCK,
'set_private' :API_SET_PRIVATE,
'set_presence' :API_SET_PRESENCE,
'set_reshare' :API_SET_SHARE,
'set_filter' :API_SET_COMMENT_FILTER,
'reset_password':API_CHANGE_PASSWORD,
'upload_pic' :API_CHANGE_PROFILE_PIC,
'get_push_info' :API_PUSH_INFO,
'mark_checked' :API_MARK_CHECKED,
'location_search':API_GET_LOCATION,
'create_post' :API_CREATE_POST,
'get_activity' :API_ACTIVITY,
'get_recommends':API_TOP_SEARCH,
'follow_tag' :API_FOLLOW_TAG,
'unfollow_tag' :API_UNFOLLOW_TAG,
'query' :API_USER_POSTS,
'posts_by_tag' :API_TAG_POSTS,
'like_media' :API_PAGE_LIKE,
}
def switch(opt,_data=None,mode='user'):
def outter(func):
def wrapper(self,username=None,**kwargs):
API = APIS[opt]
headers = self.logined_headers
cookies = self.session.cookies.get_dict()
headers['x-csrftoken']=cookies['csrftoken']
data = _data
if username and isinstance(username,str):
if mode=='user':
target = self.get_user(username)
url = API.format(userid=target.Id)
elif mode=='tag':
url = API.format(tag=username)
else:
url = API
if opt == 'set_filter':
if kwargs.get('default'):
data = {'config_value':'1'}
logger.debug(f'set comments filter to default.')
elif kwargs.get('keywords'):
url = API_SET_COMMENT_FILTER_kEYWORDS
k = kwargs['keywords']
_ = ','.join([str(i) for i in k]) if len(k) > 1 else str(k[0])+','
data = {'keywords':_}
logger.debug(f'set comments filter keywords to {k}.')
send_request(API_SET_COMMENT_FILTER,
session=self.session,
headers=headers,
method='post',
data={'config_value':'0'},
proxy=PROXY_GLOBAL)
else:
data = {'config_value':'0'}
logger.debug(f'set comments filter keywords not in default mode.')
response = send_request(url,
session=self.session,
headers=headers,
method='post',
data=data,
proxy=PROXY_GLOBAL)
res = response.json()
self.__opt__=res
if not username is None:
ret = func(self,username)
else:
ret = func(self,**kwargs)
self.__opt__ = None
return ret
return wrapper
return outter
def choose(opt,method='post',data=None,
produce=True,api=None,params=None,callback=None,
login=True,out=False):
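    # Parameter summary (derived from the wrapper below):
    #   opt      - key into APIS giving the default request URL
    #   api      - explicit URL that overrides APIS[opt] when given
    #   produce  - if False, the request is sent with the fixed data/params and the
    #              decorated function only inspects the JSON response via self.__opt__;
    #              if True, the decorated function returns (request_dict, tips) that
    #              are used to build the request
    #   callback - optional post-processor called with the JSON response
    #   login    - when True, use the logged-in session headers with the csrf token
    #   out      - return the raw JSON response instead of a boolean result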
def outter(func):
def wrapper(self,*args,**kwargs):
url = APIS[opt] if api is None else api
if login:
headers = self.logined_headers
cookies = self.session.cookies.get_dict()
headers['x-csrftoken']=cookies['csrftoken']
else:
headers = copy.deepcopy(COMMON_HEADERS)
if not produce:
response = send_request(url,
session=self.session,
headers=headers,
method=method,
data=data,
params=params,
json=True,
delay=DELAY,
proxy=PROXY_GLOBAL)
res = response.json()
self.__opt__=res
ret = func(self,*args,**kwargs)
else:
ret = True
data_dict,tips = func(self,*args,**kwargs)
url = data_dict.get('url') if data_dict.get('url') else url
headers.update(data_dict.get('headers',{}))
if opt=='create_post':
headers.pop('Content-Type')
headers.pop('content-length')
response = send_request(url,
session=self.session,
headers=headers,
method=method,
params=data_dict.get('params',None),
data=data_dict.get('data',None),
proxy=PROXY_GLOBAL,
json=True,
delay=DELAY,
**data_dict.get('http_kwargs',{}))
if response is None:
return
res = response.json()
if callback and callable(callback):
cb_args = data_dict.get('cb_kwargs',{})
return callback(self,res,**cb_args)
if res and (res.get('status','')=='ok' or res.get('graphql')):
if opt == 'reset_password':
self.pwd = data_dict['data']['<PASSWORD>']
if opt == 'upload_pic':
if not res.get('has_profile_pic'):
if data_dict['data']:
if not res.get('upload_id'):
logger.info(tips['failed'])
else:
logger.info(tips['ok'])
return res.get('upload_id')
else:
logger.info(tips['ok'])
return ret
logger.info(tips['ok'])
if opt == 'create_post':
logger.info(f"Posted media id:{res.get('media').get('pk')}")
return res
else:
logger.info(tips['failed'])
logger.info(f"error:{res['message']}")
ret = False
self.__opt__ = None
if out:
return res
return ret
return wrapper
return outter
def check(dbname=None):
def outter(func):
def wrapper(self,*arg,**kwargs):
save = kwargs.get('save')
tname = kwargs.get('tname')
if save:
if tname is None:
                    logger.error('No table name was given for the current save operation.')
logger.warn(f'Setting tname to arg[0]:{arg[0]}')
kwargs['tname'] = str(arg[0])
if not self.db.connected:
logger.info(f'Connecting MongoDB..')
self.db.connect()
logger.info('MongoDB connected.')
if dbname:
self.db.use_db(dbname)
logger.info(f'Using database:{dbname}')
return func(self,*arg,**kwargs)
return wrapper
return outter
```
#### File: InsBot/decorator/user.py
```python
from importlib import import_module
while 1:
try:
import_module('config')
import_module('dbhelper')
import_module('log')
except Exception as e:
import os,sys
sys.path.append(os.sep.join(os.getcwd().split(os.sep)[:-1]))
continue
break
import re
from config import *
from core.dbhelper import Database
from core.log import getLogger
logger = getLogger(__name__)
def already(func):
def wrapper(self,*args,**kwargs):
if not self._homepage:
self.get_homepage()
return func(self,*args,**kwargs)
return wrapper
def exists(func):
def wrapper(self,*args,**kwargs):
sharedData = re.findall(r'_sharedData = (.+?);</script>',
self.homepage)
if sharedData:
return func(self,*args,**kwargs)
else:
            raise Exception('User page not found; the user you are looking for may not exist.')
return wrapper
def login_required(func):
def wrapper(self,*args,**kwargs):
if self.instagram is None:
            raise Exception('This operation needs a logged-in Instagram API. Please log in first.')
elif not self.instagram.logined:
self.instagram.login()
return func(self,*args,**kwargs)
return wrapper
def check(dbname=None):
def outter(func):
def wrapper(self,*arg,**kwargs):
save = kwargs.get('save')
tname = kwargs.get('tname')
if self.is_private and not self.is_myfans:
                logger.info(f'User "{self.name}" is a private account and unavailable to you.')
return
if save:
if tname is None:
                    logger.error('No table name was given for the current save operation.')
logger.warn(f'Setting tname to username:{self.name}')
kwargs['tname'] = self.name
if not isinstance(self.db,Database):
logger.info('Creating MongoDB object.' )
self.db = Database(MONGODB)
if not self.db.connected:
logger.info(f'Connecting MongoDB..')
self.db.connect()
logger.info('MongoDB connected.')
if dbname:
self.db.use_db(dbname)
logger.info(f'Switched database to "{dbname}".')
return func(self,*arg,**kwargs)
return wrapper
return outter
```
#### File: InsBot/util/__init__.py
```python
import re
import hashlib
def from_pattern(content,pattern,index=0,allget=False):
finds = re.findall(pattern,content)
if finds:
if allget:
return finds
else:
return finds[index]
def md5(string):
hashed = hashlib.md5()
hashed.update(string.encode('utf-8'))
md5ed = hashed.hexdigest()
return md5ed
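# Illustrative usage (not part of the original module):
#   from_pattern('uid=42;', r'uid=(\d+);')     -> '42'
#   from_pattern('a1 b2', r'\d', allget=True)  -> ['1', '2']
#   md5('abc')                                 -> '900150983cd24fb0d6963f7d28e17f72'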
```
#### File: InsBot/util/instagram.py
```python
import random
def get_location_params(ins_obj,json_res,location=None,fuzzy=False):
locations = json_res['venues']
status = json_res['status']
if status == 'ok' and locations:
for i in locations:
if not fuzzy:
if i['name']==location:
return {"lat":i["lat"],"lng":i["lng"],"facebook_places_id":i["external_id"]}
else:
if i['name'].startswith(location):
return {"lat":i["lat"],"lng":i["lng"],"facebook_places_id":i['external_id']}
def inner_photo_tagged_pos(userIds):
inners = []
for i in userIds:
_ = {"user_id":f'{i}',"position":[random.randrange(1,10**16)/10**16,
random.randrange(1,10**16)/10**16]}
inners.append(_)
return {"in":inners}
``` |
{
"source": "01-Meyitzade-01/TgKATILMA",
"score": 2
} |
#### File: src/handlers/admin.py
```python
from pathlib import Path
import json
import logging
from aiogram import types
from peewee import fn
from process.database import User
REPLIES = json.load(open(Path.cwd().joinpath("src/ui/replies.json")))
logger = logging.getLogger(__name__)
async def stats(message: types.Message):
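    # Admin statistics handler: the argument string is split into subcommands
    # ("total", "file", "lang", "reqs"); e.g. "/stats total" (assuming the handler
    # is registered on /stats) replies with the number of non-banned users.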
payload = message.get_args().split()
base_log_msg = f"Admin {message.from_user.id} requested {payload} with result"
subcommands = ["total", "file", "lang", "reqs"]
if "total" in payload:
total_users = User.select().where(User.is_ban == False).count() # noqa: E712
logger.info(f"{base_log_msg} {total_users}")
        await message.answer(str(total_users))
elif "file" in payload:
file = open(Path.cwd().joinpath("src/data/users.db"), "rb")
input_file = types.InputFile(file)
logger.info(f"{base_log_msg} <file>")
await message.answer_document(input_file)
file.close()
elif "reqs" in payload:
total_requests = User.select(fn.SUM(User.requests)).scalar(as_tuple=True)
logger.info(f"{base_log_msg} {total_requests[0]}")
        await message.answer(str(total_requests[0]))
elif "lang" in payload:
text = "Language Percentages:"
top_list = []
for lang in REPLIES["LANGS"]:
total_users = (
User.select().where(User.is_ban == False).count() # noqa: E712
)
total_lang = User.select().where(User.language == lang).count()
try:
percent = total_lang / total_users
except ZeroDivisionError:
percent = 0.0
top_list.append([lang, round(percent, 4)])
for lang, per in sorted(top_list, key=lambda x: x[1], reverse=True):
text += f"\n<pre>{lang}: {per:.2%}</pre>"
logger.info(f"{base_log_msg} {top_list}")
await message.answer(text=text, parse_mode="HTML")
else:
logger.info(f"{base_log_msg} no result.")
        await message.answer(", ".join(subcommands))
```
#### File: src/handlers/excepts.py
```python
import pathlib, json # noqa: E401
import logging
from aiogram import Bot, types
CONFIG = json.load(open(pathlib.Path.cwd().joinpath("src/config.json")))
logger = logging.getLogger(__name__)
async def on_err(event: types.Update, exception: Exception):
bot = Bot.get_current()
# notifies the admins
for admin in CONFIG["ADMINS"]:
        await bot.send_message(admin, f"{exception} occurred on event: {event}")
# logs the error in the logger
    logger.critical(f"<{exception}> occurred on event: {event}")
return True
```
#### File: src/process/function.py
```python
import json, pathlib # noqa: E401
from typing import Callable
import time
import numpy as np
class Function:
def __init__(self, order: int = 3):
        self.order = order
self.data_path = pathlib.Path.cwd().joinpath("src/data/dates.json")
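        # dates.json is assumed to map Telegram user IDs (as strings) to unix
        # timestamps, e.g. {"1300200300": 1262304000}; the polynomial fitted below
        # is used to estimate a timestamp for an arbitrary ID.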
self.x, self.y = self._unpack_data()
self._func = self._fit_data()
def _unpack_data(self) -> (list, list):
with open(self.data_path) as string_data:
data = json.load(string_data)
x_data = np.array(list(map(int, data.keys())))
y_data = np.array(list(data.values()))
return (x_data, y_data)
def _fit_data(self) -> Callable[[int], int]:
fitted = np.polyfit(self.x, self.y, self.order)
func = np.poly1d(fitted)
return func
def add_datapoint(self, pair: tuple):
        pair = (str(pair[0]), pair[1])
with open(self.data_path) as string_data:
data = json.load(string_data)
data.update([pair])
with open(self.data_path, "w") as string_data:
json.dump(data, string_data)
# update the model with new data
self.x, self.y = self._unpack_data()
self._func = self._fit_data()
def func(self, tg_id: int) -> int:
value = self._func(tg_id)
current = time.time()
if value > current:
value = current
return value
if __name__ == "__main__":
# import matplotlib.pyplot as plt
from datetime import datetime
f = Function(6)
a = f.func(1_300_200_300)
print(datetime.utcfromtimestamp(a).strftime("%Y-%m-%d")) # example interpolation
# plot scatter data + line of best fit
# plt.scatter(f.x, f.y)
# plt.plot(f.x, [f.func(x) for x in f.x])
# plt.show()
``` |
{
"source": "01mokuba/soumu_scrapy",
"score": 3
} |
#### File: soumu_scrapy/spiders/archive.py
```python
import re
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from soumu_scrapy.items import ArchiveItem, ClipItem
class ArchiveSpider(CrawlSpider):
name = 'archive'
    allowed_domains = ['www.soumu.go.jp']  # target domain
    start_urls = ['http://www.soumu.go.jp/menu_news/s-news/index.html']  # starting URL
custom_settings = {
'DOWNLOAD_DELAY' : 1,
}
rules = (
Rule(
LinkExtractor(
                allow=['http://www.soumu.go.jp/menu_news/s-news/[\d]+m\.html'],  # URL pattern to follow - monthly press release list
                restrict_xpaths=['//div[@class=\'contentsBody\']']  # page area to extract links from
),
            callback='parse_archive_list',  # callback run after link extraction
follow=True
),
Rule(
LinkExtractor(
                allow=['http://www.soumu.go.jp/menu_news/s-news/[\d\w]+\.html'],  # URL pattern to follow - press release detail pages
                restrict_xpaths=['//div[@class=\'contentsBody\']']  # page area to extract links from
),
            callback='parse_archive_detail',  # callback run after link extraction
follow=True
)
)
def parse_archive_list(self, response):
item = ArchiveItem()
item['links'] = []
        item['month'] = response.url.split('/')[-1].replace('m.html','')  # extracted month, e.g. 1809
        for linkitem in response.xpath('//div[@class=\"contentsBody\"]//a'):  # loop over links inside the main content area
item['links'].append({
                'href' : linkitem.xpath('@href').extract_first(),  # extract the URL
                'text' : linkitem.xpath('text()').extract_first()  # extract the anchor text
})
return item
def parse_archive_detail(self, response):
item = ClipItem()
item['src'] = response.xpath('//body').extract_first()
content_root = response.xpath('//div[@class=\'contentsBody\']')
item['text'] = content_root.extract_first()
item['attachments'] = []
item['file_urls'] = []
        for d in response.xpath('//a'):  # loop over the response's <a> tags
            dd = d.xpath('@href').extract_first()  # extract the href value
            if dd is not None:  # if an href value exists
                if re.match('^https?://', dd) is None:  # if the URL has no http[s] scheme
                    dd = response.urljoin(dd)  # combine with the response's base URL to build a full URL
                if re.match('.*\.[Pp][Dd][Ff]$', dd) is not None:  # when the URL ends in PDF/pdf (either case)
item['attachments'].append({
'href': dd,
'text': d.xpath('text()').extract_first()
})
item['file_urls'].append(dd)
return item
``` |
{
"source": "01mu/covid-19",
"score": 3
} |
#### File: covid-19/src/conn.py
```python
import psycopg2
import MySQLdb
def make_conn(cred_file):
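    # Assumed credential-file layout (one value per line, as indexed below):
    #   0: backend ("mysql"; anything else selects PostgreSQL)
    #   1: database name   2: user   3: password
    #   4: host            5: port   6: MySQL unix socket path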
    creds = [line.strip() for line in open(cred_file, 'r').readlines()]
if creds[0] == 'mysql':
try:
c = MySQLdb.connect(db = creds[1],
user = creds[2],
passwd = creds[3],
unix_socket = creds[6])
except:
c = MySQLdb.connect(db = creds[1],
user = creds[2],
passwd = creds[3])
c.set_character_set('utf8')
return c
else:
return psycopg2.connect(database = creds[1],
user = creds[2],
password = creds[3],
host = creds[4],
port = creds[5])
``` |
{
"source": "01org/isafw",
"score": 2
} |
#### File: isafw/tests/LACPluginTestCase.py
```python
import unittest
import sys
from isafw import isafw
import isaplugins
import shutil
import os
from datetime import datetime
isafw_conf = isafw.ISA_config()
isafw_conf.reportdir = "./la_plugin/output"
class TestLACPlugin(unittest.TestCase):
def setUp(self):
# cleaning up the report dir and creating it if needed
if os.path.exists(os.path.dirname(isafw_conf.reportdir+"/internal/test")):
shutil.rmtree(isafw_conf.reportdir)
os.makedirs(os.path.dirname(isafw_conf.reportdir+"/internal/test"))
# setting the timestamp
isafw_conf.timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
# fetching proxy info
isafw_conf.proxy = ""
if "http_proxy" in os.environ:
isafw_conf.proxy = os.environ['http_proxy']
if "https_proxy" in os.environ:
isafw_conf.proxy = os.environ['https_proxy']
isafw_conf.machine = "TestCaseMachine"
self.la_report_path = isafw_conf.reportdir + "/la_problems_report_" + isafw_conf.machine + "_" + isafw_conf.timestamp
# creating ISA FW class
self.imageSecurityAnalyser = isafw.ISA(isafw_conf)
def test_package_with_licenses_OK(self):
pkg = isafw.ISA_package()
pkg.name = "bash"
pkg.version = "4.3"
pkg.licenses = ["bash:Apache-1.1"]
self.imageSecurityAnalyser.process_package(pkg)
self.imageSecurityAnalyser.process_report()
        badLicExist = os.path.isfile(self.la_report_path)
# if no bad licenses exist no report is created
self.assertFalse(badLicExist)
def test_package_with_licenses_NotOK(self):
pkg = isafw.ISA_package()
pkg.name = "bash"
pkg.version = "4.3"
pkg.licenses = ["bash:BadLicense-1.1"]
self.imageSecurityAnalyser.process_package(pkg)
self.imageSecurityAnalyser.process_report()
with open(self.la_report_path, 'r') as freport:
output = freport.readline()
# if bad licenses exist, a report listing them is created
self.assertEqual(output,
"bash:BadLicense-1.1\n",
'Output does not match')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "01org/mayloon-runtime",
"score": 2
} |
#### File: com.intel.jsdroid/build_script/fixs_loadpath.py
```python
import os
import sys
import simplejson
def ls(dir, hidden=False, relative=True):
nodes = []
for nm in os.listdir(dir):
if not hidden and nm.startswith('.'):
continue
if not relative:
nm = os.path.join(dir, nm)
nodes.append(nm)
nodes.sort()
return nodes
def find(root, files=True, dirs=False, hidden=False, relative=True, topdown=True):
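    # Walks `root` and yields file paths (and directory paths when dirs=True);
    # paths are relative to root unless relative=False, and dot-prefixed entries
    # are skipped unless hidden=True.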
root = os.path.join(root, '') # add slash if not there
for parent, ldirs, lfiles in os.walk(root, topdown=topdown):
if relative:
parent = parent[len(root):]
if dirs and parent:
yield os.path.join(parent, '')
if not hidden:
lfiles = [nm for nm in lfiles if not nm.startswith('.')]
ldirs[:] = [nm for nm in ldirs if not nm.startswith('.')] # in place
if files:
lfiles.sort()
for nm in lfiles:
nm = os.path.join(parent, nm)
yield nm
def fixs_loadpath(root):
all_list = []
package_set = set()
current_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    print("java output path: %s" % ("../" + root))
print("current execution script path: %s" % current_path)
for f in find("../" + root):
all_list.append(f)
for string in all_list:
index = str(string).rfind("/")
temp = str(string)[0:index]
temp = temp.replace("/", ".")
package_set.add(temp)
package_list = list(package_set)
write_file = open('../sdk_config/package_name.json', 'w')
simplejson.dump(package_list, write_file)
write_file.close()
print(package_list)
if __name__ == "__main__":
fixs_loadpath(sys.argv[1])
``` |
{
"source": "01org/mpxcheck",
"score": 2
} |
#### File: 01org/mpxcheck/Utl.py
```python
import os
import subprocess
class Utl(object):
"""A collection of static utility methods"""
@staticmethod
def exe(cmd):
"""Execute a command
Args:
cmd (list): command to execute as a list (e.g.)['ls','-al']
Returns:
a dictionary containing the following keys:
output: output of the executed command, includes stderr
exit: exit status of executed command
valid: if exit status was 0 and output exists
error: error from an exception if occurs
"""
try:
proc = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[0].decode(encoding="iso8859_1")
valid = not Utl.is_none(output) and proc.returncode == 0
proc.stdout.close()
proc.stderr.close()
return {"output":output, "exit":proc.returncode,
"valid":valid, "error":None}
except (ValueError, OSError) as ex:
return {"output":str(ex), "exit":-1,
"error":str(ex), "valid":False}
@staticmethod
def is_none(token):
"""Check if string object is None or empty
Args:
token (str): string object
Returns:
True if empty, False otherwise
"""
return token is None or str(token).strip() == ''
@staticmethod
def remove(path):
"""Delete file if exists
Args:
path (str): path to file
Returns:
None
"""
if os.path.isfile(path):
os.unlink(path)
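# Minimal usage sketch (not part of the original module); the command and the
# temp-file path below are illustrative only.
if __name__ == "__main__":
    result = Utl.exe(["ls", "-al"])
    if result["valid"]:
        print(result["output"])
    else:
        print("command failed (exit %s): %s" % (result["exit"], result["error"]))
    print(Utl.is_none("   "))       # True - whitespace-only counts as empty
    Utl.remove("/tmp/example.tmp")  # silently does nothing if the file is absent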
``` |
{
"source": "01org/pyMIC",
"score": 2
} |
#### File: pyMIC/benchmarks/dgemm.py
```python
from __future__ import print_function
import sys
import time
import pymic
import numpy as np
def limiter(data_size):
if data_size < 128:
return 10000
if data_size < 1024:
return 1000
if data_size < 8192:
return 100
return 10
benchmark = sys.argv[0][2:][:-3]
# number of elements to copyin (8B to 2 GB)
data_sizes = []
data_sizes = [(128 + i * 128) for i in range(64)]
repeats = map(limiter, data_sizes)
device = pymic.devices[0]
library = device.load_library("libbenchmark_kernels.so")
stream = device.get_default_stream()
timings = {}
timings_kernel = {}
np.random.seed(10)
for ds, nrep in zip(data_sizes, repeats):
print("Measuring {0}x{0} (repeating {2})".format(ds, ds * 8, nrep))
m, k, n = ds, ds, ds
a = np.random.random(m * k).reshape((m, k))
b = np.random.random(k * n).reshape((k, n))
c = np.zeros((m, n))
alpha = 1.0
beta = 0.0
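    # First pass: time host-to-device binding plus the kernel invocation together for each repetition.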
ts = time.time()
for i in range(nrep):
offl_a = stream.bind(a)
offl_b = stream.bind(b)
offl_c = stream.bind(c)
stream.sync()
ts_kernel = time.time()
stream.invoke(library.dgemm_kernel,
offl_a, offl_b, offl_c, m, n, k, alpha, beta)
stream.sync()
te_kernel = time.time()
te = time.time()
timings[ds] = (te - ts, nrep)
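    # Second pass: bind the arrays once, then time only the repeated kernel invocations.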
offl_a = stream.bind(a)
offl_b = stream.bind(b)
offl_c = stream.bind(c)
stream.sync()
ts_kernel = time.time()
for i in range(nrep):
stream.invoke(library.dgemm_kernel,
offl_a, offl_b, offl_c, m, n, k, alpha, beta)
stream.sync()
te_kernel = time.time()
timings_kernel[ds] = (te_kernel - ts_kernel, nrep)
try:
csv = open(benchmark + ".csv", "w")
print("benchmark;elements;avg time;avg time kernel;flops;gflops"
";gflops kernel",
file=csv)
for ds in sorted(list(timings)):
t, nrep = timings[ds]
t = (float(t) / nrep)
t_k, dummy = timings_kernel[ds]
t_k = (float(t_k) / nrep)
flops = 2 * ds * ds * ds
gflops = (float(flops) / (1000 * 1000 * 1000)) / t
gflops_k = (float(flops) / (1000 * 1000 * 1000)) / t_k
print("{0};{1};{2};{3};{4};{5};{6}".format(benchmark, ds, t,
t_k, flops, gflops,
gflops_k),
file=csv)
finally:
csv.close()
``` |
{
"source": "01org/virtual-storage-manager",
"score": 2
} |
#### File: vsm/agent/driver.py
```python
import json
import operator
import os
import platform
import time
import urllib2
from vsm.agent.cephconfigutils import CephConfigSynchronizer, CephConfigParser
from vsm.agent.crushmap_parser import CrushMap
from vsm.agent import rpcapi as agent_rpc
from vsm.common import ceph_version_utils
from vsm.common import constant
from vsm import conductor
from vsm.conductor import rpcapi as conductor_rpcapi
from vsm import db
from vsm import exception
from vsm import flags
from vsm.openstack.common import log as logging
from vsm.openstack.common.rpc import common as rpc_exc
from vsm import utils
try:
from novaclient.v1_1 import client as nc
except:
from novaclient.v2 import client as nc
try:
from cinderclient.v1 import client as cc
except:
from cinderclient.v2 import client as cc
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CephDriver(object):
    """Execute commands relating to Ceph."""
def __init__(self, context):
self._crushmap_mgmt = CreateCrushMapDriver()
self._conductor_api = conductor.API()
self._conductor_rpcapi = conductor_rpcapi.ConductorAPI()
self._agent_rpcapi = agent_rpc.AgentAPI()
try:
CephConfigSynchronizer().sync_before_read(FLAGS.ceph_conf)
self.update_etc_fstab(context)
except:
pass
def _is_systemctl(self):
"""
if the ceph version is greater than or equals infernalis and the operating
system is not ubuntu, use command "systemctl" to operate ceph daemons.
"""
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown', '-R', 'ceph:ceph',
'/var/lib/ceph', run_as_root=True)
utils.execute('chown', '-R', 'ceph:ceph',
'/etc/ceph', run_as_root=True)
(distro, release, codename) = platform.dist()
if distro != "Ubuntu":
return True
return False
def _operate_ceph_daemon(self, operate, type, id=None, ssh=False, host=None):
"""
start/stop ceph-$type id=$id.
service ceph start/stop $type.$id
systemctl start/stop ceph-$type@$id
        The ceph init script has an -a parameter and can operate on remote nodes,
        but ceph-$type can only operate on the local node, so ssh is used for remote nodes.
:param operate: start or stop
:param type: osd, mon or mds or None
:param id:
:param ssh: if ssh, then remote operate
:param host: ssh host
:return:
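        Example (derived from the branches below): _operate_ceph_daemon('start', 'osd', id='3')
        ends up running one of `start ceph-osd cluster=ceph id=3` (upstart marker present),
        `systemctl start ceph-osd@3`, or `service ceph start osd.3`, optionally over ssh.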
"""
# the cluster should be passed in, but most of the code in this module
# assumes the cluster is named 'ceph' - just creating a variable here
# makes it simpler to fix this issue later - at least in this function
DEFAULT_CLUSTER_NAME = "ceph"
DEFAULT_OSD_DATA_DIR = "/var/lib/ceph/osd/$cluster-$id"
DEFAULT_MON_DATA_DIR = "/var/lib/ceph/mon/$cluster-$host"
# type and id is required here.
# not support operate all ceph daemons
if not type or not id:
LOG.error("Required parameter type or id is blank")
return False
# host is local host if not specified
if not host:
host = platform.node()
# get cluster from config - use default if not found
cluster = DEFAULT_CLUSTER_NAME
LOG.info("Operate %s type %s, id %s" % (operate, type, id))
is_systemctl = self._is_systemctl()
config = CephConfigParser(FLAGS.ceph_conf)
data_dir = config.get(type, type + " data")
if not data_dir:
data_dir = DEFAULT_OSD_DATA_DIR if type == "osd" else DEFAULT_MON_DATA_DIR
# path = os.path.dirname(data_dir)
file = data_dir.replace("$cluster", cluster).\
replace("$id", str(id)).replace("$host", host) + "/upstart"
# no using os.path.exists(), because if the file is owned by ceph
# user, the result will return false
if ssh:
try:
out, err = utils.execute('ssh', '-t', 'root@'+host,
'ls', file, run_as_root=True)
except:
out = ""
else:
try:
out, err = utils.execute('ls', file, run_as_root=True)
except:
out = ""
# is_file_exist = os.path.exists(file)
if out:
id_assignment = "id=%s" % (host if "-$host" in data_dir else id)
cluster_assignment = "cluster=%s" % cluster
service = "ceph-%s" % type
if ssh:
utils.execute('ssh', '-t', 'root@'+host,
operate, service, cluster_assignment,
id_assignment, run_as_root=True)
else:
utils.execute(operate, service, cluster_assignment,
id_assignment, run_as_root=True)
else:
if is_systemctl:
if ssh:
utils.execute('ssh', '-t', 'root@'+host,
'systemctl', operate,
'ceph-'+type+'@'+id,
run_as_root=True)
else:
utils.execute('systemctl', operate,
'ceph-'+type+'@'+id,
run_as_root=True)
else:
type_id = type + "." + id
if ssh:
utils.execute('ssh', '-t', 'root@'+host,
'service', 'ceph', operate,
type_id, run_as_root=True)
else:
utils.execute('service', 'ceph', operate, type_id,
run_as_root=True)
def _get_new_ruleset(self):
args = ['ceph', 'osd', 'crush', 'rule', 'dump']
ruleset_list = self._run_cmd_to_json(args)
return len(ruleset_list)
def _get_cluster_name(self,secondary_public_ip,keyring):
cluster_name = ''
args = ['ceph', 'mon', 'dump','--keyring',keyring]
mon_name = None
mon_dump = self._run_cmd_to_json(args)
for mon in mon_dump['mons']:
if mon['addr'].split(':')[0] == secondary_public_ip.split(',')[0]:
mon_name = mon['name']
break
if mon_name:
mon_configs = self._get_mon_config_dict(mon_name)
cluster_name = mon_configs['cluster']
return cluster_name
# def _get_ceph_admin_keyring_from_file(self,secondary_public_ip):
# keyring = ''
# args = ['ceph', 'mon', 'dump']
# mon_name = None
# mon_dump = self._run_cmd_to_json(args)
# for mon in mon_dump['mons']:
# if mon['addr'].split(':')[0] == secondary_public_ip.split(',')[0]:
# mon_name = mon['name']
# break
# if mon_name:
# mon_configs = self._get_mon_config_dict(mon_name)
# keyring_file = mon_configs['keyring']
# keyring,err = utils.execute('cat',keyring_file,run_as_root=True)
# return keyring
def _get_mon_config_dict(self,mon_id):
args = ['ceph', 'daemon','mon.%s'%mon_id ,'config','show']
return self._run_cmd_to_json(args)
def update_etc_fstab(self, context):
# NOTE: This routine fails as it relies on [TYP.X] sections, which are no longer required or configured.
utils.execute('sed', '-i', '/forvsmosd/d', '/etc/fstab', run_as_root=True)
config = CephConfigParser(FLAGS.ceph_conf)
fs_type = config.get('osd', 'osd mkfs type', 'xfs')
cluster = db.cluster_get_all(context)[0]
mount_option = cluster['mount_option']
if not mount_option:
mount_option = utils.get_fs_options(fs_type)[1]
mount_attr = config.get('osd', 'osd mount options %s' % fs_type, mount_option)
for sec in config.sections():
if sec.find('osd.') != -1:
osd_id = sec.split('.')[1]
mount_path = os.path.join(FLAGS.osd_data_path, "osd%s" % osd_id)
mount_disk = config.get(sec, 'devs')
mount_host = config.get(sec, 'host')
if FLAGS.host == mount_host:
line = mount_disk + ' ' + mount_path
line = line + ' ' + fs_type
line = line + ' ' + mount_attr + ' 0 0'
line = line + ' ' + '## forvsmosd'
utils.write_file_as_root('/etc/fstab', line)
def create_storage_pool(self, context, body):
pool_name = body["name"]
primary_storage_group = ''
if body.get("ec_profile_id"):
profile_ref = db.ec_profile_get(context, body['ec_profile_id'])
pgp_num = pg_num = profile_ref['pg_num']
plugin = "plugin=" + profile_ref['plugin']
crushmap = self.get_crushmap_json_format()
ruleset_root = "ruleset-root=" + crushmap.get_bucket_root_by_rule_name(body['ec_ruleset_root'])
failure_domain = "ruleset-failure-domain=" + body['ec_failure_domain']
rule_name = pool_name
kv = eval(profile_ref['plugin_kv_pair'])
pair_str = ""
for k, v in kv.items():
pair_str += str(k) + "=" + str(v) + " "
utils.execute('ceph', 'osd', 'erasure-code-profile','set', profile_ref['name'], \
plugin, ruleset_root, failure_domain, pair_str, '--force', \
run_as_root=True)
utils.execute('ceph', 'osd', 'crush', 'rule', 'create-erasure', \
rule_name, profile_ref['name'], run_as_root=True)
res = utils.execute('ceph', 'osd', 'pool', 'create', pool_name, pg_num, \
pgp_num, 'erasure', profile_ref['name'], rule_name, \
run_as_root=True)
new_crushmap = self.get_crushmap_json_format()
storage_group_values = new_crushmap.get_storage_group_value_by_rule_name(rule_name)
if len(storage_group_values) == 1:
storage_group_values = storage_group_values[0]
storage_group_values['status'] = 'IN'
ref_storge_group = db.storage_group_update_or_create(context,storage_group_values)
body['storage_group_id'] = ref_storge_group.id
elif body.get('pool_type') == 'replicated':
try:
utils.execute('ceph', 'osd', 'getcrushmap', '-o', FLAGS.crushmap_bin,
run_as_root=True)
utils.execute('crushtool', '-d', FLAGS.crushmap_bin, '-o', FLAGS.crushmap_src,
run_as_root=True)
#ruleset = self._get_new_ruleset()
pg_num = str(body['pg_num'])
primary_storage_group = body['storage_group_name']
storage_group = db.storage_group_get_by_name(context,primary_storage_group)
ruleset = storage_group['rule_id']
utils.execute('chown', '-R', 'vsm:vsm', '/etc/vsm/',
run_as_root=True)
utils.execute('ceph', 'osd', 'pool', 'create', pool_name, \
pg_num, pg_num, 'replicated', run_as_root=True)
utils.execute('ceph', 'osd', 'pool', 'set', pool_name,
'crush_ruleset', ruleset, run_as_root=True)
utils.execute('ceph', 'osd', 'pool', 'set', pool_name,
'size', str(body['size']), run_as_root=True)
res = True
except:
LOG.error("create replica storage pool error!")
raise
return False
else:
rule = str(body['crush_ruleset'])
size = str(body['size'])
pg_num = str(body['pg_num'])
res = utils.execute('ceph', 'osd', 'pool', 'create', pool_name, \
pg_num, run_as_root=True)
utils.execute('ceph', 'osd', 'pool', 'set', pool_name,
'size', size, run_as_root=True)
utils.execute('ceph', 'osd', 'pool', 'set', pool_name,
'crush_ruleset', rule, run_as_root=True)
#set quota
if body.get('enable_quota', False):
max_bytes = 1024 * 1024 * 1024 * int(body.get('quota', 0))
utils.execute('ceph', 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes,\
run_as_root=True)
#update db
pool_list = self.get_pool_status()
for pool in pool_list:
if pool_name == pool['pool_name']:
values = {
'pool_id': pool.get('pool'),
'name': pool.get('pool_name'),
'pg_num': pool.get('pg_num'),
'pgp_num': pool.get('pg_placement_num'),
'size': pool.get('size'),
'min_size': pool.get('min_size'),
'crush_ruleset': pool.get('crush_ruleset'),
'crash_replay_interval': pool.get('crash_replay_interval'),
'ec_status': pool.get('erasure_code_profile'),
'replica_storage_group': body.get('pool_type'),
'quota': body.get('quota'),
'max_pg_num_per_osd':body.get('max_pg_num_per_osd'),
'auto_growth_pg':body.get('auto_growth_pg',0),
}
values['created_by'] = body.get('created_by')
values['cluster_id'] = body.get('cluster_id')
values['tag'] = body.get('tag')
values['status'] = 'running'
values['primary_storage_group_id'] = body.get('storage_group_id')
db.pool_create(context, values)
return res
def _keystone_v3(self, tenant_name, username, password,
auth_url, region_name):
auth_url = auth_url + "/auth/tokens"
user_id = username
user_password = password
project_id = tenant_name
auth_data = {
"auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"id": user_id,
"password": <PASSWORD>
}
}
},
"scope": {
"project": {
"id": project_id
}
}
}
}
auth_request = urllib2.Request(auth_url)
auth_request.add_header("content-type", "application/json")
auth_request.add_header('Accept', 'application/json')
auth_request.add_header('User-Agent', 'python-mikeyp')
auth_request.add_data(json.dumps(auth_data))
auth_response = urllib2.urlopen(auth_request)
return auth_response
def _keystone_v2(self, tenant_name, username, password,
auth_url, region_name):
auth_url = auth_url + "/tokens"
auth_data = {
"auth": {
"tenantName": tenant_name,
"passwordCredentials": {
"username": username,
"password": password
}
}
}
auth_request = urllib2.Request(auth_url)
auth_request.add_header("content-type", "application/json")
auth_request.add_header('Accept', 'application/json')
auth_request.add_header('User-Agent', 'python-mikeyp')
auth_request.add_data(json.dumps(auth_data))
auth_response = urllib2.urlopen(auth_request)
return auth_response
def _config_cinder_conf(self, **kwargs):
LOG.info("_config_cinder_conf")
uuid = kwargs.pop('uuid', None)
volume_host = kwargs.pop('volume_host', None)
pool_type = kwargs.pop('pool_type', None)
pool_name = kwargs.pop('pool_name', None)
ssh_user = kwargs.pop('ssh_user', None)
os_controller_host = kwargs.pop('os_controller_host', None)
pool_str = pool_name + "," + pool_type + "-" + pool_name
LOG.info("volume host = %s, uuid = %s, pool type = %s, pool name = %s, "
"ssh_user = %s, os_controller_host = %s" %
(volume_host, uuid, pool_type, pool_name, ssh_user,
os_controller_host))
LOG.info("present pool info = %s" % pool_str)
try:
out, err = utils.execute(
'presentpool',
'cinder',
ssh_user,
uuid,
volume_host,
os_controller_host,
pool_str,
run_as_root = True
)
LOG.info("present pool on cinder-volume host logs = %s" % out)
except:
LOG.error("Failed to present pool on cinder-volume host")
pass
def _config_nova_conf(self, **kwargs):
LOG.info("_config_nova_conf")
uuid = kwargs.pop('uuid', "")
username = kwargs.pop('username', "")
password = kwargs.pop('password', "")
tenant_name = kwargs.pop('tenant_name', "")
auth_url = kwargs.pop('auth_url', "")
region_name = kwargs.pop('region_name', "")
ssh_user = kwargs.pop('ssh_user', None)
os_controller_host = kwargs.pop('os_controller_host', None)
nova_compute_hosts = []
LOG.info("uuid = %s, username = %s, password = %s, tenant name = %s, "
"auth url = %s, region name = %s, ssh_user = %s, os_controller_host = %s" %
(uuid, username, password, tenant_name, auth_url, region_name,
ssh_user, os_controller_host))
if "v3" in auth_url:
connection = self._keystone_v3(tenant_name, username, password, auth_url, region_name)
response_data = json.loads(connection.read())
services_list = response_data['token']['catalog']
endpoints_list = []
_url = None
for service in services_list:
service_type = service['type']
service_name = service['name']
if service_type == "compute" and service_name == "nova":
endpoints_list = service['endpoints']
break
for endpoint in endpoints_list:
interface = endpoint['interface']
region_id = endpoint['region_id']
if region_name:
if interface == "public" and region_id == region_name:
_url = endpoint['url']
break
else:
if len(endpoints_list) == 3:
_url = endpoint['url']
break
token = connection.info().getheader('X-Subject-Token')
url_list = _url.split(":")
auth_url_list = auth_url.split(":")
url_list[1] = auth_url_list[1]
url = ":".join(url_list) + "/os-services"
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header("content-type", "application/json")
req.add_header("X-Auth-Token", token)
resp = urllib2.urlopen(req)
nova_services = json.loads(resp.read())
nova_services = nova_services['services']
LOG.info("nova services = %s " % str(nova_services))
for nova_service in nova_services:
if nova_service['binary'] == "nova-compute":
nova_compute_hosts.append(nova_service['host'])
LOG.info("nova-compute hosts = %s" % str(nova_compute_hosts))
else:
novaclient = nc.Client(
username, password, tenant_name, auth_url, region_name=region_name
)
nova_services = novaclient.services.list()
LOG.info("nova services = %s " % str(nova_services))
for nova_service in nova_services:
if nova_service.binary == "nova-compute":
nova_compute_hosts.append(nova_service.host)
LOG.info("nova-compute hosts = %s" % str(nova_compute_hosts))
for nova_compute_host in nova_compute_hosts:
try:
LOG.info("nova-compute host = %s" % nova_compute_host)
out, err = utils.execute(
'presentpool',
'nova',
ssh_user,
uuid,
nova_compute_host,
os_controller_host,
run_as_root = True
)
LOG.info("present pool on nova-compute host logs = %s" % out)
except:
LOG.info("Failed to present pool on nova-compute host")
pass
def _config_glance_conf(self, **kwargs):
LOG.info("_config_glance_conf")
uuid = kwargs.pop('uuid', "")
pool_name = kwargs.pop('pool_name', "")
os_controller_host = kwargs.pop('os_controller_host', "")
tenant_name = kwargs.pop('tenant_name', "")
username = kwargs.pop('username', "")
password = kwargs.pop('password', "")
auth_url = kwargs.pop('auth_url', "")
region_name = kwargs.pop('region_name', "")
ssh_user = kwargs.pop('ssh_user', "")
_url = None
if "v3" in auth_url:
connection = self._keystone_v3(tenant_name, username, password, auth_url, region_name)
response_data = json.loads(connection.read())
services_list = response_data['token']['catalog']
endpoints_list = []
for service in services_list:
service_type = service['type']
service_name = service['name']
if service_type == "image" and service_name == "glance":
endpoints_list = service['endpoints']
break
for endpoint in endpoints_list:
interface = endpoint['interface']
region_id = endpoint['region_id']
if region_name:
if interface == "public" and region_id == region_name:
_url = endpoint['url']
break
else:
if len(endpoints_list) == 3:
_url = endpoint['url']
break
pass
elif "v2.0" in auth_url:
connection = self._keystone_v2(tenant_name, username, password, auth_url, region_name)
response_data = json.loads(connection.read())
services_list = response_data['access']['serviceCatalog']
endpoints_list = []
for service in services_list:
service_type = service['type']
service_name = service['name']
if service_type == "image" and service_name == "glance":
endpoints_list = service['endpoints']
break
for endpoint in endpoints_list:
region = endpoint['region']
if region == region_name:
_url = endpoint['publicURL']
break
glance_host = _url.split("/")[2].split(":")[0]
LOG.info("uuid = %s, glance_host = %s, pool_name = %s, os_controller_host = %s, "
"ssh_user = %s" % (uuid, glance_host, pool_name, os_controller_host, ssh_user))
try:
out, err = utils.execute(
'presentpool',
'glance',
ssh_user,
uuid,
glance_host,
os_controller_host,
pool_name,
run_as_root=True
)
LOG.info("present pool on glance-api host logs = %s" % out)
except:
LOG.info("Failed to present pool on glance-api host")
pass
def present_storage_pools(self, context, info):
LOG.info('agent present_storage_pools()')
LOG.info('body = %s' % info)
regions = {}
for pool in info:
as_glance_store_pool = pool['as_glance_store_pool']
appnode_id = pool['appnode_id']
appnode = db.appnodes_get_by_id(context, appnode_id)
volume_host = pool['cinder_volume_host']
tenant_name = appnode['os_tenant_name']
username = appnode['os_username']
password = <PASSWORD>node['<PASSWORD>']
auth_url = appnode['os_auth_url']
region_name = appnode['os_region_name']
os_controller_host = auth_url.split(":")[1][2:]
pool_type = pool['pool_type']
pool_name = pool['pool_name']
uuid = appnode['uuid']
ssh_user = appnode['ssh_user']
# if is_glance_store_pool, present pool for openstack glance
if as_glance_store_pool:
self._config_glance_conf(uuid=uuid,
pool_name=pool_name,
os_controller_host=os_controller_host,
username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
region_name=region_name,
ssh_user=ssh_user)
if not volume_host:
return
# present pool for openstack cinder
self._config_cinder_conf(uuid=uuid,
volume_host=volume_host,
pool_type=pool_type,
pool_name=pool_name,
ssh_user=ssh_user,
os_controller_host=os_controller_host
)
# only config nova.conf at the first time
if region_name not in regions.keys() or (
region_name in regions.keys() and
os_controller_host != regions.get(region_name)):
regions.update({region_name: os_controller_host})
self._config_nova_conf(uuid=uuid,
username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
region_name=region_name,
ssh_user=ssh_user,
os_controller_host=os_controller_host
)
volume_type = pool_type + "-" + pool_name
if "v3" in auth_url:
def _get_volume_type_list():
volume_type_list = []
i = 0
while i < 60:
try:
connection = self._keystone_v3(tenant_name, username, password,
auth_url, region_name)
response_data = json.loads(connection.read())
services_list = response_data['token']['catalog']
endpoints_list = []
_url = None
for service in services_list:
service_type = service['type']
service_name = service['name']
if service_type == "volume" and service_name == "cinder":
endpoints_list = service['endpoints']
break
for endpoint in endpoints_list:
interface = endpoint['interface']
region_id = endpoint['region_id']
if region_name:
if interface == "public" and region_id == region_name:
_url = endpoint['url']
break
else:
if len(endpoints_list) == 3:
_url = endpoint['url']
break
token = connection.info().getheader('X-Subject-Token')
url_list = _url.split(":")
auth_url_list = auth_url.split(":")
url_list[1] = auth_url_list[1]
url = ":".join(url_list) + "/types?is_public=None"
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header("content-type", "application/json")
req.add_header("X-Auth-Token", token)
resp = urllib2.urlopen(req)
volume_type_list = json.loads(resp.read())
volume_type_list = volume_type_list['volume_types']
i = 60
except:
i = i + 1
time.sleep(i)
return volume_type_list, token, ":".join(url_list)
type_list, token, url = _get_volume_type_list()
if volume_type not in [type['name'] for type in type_list]:
url = url + "/types"
req = urllib2.Request(url)
req.get_method = lambda: 'POST'
req.add_header("content-type", "application/json")
req.add_header("X-Auth-Token", token)
type_data = {"volume_type": {"os-volume-type-access:is_public": True, "name": volume_type, "description": None}}
req.add_data(json.dumps(type_data))
resp = urllib2.urlopen(req)
volume_resp = json.loads(resp.read())
_volume_type = volume_resp['volume_type']
type_id = _volume_type['id']
LOG.info("creating volume type = %s" % volume_type)
url = url + "/%s/extra_specs" % str(type_id)
req = urllib2.Request(url)
req.get_method = lambda: 'POST'
req.add_header("content-type", "application/json")
req.add_header("X-Auth-Token", token)
key_data = {"extra_specs": {"volume_backend_name": pool_name}}
req.add_data(json.dumps(key_data))
urllib2.urlopen(req)
LOG.info("Set extra specs {volume_backend_name: %s} on a volume type" % pool_name)
else:
def _get_volume_type_list():
volume_type_list = []
i = 0
while i < 60:
try:
cinderclient = cc.Client(
username,
password,
tenant_name,
auth_url,
region_name=region_name
)
volume_type_list = cinderclient.volume_types.list()
i = 60
except:
i = i + 1
time.sleep(i)
return volume_type_list
if volume_type not in [type.name for type in _get_volume_type_list()]:
cinderclient = cc.Client(
username,
password,
tenant_name,
auth_url,
region_name=region_name
)
cinder = cinderclient.volume_types.create(volume_type)
LOG.info("creating volume type = %s" % volume_type)
cinder.set_keys({"volume_backend_name": pool_name})
LOG.info("Set extra specs {volume_backend_name: %s} on a volume type" % pool_name)
def _create_osd_state(self, context, strg, osd_id):
osd_state = {}
osd_state['osd_name'] = 'osd.%d' % osd_id
osd_state['device_id'] = strg['dev_id']
osd_state['storage_group_id'] = strg['storage_group_id']
osd_state['service_id'] = strg['service_id']
osd_state['cluster_id'] = strg['cluster_id']
osd_state['state'] = FLAGS.osd_in_up
osd_state['operation_status'] = FLAGS.vsm_status_present
osd_state['weight'] = 1.0
osd_state['public_ip'] = strg['secondary_public_ip']
osd_state['cluster_ip'] = strg['cluster_ip']
osd_state['deleted'] = 0
osd_state['zone_id'] = strg['zone_id']
LOG.info('ADD_OSD _create_osd_state %s' % osd_state)
self._conductor_rpcapi.osd_state_create(context, osd_state)
def _remove_osd_state(self, context, id):
osd_name = 'osd.%s' % id
val = { 'osd_name': osd_name, 'deleted': 1 }
self._conductor_rpcapi.osd_state_update_or_create(context,
val, create=False)
def get_ceph_config(self, context):
return CephConfigParser(FLAGS.ceph_conf).as_dict()
def inital_ceph_osd_db_conf(self, context, server_list, file_system, ceph_conf_in_cluster_manifest=None):
config = CephConfigParser(sync=False) # building ceph.conf for the first time, no file, no initial sync
osd_num = db.device_get_count(context)
LOG.info("osd_num:%d" % osd_num)
settings = db.vsm_settings_get_all(context)
for setting in settings:
if setting['name'] == 'ceph_near_full_threshold':
cnfth = setting['value']
elif setting['name'] == 'ceph_full_threshold':
cfth = setting['value']
elif setting['name'] == 'pg_count_factor':
pg_count_factor = int(setting['value'])
elif setting['name'] == 'heartbeat_interval':
heartbeat_interval = setting['value']
elif setting['name'] == 'osd_heartbeat_interval':
osd_heartbeat_interval = setting['value']
elif setting['name'] == 'osd_heartbeat_grace':
osd_heartbeat_grace = setting['value']
elif setting['name'] == 'osd_pool_default_size':
pool_default_size = setting['value']
global_kvs = {'heartbeat_interval':heartbeat_interval,
'osd_heartbeat_interval':osd_heartbeat_interval,
'osd_heartbeat_grace':osd_heartbeat_grace,
'pool_default_size':pool_default_size,
}
if ceph_conf_in_cluster_manifest:
for cell in ceph_conf_in_cluster_manifest:
if not cell['name'].startswith('osd_') and not cell['name'].startswith('mon_'):
global_kvs[cell['name']] = cell['default_value']
config.add_global(global_kvs)
is_first_mon = True
is_first_osd = True
mon_cnt = -1
osd_cnt = -1
for host in server_list:
#DEBUG for debug here.
LOG.info(' host list: %s' % host)
if host['is_monitor']:
mon_cnt = mon_cnt + 1
monitor = db.init_node_get_by_id(context, host['id'])
hostname = monitor['host']
hostip = monitor['secondary_public_ip']
if is_first_mon:
# config.add_mds_header()
#config.add_mds(hostname, hostip, '0')
#values = {'mds': 'yes'}
#db.init_node_update(context, host['id'], values)
mon_header_kvs = {
'cnfth':cnfth,
'cfth':cfth,
}
if ceph_conf_in_cluster_manifest:
for cell in ceph_conf_in_cluster_manifest:
if cell['name'].startswith('mon_'):
global_kvs[cell['name']] = cell['default_value']
config.add_mon_header(mon_header_kvs)
is_first_mon = False
config.add_mon(hostname, hostip, mon_cnt)
else:
config.add_mon(hostname, hostip, mon_cnt)
if host['is_storage']:
# Get disks list info from DB.
strgs = self._conductor_rpcapi.\
host_storage_groups_devices(context,
host['id'])
LOG.info('strg list: %s' % strgs)
if strgs and is_first_osd:
fs_type = strgs[0]['file_system']
osd_heartbeat_interval= db.vsm_settings_get_by_name(context,'osd_heartbeat_interval').get('value')
osd_heartbeat_grace= db.vsm_settings_get_by_name(context,'osd_heartbeat_grace').get('value')
# validate fs type
if fs_type in ['xfs', 'ext3', 'ext4', 'btrfs']:
#config.add_osd_header(osd_type=fs_type)
cluster = db.cluster_get_all(context)[0]
mount_options = cluster['mount_option']
if not mount_options:
mount_options = utils.get_fs_options(fs_type)[1]
osd_header_kvs = {
'osd_type':fs_type,
'osd_heartbeat_interval':osd_heartbeat_interval,
'osd_heartbeat_grace':osd_heartbeat_grace,
'osd_mount_options_'+fs_type:mount_options,
}
if ceph_conf_in_cluster_manifest:
for cell in ceph_conf_in_cluster_manifest:
if cell['name'].startswith('osd_'):
osd_header_kvs[cell['name']] = cell['default_value']
config.add_osd_header(osd_header_kvs)
else:
config.add_osd_header()
is_first_osd = False
for strg in strgs:
# NOTE: osd_cnt stands for the osd_id.
osd_cnt = osd_cnt + 1
LOG.info(' strg = %s' % \
(json.dumps(strg, sort_keys=True, indent=4)))
config.add_osd(strg['host'],
strg['secondary_public_ip'],
strg['cluster_ip'],
strg['dev_name'],
strg['dev_journal'],
osd_cnt)
self._create_osd_state(context,
strg,
osd_cnt)
mount_point = '%sosd%s' % \
(FLAGS.osd_data_path, osd_cnt)
utils.ensure_tree(mount_point)
val = {}
val['id'] = strg['dev_id']
val['mount_point'] = mount_point
val['fs_type'] = file_system
LOG.info('device_update values = %s, osd_id = %s' % \
(val, osd_cnt))
self._conductor_api.device_update(context, val['id'], val)
config.save_conf(FLAGS.ceph_conf)
def mkcephfs(self):
LOG.info('mkcephfs in agent/driver.py')
utils.execute('mkcephfs',
'-a',
'-c', FLAGS.ceph_conf,
'-k', FLAGS.keyring_admin,
# '--mkfs',
run_as_root=True)
LOG.info('mkcephfs over in agent/driver.py')
return True
def start_ceph(self, context):
utils.execute('service', 'ceph', '-a', 'start', run_as_root=True)
return True
def _stop_all_ceph_service(self):
run_path = '/var/run/ceph/'
try:
pids = utils.execute('ls', run_path, run_as_root=True)[0]
for pid_file in pids.split():
try:
LOG.info('KILL %s' % pid_file)
if pid_file.find('pid') != -1:
self._kill_by_pid_file(run_path + pid_file)
except:
LOG.info('KILL PROCESS')
pids = utils.execute('pgrep', 'ceph', run_as_root=True)[0]
for pid in pids.split():
try:
LOG.info('KILL pid = %s' % pid)
utils.execute('kill', '-9',
pid,
ignore_exit_code=True,
run_as_root=True)
except:
LOG.info('KILL BY PGREP')
except:
LOG.info('Stop meet error')
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
LOG.info("ceph version is greater than hammer, ceph user exists")
LOG.info("Create /var/lib/ceph directory, and chown ceph:ceph")
utils.execute('mkdir', '-p', run_path, run_as_root=True)
utils.execute('chown', '-R', 'ceph:ceph',
run_path, run_as_root=True)
def _clean_dirs(self, dir_path):
try:
files = utils.execute('ls', dir_path, run_as_root=True)[0]
files = files.split()
for f in files:
try:
utils.execute('rm', '-rf', dir_path + "/" + f,
ignore_exit_code=True,
run_as_root=True)
except:
LOG.info('Error when delete file = %s' % f)
except:
LOG.info('LOOK UP dir failed %s' % dir_path)
def _clean_ceph_conf(self):
try:
self._clean_dirs('/etc/ceph/')
except:
LOG.info('Delete files meet error!')
def _clean_lib_ceph_files(self):
# delete dirty files in mds.
try:
osd_list = utils.execute('ls', '/var/lib/ceph/osd/',
ignore_exit_code=True,
run_as_root=True)[0]
LOG.info('Get osd_list = %s' % osd_list)
for osd in osd_list.split():
try:
LOG.info('Begin to umount %s' % osd)
self._clean_dirs('/var/lib/ceph/osd/%s' % osd)
utils.execute('umount', '/var/lib/ceph/osd/' + osd,
ignore_exit_code = True,
run_as_root=True)
except:
LOG.info('umount /var/lib/ceph/osd/%s' % osd)
self._clean_dirs('/var/lib/ceph/')
except:
LOG.info('rm monitor files error')
def _build_lib_ceph_dirs(self):
try:
dirs_list = ['bootstrap-mds', 'bootstrap-osd',
'mds', 'mon', 'osd', 'tmp']
for d in dirs_list:
utils.execute('mkdir', '-p', '/var/lib/ceph/' + d,
run_as_root=True)
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown', '-R',
'ceph:ceph',
'/var/lib/ceph',
run_as_root=True)
except:
LOG.info('build dirs in /var/lib/ceph failed!')
def __format_devs(self, context, disks, file_system):
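        """Format every device in 'disks' with the cluster file system.

        Uses the cluster-wide mkfs_option when one is configured, otherwise
        the defaults from utils.get_fs_options(); one thread per disk.
        """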
# format devices to xfs.
def ___fdisk(disk):
cluster = db.cluster_get_all(context)[0]
mkfs_option = cluster['mkfs_option']
if not mkfs_option:
mkfs_option = utils.get_fs_options(file_system)[0]
utils.execute('mkfs.%s' % file_system,
mkfs_option,
disk,
run_as_root=True)
thd_list = []
for disk in disks:
thd = utils.MultiThread(___fdisk, disk=disk)
thd_list.append(thd)
try:
utils.start_threads(thd_list)
except:
pass
def clean_ceph_data(self, context, osd_disks, journal_disks, file_system):
utils.execute('chown', '-R', 'vsm:vsm', '/var/lib/ceph/',
run_as_root=True)
self._stop_all_ceph_service()
self._stop_all_ceph_service()
time.sleep(1)
self._clean_ceph_conf()
self._clean_lib_ceph_files()
self._build_lib_ceph_dirs()
self.__format_devs(context, osd_disks + journal_disks, file_system)
return {'status': 'ok'}
def get_dev_by_mpoint(self, directory):
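        """Map a mounted directory back to its block device.

        Looks up the directory's st_dev major/minor numbers in
        /proc/partitions and returns the matching /dev/<name> path.
        """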
def _parse_proc_partitions():
parts = {}
for line in file('/proc/partitions'):
fields = line.split()
try:
dmaj = int(fields[0])
dmin = int(fields[1])
name = fields[3]
parts[(dmaj, dmin)] = name
except:
pass
return parts
dev = os.stat(directory).st_dev
major, minor = os.major(dev), os.minor(dev)
parts = _parse_proc_partitions()
return '/dev/' + parts[(major, minor)]
def mount_disks(self, context, devices, fs_type):
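        """Create the mount points and mount every osd data device.

        On releases newer than hammer the mount point, data device and
        journal device are chowned to ceph:ceph first; the mount uses the
        cluster-wide mount_option (or the file-system default). One thread
        per device.
        """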
def __mount_disk(disk):
utils.execute('mkdir',
'-p',
disk['mount_point'],
run_as_root=True)
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown', '-R',
'ceph:ceph',
disk['mount_point'],
run_as_root=True)
utils.execute('chown',
'ceph:ceph',
disk['name'],
run_as_root=True)
utils.execute('chown',
'ceph:ceph',
disk['journal'],
run_as_root=True)
cluster = db.cluster_get_all(context)[0]
mount_options = cluster['mount_option']
if not mount_options:
mount_options = utils.get_fs_options(fs_type)[1]
utils.execute('mount',
'-t', fs_type,
'-o', mount_options,
disk['name'],
disk['mount_point'],
run_as_root=True)
thd_list = []
for dev in devices:
thd = utils.MultiThread(__mount_disk, disk=dev)
thd_list.append(thd)
utils.start_threads(thd_list)
def is_new_storage_group(self, storage_group):
nodes = self.get_crushmap_nodes()
for node in nodes:
if storage_group == node['name']:
return False
return True
def is_new_zone(self, zone):
nodes = self.get_crushmap_nodes()
for node in nodes:
if zone == node['name']:
return False
return True
def get_ceph_osd_info(self):
'''
Locally execute 'ceph osd dump -f json' and return the json block as a python data structure.
:return: a python data structure containing the json content returned by 'ceph osd dump -f json'
'''
output = utils.execute("ceph", "osd", "dump", "-f", "json", run_as_root=True)[0]
return json.loads(output)
def get_ceph_disk_list(self):
'''
Execute 'sudo ceph-disk list' and gather ceph partition info.
:return: a python data structure containing the content of 'sudo ceph-disk list'
'''
output = utils.execute('ceph-disk', 'list', run_as_root=True)[0]
return self.v09_ceph_disk_list_parser(output) if 'ceph data' in output\
else self.v08_ceph_disk_list_parser(output)
def v09_ceph_disk_list_parser(self, output):
'''
Parse the output of 'ceph-disk list' as if we're running against a v0.9 ceph (infernalis) or higher.
:param output: the output to be parsed.
:return: a list of disk-info dictionaries.
'''
disk_list = []
for line in output.split('\n'):
if 'ceph data' in line:
# /dev/sdb1 ceph data, active, cluster ceph, osd.0, journal /dev/sdb2
disk_dict = {}
parts = line.strip().split(', ')
disk_dict[u'dev'] = parts[0].split()[0]
disk_dict[u'state'] = parts[1]
disk_dict[u'cluster'] = parts[2].split()[-1]
disk_dict[u'id'] = int(parts[3].split('.')[-1])
disk_dict[u'journal'] = parts[4].split()[-1]
disk_list.append(disk_dict)
return disk_list
def v08_ceph_disk_list_parser(self, output):
'''
Parse the output of 'ceph-disk list' as if we're running against a v0.8 ceph (firefly) or lower.
:param output: the output to be parsed.
:return: a list of disk-info dictionaries.
'''
disk_list = []
for line in output.split('\n'):
if '/osd/' in line:
# /dev/sdb4 other, xfs, mounted on /var/lib/ceph/osd/osd0
disk_dict = {}
parts = line.strip().split(', ')
osd_path = parts[-1].split()[-1]
osd_id = self.get_osd_whoami(osd_path)
osd_daemon_cfg = self.get_osd_daemon_map(osd_id, 'config')
osd_daemon_status = self.get_osd_daemon_map(osd_id, 'status')
disk_dict[u'dev'] = parts[0].split()[0]
disk_dict[u'state'] = osd_daemon_status['state']
disk_dict[u'cluster'] = osd_daemon_cfg['cluster']
disk_dict[u'id'] = osd_id
disk_dict[u'journal'] = osd_daemon_cfg['osd_journal']
disk_list.append(disk_dict)
return disk_list
def get_osd_whoami(self, osd_path):
'''
Return the osd id number for the osd on the specified path.
:param osd_path: the device path of the osd - e.g., /var/lib/ceph/osd...
:return: an integer value representing the osd id number for the target osd.
'''
output = utils.execute('cat', osd_path+'/whoami', run_as_root=True)[0]
return int(output)
def get_osd_daemon_map(self, oid, reqtype):
'''
command: ceph daemon osd.{oid} config show
output: { "cluster": "ceph",
...
"osd_journal": "\/dev\/sdc1"}
:param oid: the id number of the osd for which to obtain a journal device path.
:param reqtype: the type of request - 'config' or 'status' (could be expanded to other types later).
:return: a dictionary containing configuration parameters and values for the specified osd.
'''
values = {}
arglist = ['ceph', 'daemon', 'osd.'+str(oid)]
arglist.extend(['config', 'show'] if reqtype == 'config' else ['status'])
output = utils.execute(*arglist, run_as_root=True)[0]
for line in output.split('\n'):
if len(line.strip()) > 1:
attr, val = tuple(line.translate(None, ' {"\},').split(':', 1))
values[attr] = val
return values
def add_osd(self, context, host_id, osd_id_in=None):
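        """Add the osds of a host (or a single osd) to the running cluster.

        For every device reported for the host (only the one matching
        osd_id_in when that is given): allocate an osd id with
        'ceph osd create', make sure the storage group / zone / host buckets
        and the crush rule exist, record the osd state in the DB and hand the
        device over to _add_osd() for the actual mkfs / mount / auth / start
        sequence.
        """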
if osd_id_in is not None:
osd_obj = db.osd_get(context, osd_id_in)
host_obj = db.init_node_get_by_device_id(context, osd_obj.device_id)
host_id = host_obj.id
LOG.info("begin to add osd %s from host %s"%(osd_obj.device_id,host_id))
LOG.info('start to ceph osd on %s' % host_id)
strg_list = self._conductor_api.\
host_storage_groups_devices(context, host_id)
LOG.info('strg_list %s' % strg_list)
#added_to_crushmap = False
osd_cnt = len(strg_list)
if osd_id_in is not None:
osd_cnt = 1
count = 0
for strg in strg_list:
if osd_id_in is not None and strg.get("dev_id") != osd_obj.device_id:
continue
LOG.info('>> Step 1: start to ceph osd %s' % strg)
count = count + 1
if osd_id_in is None:
self._conductor_api.init_node_update(context, host_id, {"status": "add_osd %s/%s"%(count,osd_cnt)})
# Create osd from # ceph osd create
stdout = utils.execute("ceph",
"osd",
"create",
run_as_root=True)[0]
osd_id = str(int(stdout))
LOG.info(' gen osd_id success: %s' % osd_id)
# step 1 end
host = strg['host']
zone = strg['zone']
            # TODO: strg['storage_group'] stands for the storage_group_name
            # fetched from the DB.
if strg.get('storage_group',None) is None:
default_storage_group = db.storage_group_get_all(context)[0]
strg['storage_group'] = default_storage_group['name']
strg['storage_group_id'] = default_storage_group['id']
storage_group = strg['storage_group']
crush_dict = {"root": "vsm",
"storage_group": storage_group,
"zone": "_".join([zone, storage_group]),
"host": "_".join([host, storage_group, zone]),}
osd_conf_dict = {"host": host,
"primary_public_ip": strg['primary_public_ip'],
"secondary_public_ip": strg['secondary_public_ip'],
"cluster_ip": strg['cluster_ip'],
"dev_name": strg['dev_name'],
"dev_journal": strg['dev_journal'],
"file_system": strg['file_system']}
osd_state = {}
osd_state['osd_name'] = 'osd.%s' % osd_id
osd_state['device_id'] = strg['dev_id']
osd_state['storage_group_id'] = strg['storage_group_id']
osd_state['service_id'] = strg['service_id']
osd_state['cluster_id'] = strg['cluster_id']
osd_state['state'] = FLAGS.osd_in_up
osd_state['weight'] = 1.0
osd_state['operation_status'] = FLAGS.vsm_status_present
osd_state['public_ip'] = strg['secondary_public_ip']
osd_state['cluster_ip'] = strg['cluster_ip']
osd_state['deleted'] = 0
osd_state['zone_id'] = strg['zone_id']
if osd_id_in is not None:
osd_state_ref = db.osd_state_update(context,osd_id_in,osd_state)
else:
osd_state_ref = self._conductor_api.osd_state_create(context, osd_state)
osd_state['osd_location'] = osd_state_ref['osd_location']
osd_state['weight'] = osd_state_ref['weight'] and float(osd_state_ref['weight']) or 1.0
LOG.info('>> crush_dict %s' % crush_dict)
LOG.info('>> osd_conf_dict %s' % osd_conf_dict)
LOG.info('>> osd_state %s' % osd_state)
values = {}
#if not added_to_crushmap:
# LOG.info('>> add crushmap ')
crushmap = self.get_crushmap_json_format()
types = crushmap.get_all_types()
types.sort(key=operator.itemgetter('type_id'))
if self.is_new_storage_group(crush_dict['storage_group']):
self._crushmap_mgmt.add_storage_group(crush_dict['storage_group'],\
crush_dict['root'],types=types)
# zones = db.zone_get_all_not_in_crush(context)
# for item in zones:
# zone_item = item['name'] + '_' + crush_dict['storage_group']
# self._crushmap_mgmt.add_zone(zone_item, \
# crush_dict['storage_group'],types=types)
#
if zone == FLAGS.default_zone:
self._crushmap_mgmt.add_rule(crush_dict['storage_group'], 'host')
else:
self._crushmap_mgmt.add_rule(crush_dict['storage_group'], 'zone')
#TODO update rule_id and status in DB
rule_dict = self.get_crush_rule_dump_by_name(crush_dict['storage_group'])
LOG.info("rule_dict:%s" % rule_dict)
values['rule_id'] = rule_dict['rule_id']
if self.is_new_zone(crush_dict['zone']):
self._crushmap_mgmt.add_zone(crush_dict['zone'], \
crush_dict['storage_group'], types=types)
self._crushmap_mgmt.add_host(crush_dict['host'],
crush_dict['zone'], types=types)
# added_to_crushmap = True
#There must be at least 3 hosts in every storage group when the status is "IN"
zones, hosts = self._conductor_rpcapi.zones_hosts_get_by_storage_group(context, \
crush_dict['storage_group'])
#LOG.info("storage group:%s" % crush_dict['storage_group'])
#LOG.info("zones:%s" % zones)
#LOG.info("hosts:%s" % hosts)
#no zone and zone version
if zones:
if zones[0] == FLAGS.default_zone:
if host not in hosts and len(hosts) >= 2:
values['status'] = FLAGS.storage_group_in
else:
if zone not in zones and len(zones) >= 2:
values['status'] = FLAGS.storage_group_in
if values:
db.storage_group_update_by_name(context, crush_dict['storage_group'], values)
# other steps
LOG.info('>> _add_osd start ')
self._add_osd(context,
osd_id,
crush_dict,
osd_conf_dict,
osd_state)
try:
self.run_add_disk_hook(context)
except:
LOG.info('run add_disk error')
return True
def _add_osd(self,
context,
osd_id,
crush_dict,
osd_conf_dict,
osd_state,
weight="1.0"):
# step 2
LOG.info('>>> step2 start')
#osd_pth = '%sceph-%s' % (FLAGS.osd_data_path, osd_id)
#osd_keyring_pth = "%s/keyring" % osd_pth
#osd_pth = '/var/lib/ceph/osd/osd%s' % osd_id
#osd_keyring_pth = '/etc/ceph/keyring.osd.%s' % osd_id
try:
osd_data_path = self.get_ceph_config(context)['osd']['osd data']
osd_pth = osd_data_path.replace('$id',osd_id)
except:
osd_pth = os.path.join(FLAGS.osd_data_path, "osd" + osd_id)
LOG.info('osd add osd_pth =%s'%osd_pth)
osd_keyring_pth = self.get_ceph_config(context)['osd']['keyring']
osd_keyring_pth = osd_keyring_pth.replace('$id',osd_id).replace('$name','osd.%s'%osd_id)
LOG.info('osd add keyring path=%s'%osd_keyring_pth)
utils.ensure_tree(osd_pth)
# step 3
LOG.info('>>> step3 start')
# get cluster file system to format the disk
utils.execute("umount",
osd_conf_dict['dev_name'],
check_exit_code=False,
run_as_root=True)
LOG.debug("The file system is %s" % osd_conf_dict['file_system'])
file_system = 'xfs'
if osd_conf_dict['file_system']:
file_system = osd_conf_dict['file_system']
cluster = db.cluster_get_all(context)[0]
mkfs_option = cluster['mkfs_option']
if not mkfs_option:
mkfs_option = utils.get_fs_options(file_system)[0]
utils.execute("mkfs",
"-t", file_system,
mkfs_option, osd_conf_dict['dev_name'],
run_as_root=True)
# TODO: does not support ext4 for now.
# Need to use -o user_xattr for ext4
mount_option = cluster['mount_option']
if not mount_option:
mount_option = utils.get_fs_options(file_system)[1]
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown',
'ceph:ceph',
osd_conf_dict['dev_name'],
run_as_root=True)
utils.execute('chown',
'ceph:ceph',
osd_conf_dict['dev_journal'],
run_as_root=True)
utils.execute("mount",
"-t", file_system,
"-o", mount_option,
osd_conf_dict['dev_name'],
osd_pth,
run_as_root=True)
self._clean_dirs(osd_pth)
# step 3.1
LOG.info('>>> step3.1 start')
ret = self._add_ceph_osd_to_config(context, osd_conf_dict, osd_id)
# step 4 add to config file before this step
LOG.info('>>> step4 start')
utils.execute("ceph-osd", "-i", osd_id, "--mkfs", "--mkkey",
run_as_root=True)
# step 5
LOG.info('>>> step5 start')
utils.execute("ceph", "auth", "del", "osd.%s" % osd_id,
run_as_root=True)
utils.execute("ceph", "auth", "add", "osd.%s" % osd_id,
"osd", "allow *", "mon", "allow rwx",
"-i", osd_keyring_pth,
run_as_root=True)
# step 6 zone host stg
LOG.info('>>> step6 start')
# utils.execute("ceph", "osd", "crush", "add", "osd.%s" % osd_id, weight,
# "root=%s" % crush_dict['root'],
# "storage_group=%s" % crush_dict['storage_group'],
# "zone=%s" % crush_dict['zone'], "host=%s" % crush_dict['host'],
# run_as_root=True)
all_osd_in_host = db.osd_state_get_by_service_id(context,osd_state['service_id'])
# other_osd_in_host = [osd['osd_name'] for osd in all_osd_in_host if osd['device_id'] != osd_state['device_id'] and osd['state'] != 'Uninitialized']
crushmap = self.get_crushmap_json_format()
LOG.info("osd_location_direct=======%s"%osd_state.get('osd_location'))
osd_location_direct = osd_state.get('osd_location')
if osd_location_direct:
if osd_location_direct.find('=') != -1:
osd_location_str = osd_location_direct
else:
osd_location_str = "%s=%s"%(crushmap._types[1]['name'],osd_location_direct)
# elif len(other_osd_in_host) > 0:
# osd_location = crushmap._get_location_by_osd_name(other_osd_in_host[0])
# osd_location_str = "%s=%s"%(osd_location['type_name'],osd_location['name'])
else:
osd_location = crush_dict['host']
osd_location_str = "%s=%s"%(crushmap._types[1]['name'],osd_location)
LOG.info("osd_location_str=======%s"%osd_location_str)
utils.execute("ceph", "osd", "crush", "add", "osd.%s" % osd_id, weight,
osd_location_str,
run_as_root=True)
# step 7 start osd service
LOG.info('>>> step7 start')
self.start_osd_daemon(context, osd_id, is_vsm_add_osd=True)
utils.execute("ceph", "osd", "crush", "create-or-move", "osd.%s" % osd_id, weight,
osd_location_str,
run_as_root=True)
#LOG.info('osd-to-db==%s'%osd_state)
#self._conductor_api.osd_state_create(context, osd_state)
LOG.info('>>> step7 finish')
return True
def _add_ceph_osd_to_config(self, context, strg, osd_id):
LOG.info('>>>> _add_ceph_osd_to_config start')
config = CephConfigParser(FLAGS.ceph_conf)
ip = strg['secondary_public_ip']
config.add_osd(strg['host'], ip, strg['cluster_ip'],
strg['dev_name'], strg['dev_journal'], osd_id)
LOG.info('>>>> _add_ceph_osd_to_config config %s ' % config.as_dict())
LOG.info('>>>> _add_ceph_osd_to_config added')
config.save_conf(FLAGS.ceph_conf)
return True
def get_crushmap_json_format(self,keyring=None):
        '''
        Dump the crushmap with 'ceph osd crush dump' (optionally with an
        explicit keyring) and return it wrapped in a CrushMap object.
        :return: a CrushMap built from the JSON crush dump.
        '''
if keyring:
json_crushmap,err = utils.execute('ceph', 'osd', 'crush', 'dump','--keyring',keyring, run_as_root=True)
else:
json_crushmap,err = utils.execute('ceph', 'osd', 'crush', 'dump', run_as_root=True)
crushmap = CrushMap(json_context=json_crushmap)
return crushmap
def add_monitor(self, context, host_id, mon_id, port="6789"):
LOG.info('>> start to add mon %s on %s' % (mon_id, host_id))
ser = self._conductor_rpcapi.init_node_get_by_id(context, host_id)
host_ip = ser['secondary_public_ip']
LOG.info('>> start to add mon %s' % host_ip)
# TODO
# step 1
LOG.info('>> add mon step 1 ')
try:
mon_data_path = self.get_ceph_config(context)['mon']['mon data']
mon_path = mon_data_path.replace('$id',mon_id)
#LOG.info('osd restore mon_pth =%s'%mon_path)
except:
mon_path = os.path.join(FLAGS.monitor_data_path,"mon" + mon_id)
utils.ensure_tree(mon_path)
# step 2
LOG.info('>> add mon step 2 ')
tmp_pth = "/tmp"
monitor_key_pth = os.path.join(tmp_pth, 'monitor_key')
monitor_map_pth = os.path.join(tmp_pth, 'monitor_map')
# step 3
LOG.info('>> add mon step 3 ')
utils.execute("ceph", "auth", "get", "mon.", "-o", monitor_key_pth,
run_as_root=True)
# step 4
LOG.info('>> add mon step 4 ')
utils.execute("ceph", "mon", "getmap", "-o", monitor_map_pth,
run_as_root=True)
# step 5
LOG.info('>> add mon step 5 ')
utils.execute("ceph-mon", "-i", mon_id, "--mkfs",
"--monmap", monitor_map_pth,
"--keyring", monitor_key_pth,
run_as_root=True)
def __add_ceph_mon_to_config(context, host, host_ip, mon_id):
config = CephConfigParser(FLAGS.ceph_conf)
config.add_mon(host, host_ip, mon_id=mon_id)
config.save_conf(FLAGS.ceph_conf)
return True
## step 6
#LOG.info('>> add mon step 6 ')
#host = ":".join([host_ip, port])
#utils.execute("ceph", "mon", "add", mon_id, host, run_as_root=True)
## step 7
#LOG.info('>> add mon step 7 ')
#__add_ceph_mon_to_config(context, ser['host'], host_ip, mon_id=mon_id)
#utils.execute("ceph-mon", "-i", mon_id, "--public-addr", host,
# run_as_root=True)
#changed by ly
# step 6
LOG.info('>> add mon step 6 ')
host = ":".join([host_ip.split(',')[0], port])
__add_ceph_mon_to_config(context, ser['host'], host_ip, mon_id=mon_id)
#utils.execute("ceph-mon", "-i", mon_id, "--public-addr", host,
# run_as_root=True)
# step 7
LOG.info('>> add mon step 7 ')
# utils.execute("ceph", "mon", "add", mon_id, host, run_as_root=True)
self.start_mon_daemon(context, mon_id)
LOG.info('>> add mon finish %s' % mon_id)
return True
def remove_monitor(self, context, host_id, is_stop=False):
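        """Remove (or just stop) the monitor running on host_id.

        The mon id is looked up from ceph.conf by host name; the daemon is
        stopped (over ssh when the node still responds), 'ceph mon remove' is
        issued and, unless is_stop is set, the mon section is dropped from
        ceph.conf. The code after the first 'return True' is an unreachable
        recovery path kept on purpose (see the TODO below).
        """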
LOG.info('>> start to remove ceph mon on : %s' % host_id)
# get host_name
node = self._conductor_rpcapi.init_node_get_by_id(context, host_id)
host = node['host']
# get config
LOG.info('>> removing ceph mon')
config = CephConfigParser(FLAGS.ceph_conf)
# get mon_id
mon_id = None
for section in config.sections():
if section.startswith("mon."):
if config.get(section, 'host') == host:
mon_id = section.replace("mon.", "")
if not mon_id:
LOG.info('>> removing ceph mon not found')
return True
# step 1
LOG.info('>> removing ceph mon %s' % mon_id)
try:
# test ssh service in case the server is down
LOG.info('>>>> removing ceph mon step 1: test server start!')
utils.execute('ssh', '-q', 'root@' + host, 'exit', run_as_root=True)
except exception.ProcessExecutionError as e:
LOG.info('>> removing ceph mon test server error!')
code = e.exit_code
LOG.info('return code: %s' % code)
if code == 0:
# utils.execute("service",
# "ceph",
# "-a",
# "stop",
# "mon.%s" % mon_id,
# run_as_root=True)
self._operate_ceph_daemon("stop", "mon", id=mon_id, ssh=True, host=host)
# If can not ssh to that server,
# We assume that the server has been shutdown.
# Go steps below.
# step 2
LOG.info('>> removing ceph mon step 2')
        # Work around an issue in the ceph jewel release: removing the monitor
        # throws EINVAL even though the monitor is removed successfully.
try:
utils.execute("ceph",
"mon",
"remove",
mon_id,
run_as_root=True)
except:
LOG.warn("Ceph throws out an error, but monitor has been remove successfully")
pass
if not is_stop:
config.remove_mon(mon_id)
# step 3
LOG.info('>> removing ceph mon step 3')
LOG.info('>> removing ceph mon step 4:stop mon service ')
try:
self._operate_ceph_daemon("stop", "mon", id=mon_id, ssh=True, host=host)
except:
pass
LOG.info('>> removing ceph mon success!')
config.save_conf(FLAGS.ceph_conf)
return True
# TODO don't remove any code from this line to the end of func
# remove monitors from unhealthy cluster
# step 1
# try:
# utils.execute("service", "ceph", "stop", "mon", run_as_root=True)
# except:
# utils.execute("stop", "ceph-mon-all", run_as_root=True)
#self._operate_ceph_daemon("stop", "mon", id=mon_id, ssh=True, host=host)
# step 2
LOG.info('>> remove ceph mon step2 start')
tmp_pth = "/tmp"
monitor_map_pth = os.path.join(tmp_pth, 'monitor_map')
utils.execute("ceph-mon", "-i", mon_id, "--extract-monmap",
monitor_map_pth, run_as_root=True)
utils.execute("ceph-mon", "-i", "a", "--extract-monmap",
monitor_map_pth, run_as_root=True)
# step 3
LOG.info('>> remove ceph mon step3 start')
utils.execute("monmaptool", monitor_map_pth, "--rm", mon_id,
run_as_root=True)
# step 4
LOG.info('>> remove ceph mon step4 start')
utils.execute("ceph-mon", "-i", mon_id, "--inject-monmap",
monitor_map_pth, run_as_root=True)
return True
def remove_mds(self, context, host_id):
"""Remove mds service on host_id server."""
def __is_host_running(host):
try:
self._agent_rpcapi.test_service(context,
FLAGS.agent_topic,
host)
return True
            except (rpc_exc.Timeout, rpc_exc.RemoteError):
return False
def __config_remove_mds(mds_id):
config = CephConfigParser(FLAGS.ceph_conf)
config.remove_mds_header()
config.remove_mds(mds_id)
config.save_conf(FLAGS.ceph_conf)
LOG.info('>> remove ceph mds on hostid(%s) start' % host_id)
node = self._conductor_rpcapi.init_node_get_by_id(context, host_id)
values = {'mds': 'no'}
self._conductor_rpcapi.init_node_update(context, host_id, values)
host = node['host']
host_is_running = __is_host_running(host)
LOG.info('host_is_running===mds==%s'%host_is_running)
if host_is_running:
try:
self._agent_rpcapi.stop_mds(context, host)
            except (rpc_exc.Timeout, rpc_exc.RemoteError):
host_is_running = False
mds_id = self.get_mds_id(host)
if not mds_id:
            LOG.info('Did not find an mds on %s' % host_id)
return
__config_remove_mds(mds_id)
try:
utils.execute('ceph', 'mds',
'rm', mds_id, 'mds.%s' % mds_id,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
except:
pass
try:
utils.execute('ceph', 'auth', 'del',
'mds.%s' % mds_id,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
except:
pass
try:
utils.execute('ceph', 'mds', 'newfs', '0', '1',
'--yes-i-really-mean-it','--keyring',FLAGS.keyring_admin,
run_as_root=True)
except:
pass
LOG.info('remove mds success!')
def remove_osd(self, context, host_id):
def __is_host_running(host):
try:
self._agent_rpcapi.test_service(context,
FLAGS.agent_topic,
host)
return True
            except (rpc_exc.Timeout, rpc_exc.RemoteError):
return False
LOG.info('>> remove ceph osds on hostid(%s) start' % host_id)
node = self._conductor_rpcapi.init_node_get_by_id(context, host_id)
host = node['host']
host_is_running = __is_host_running(host)
LOG.info('host_is_running===osd==%s'%host_is_running)
# get config
config_dict = self.get_ceph_config(context)
# get osd_ids
osd_id_list = []
for section in config_dict:
if section.startswith("osd."):
if config_dict[section]['host'] == host:
osd_id_list.append(section.replace("osd.", ""))
LOG.info('>> remove ceph osd osd_ids %s' % osd_id_list)
for osd_id in osd_id_list:
self._remove_osd(context, osd_id, host, host_is_running)
# step 5
LOG.info('>>> remove ceph osd step5 osd_id %s' % osd_id)
osd_name = 'osd.%s' % osd_id
val = { 'osd_name': osd_name, 'deleted': 0 }
self._conductor_rpcapi.osd_state_update(context, val)
LOG.info('>>> remove ceph osd step 1-5 osd_id %s' % osd_id)
#step 6
# LOG.info('>>> Begin to remove crushmap')
# osd_tree = utils.execute('ceph', 'osd', 'tree', run_as_root=True)[0]
# LOG.info('>>> Get ceph osd tree = %s' % osd_tree)
# for line in osd_tree.split('\n'):
# if line.lower().find(host.lower()) != -1:
# for x in line.split(' '):
# if x.lower().find(host.lower()) != -1:
# utils.execute('ceph', 'osd', 'crush', 'rm', x)
LOG.info('>>> remove ceph osd finish.')
if not host_is_running:
val = {'deleted': 1}
self._conductor_rpcapi.init_node_update(context, host_id, val)
return True
def _kill_by_pid_file(self, pid_file):
# Kill process by pid file.
# mainly for ceph.
file_path = pid_file
        # Not using os.path.exists(): if the file is owned by the ceph user
        # that check would return False even though the file exists.
try:
out, err = utils.execute('ls', file_path, run_as_root=True)
except:
out = ""
# if os.path.exists(file_path):
if out:
# no permission to read if the file is owned by ceph user
# pid = open(file_path).read().strip()
out, err = utils.execute('cat', file_path, run_as_root=True)
pid = out.strip()
pid_live = os.path.exists('/proc/%s' % pid)
utils.execute('rm', '-rf', file_path, run_as_root=True)
try_times = 1
while pid_live:
try_times = try_times + 1
try:
                    utils.execute('kill', '-9', pid, run_as_root=True)
except:
LOG.info('Seems can not stop this OSD process.')
time.sleep(2)
pid_live = os.path.exists('/proc/%s' % pid)
if try_times > 100:
break
return True
def stop_osd_daemon(self, context, num):
# stop ceph-osd daemon on the storage node
# Param: the osd id
# return Bool
file_path = '/var/run/ceph/osd.%s.pid' % num
        # Not using os.path.exists(): if the file is owned by the ceph user
        # that check would return False even though the file exists.
try:
out, err = utils.execute('ls', file_path, run_as_root=True)
except:
out = ""
# if os.path.exists(file_path):
if out:
self._kill_by_pid_file(file_path)
else:
LOG.info("Not found pid file for osd.%s" % num)
try:
LOG.info("Try to stop osd %s daemon by ceph or ceph-osd command" % num)
self._operate_ceph_daemon("stop", "osd", id=num)
except:
LOG.warn("Osd %s has NOT been stopped" % num)
return True
def start_osd_daemon(self, context, num, is_vsm_add_osd=False):
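        """Start osd.<num>, fixing /var/lib/ceph and /etc/ceph ownership first
        when VSM itself just created the osd on a release newer than hammer."""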
osd = "osd.%s" % num
LOG.info('begin to start osd = %s' % osd)
if is_vsm_add_osd:
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown', '-R', 'ceph:ceph',
'/var/lib/ceph', run_as_root=True)
utils.execute('chown', '-R', 'ceph:ceph',
'/etc/ceph', run_as_root=True)
#utils.execute('service', 'ceph', 'start', osd, run_as_root=True)
#else:
self._operate_ceph_daemon("start", "osd", id=num)
return True
def stop_mon_daemon(self, context, name):
file_path = '/var/run/ceph/mon.%s.pid' % name
        # Not using os.path.exists(): if the file is owned by the ceph user
        # that check would return False even though the file exists.
try:
out, err = utils.execute('ls', file_path, run_as_root=True)
except:
out = ""
# if os.path.exists(file_path):
if out:
self._kill_by_pid_file(file_path)
else:
LOG.info("Not found pid file for mon.%s" % name)
try:
LOG.info("Try to stop mon %s daemon by ceph or ceph-mon command" % name)
self._operate_ceph_daemon("stop", "mon", id=name)
except:
LOG.warn("Mon %s has NOT been stopped" % name)
return True
def start_mon_daemon(self, context, name):
try:
self.stop_mon_daemon(context, name)
except:
pass
# mon_name = 'mon.%s' % num
# utils.execute('service', 'ceph', 'start', mon_name, run_as_root=True)
try:
self._operate_ceph_daemon("start", "mon", id=name)
except:
LOG.warn("Monitor has NOT been started!")
return True
def stop_mds_daemon(self, context, num):
file_path = '/var/run/ceph/mds.%s.pid' % num
if os.path.exists(file_path):
self._kill_by_pid_file(file_path)
else:
LOG.info('Not found pid file for mds.%s' % num)
try:
LOG.info("Try to stop mds %s daemon by ceph or ceph-mds command" % num)
self._operate_ceph_daemon("stop", "mds", id=num)
except:
LOG.warn("Mds %s has NOT been stopped" % num)
return True
def get_mds_id(self, host=FLAGS.host):
"""Stop mds service on this host."""
config = CephConfigParser(FLAGS.ceph_conf)
# get osd_ids
mds_id = None
for section in config.sections():
if section.startswith("mds."):
if config.get(section, 'host') == host:
mds_id = section.replace("mds.", "")
return mds_id
def stop_mds(self, context):
mds_id = self.get_mds_id()
if mds_id:
self.stop_mds_daemon(context, mds_id)
def start_mds_daemon(self, context, num):
mds_name = 'mds.%s' % num
# utils.execute('service', 'ceph', 'start', mds_name, run_as_root=True)
self._operate_ceph_daemon("start", "mds", id=num)
def _get_ceph_mon_map(self):
output = utils.execute("ceph", "mon", "dump", "-f", "json", run_as_root=True)[0]
return json.loads(output)
def start_monitor(self, context):
# Get info from db.
res = self._conductor_rpcapi.init_node_get_by_host(context, FLAGS.host)
node_type = res.get('type', None)
# get mon_id
mon_id = None
monmap = self._get_ceph_mon_map()
mons = monmap['mons']
for mon in mons:
if mon['name'] == FLAGS.host:
mon_id = mon['name']
# Try to start monitor service.
if mon_id:
LOG.info('>> start the monitor id: %s' % mon_id)
if node_type and node_type.find('monitor') != -1:
self.start_mon_daemon(context, mon_id)
def stop_monitor(self, context):
# Get info from db.
res = self._conductor_rpcapi.init_node_get_by_host(context, FLAGS.host)
node_type = res.get('type', None)
# get mon_id
mon_id = None
monmap = self._get_ceph_mon_map()
mons = monmap['mons']
for mon in mons:
if mon['name'] == FLAGS.host:
mon_id = mon['name']
# Try to stop monitor service.
if mon_id:
LOG.info('>> stop the monitor id: %s' % mon_id)
if node_type and node_type.find('monitor') != -1:
self.stop_mon_daemon(context, mon_id)
def start_osd(self, context):
# Start all the osds on this node.
osd_list = []
config = CephConfigParser(FLAGS.ceph_conf)
for section in config.sections():
if section.startswith("osd."):
if config.get(section, 'host') == FLAGS.host:
osd_id = section.replace("osd.", "")
osd_list.append(osd_id)
LOG.info('osd_list = %s' % osd_list)
def __start_osd(osd_id):
utils.execute('start_osd', osd_id, run_as_root=True)
thd_list = []
for osd_id in osd_list:
thd = utils.MultiThread(__start_osd, osd_id=osd_id)
thd_list.append(thd)
utils.start_threads(thd_list)
def add_mds(self, context):
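        """Add (or simply restart) an mds on this host.

        If ceph.conf already has an mds for FLAGS.host it is just started;
        otherwise a new mds id is allocated, added to ceph.conf, its data
        directory and keyring are created via 'ceph auth get-or-create', and
        the service is started.
        """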
LOG.info('add_mds')
mds_id = self.get_mds_id()
if mds_id:
LOG.info('add_mds find mds on this node. Just start it.')
self.start_mds(context)
return
# Change /etc/ceph.conf file.
# Add new mds service.
LOG.info('add_mds begin to create new mds.')
config = CephConfigParser(FLAGS.ceph_conf)
config.add_mds_header()
mds_id = config.get_mds_num()
LOG.info('create new mds_id = %s' % mds_id)
init_node_ref = db.init_node_get_by_host(context, FLAGS.host)
hostip = init_node_ref['secondary_public_ip']
config.add_mds(FLAGS.host, hostip, mds_id)
config.save_conf(FLAGS.ceph_conf)
values = {'mds': 'yes'}
db.init_node_update(context, init_node_ref['id'], values)
# Generate new keyring.
mds_path = '/var/lib/ceph/mds/ceph-%s' % mds_id
utils.execute('mkdir', '-p', mds_path, run_as_root=True)
mds_key = '/etc/ceph/keyring.mds.%s' % mds_id
out = utils.execute('ceph', 'auth',
'get-or-create', 'mds.%d' % mds_id,
'mds', "allow",
'osd', "allow *",
'mon', "allow rwx",
run_as_root=True)[0]
utils.write_file_as_root(mds_key, out, 'w')
ceph_version = self.get_ceph_version()
if int(ceph_version.split(".")[0]) > 0:
utils.execute('chown', '-R',
'ceph:ceph',
'/var/lib/ceph',
run_as_root=True)
# Start mds service.
self.start_mds(context)
def start_mds(self, context):
config_dict = self.get_ceph_config(context)
# mds_id
mds_id = None
for section in config_dict:
if section.startswith("mds."):
if config_dict[section]['host'] == FLAGS.host:
mds_id = section.replace("mds.", "")
# Try to start monitor service.
if mds_id:
LOG.info('>> start the mds id: %s' % mds_id)
try:
self._operate_ceph_daemon("start", "mds", id=mds_id)
# utils.execute('ceph-mds', '-i', mds_id, run_as_root=True)
except:
LOG.info('Meets some error on start mds service.')
def start_server(self, context, node_id):
""" Start server.
0. start monitor
1. start mds.
2. start all osd.
3. unset osd noout.
4. reset db server status.
"""
res = self._conductor_rpcapi.init_node_get_by_id(context, node_id)
service_id = res.get('service_id', None)
node_type = res.get('type', None)
host_ip = res.get('secondary_public_ip', None)
host = res.get('host', None)
LOG.debug('The server info: %s %s %s %s' %
(service_id, node_type, host_ip, host))
# start monitor
self.start_monitor(context)
# start mds
self.start_mds(context)
# get osd list; if there aren't any, update status and return
osd_states = self._conductor_rpcapi.osd_state_get_by_service_id(context, service_id)
if not len(osd_states) > 0:
LOG.info("There is no osd on node %s" % node_id)
self._conductor_rpcapi.init_node_update_status_by_id(context, node_id, 'Active')
return True
# async method to start an osd
def __start_osd(osd_id):
osd = db.get_zone_hostname_storagegroup_by_osd_id(context, osd_id)[0]
osd_name = osd['osd_name'].split('.')[-1]
self.start_osd_daemon(context, osd_name)
# utils.execute("ceph", "osd", "crush", "create-or-move", osd['osd_name'], osd['weight'],
# "host=%s_%s_%s" %(osd['service']['host'],osd['storage_group']['name'],osd['zone']['name']) ,
# run_as_root=True)
values = {'state': FLAGS.osd_in_up, 'osd_name': osd['osd_name']}
self._conductor_rpcapi.osd_state_update_or_create(context, values)
# start osds asynchronously
thd_list = []
for item in osd_states:
osd_id = item['id']
thd = utils.MultiThread(__start_osd, osd_id=osd_id)
thd_list.append(thd)
utils.start_threads(thd_list)
# update init node status
ret = self._conductor_rpcapi.init_node_update_status_by_id(context, node_id, 'Active')
count = db.init_node_count_by_status(context, 'Stopped')
if count == 0:
utils.execute('ceph', 'osd', 'unset', 'noout', run_as_root=True)
return True
def track_monitors(self, mon_id):
"""Return the status of monitor in quorum."""
# ceph --cluster=ceph \
# --admin-daemon \
# /var/run/ceph/ceph-mon.%id.asok \
# mon_status
out = utils.execute('ceph',
'--cluster=ceph',
'--admin-daemon',
'/var/run/ceph/ceph-mon.%s.asok' % mon_id,
'mon_status',
run_as_root=True)[0]
return json.loads(out)
def create_keyring(self, mon_id):
"""Create keyring file:
ceph.client.admin.keyring
bootstrap-osd/keyring
bootstrap-mds/keyrong
"""
# Firstly begin to create ceph.client.admin.keyring
utils.execute('ceph',
'--cluster=ceph',
'--name=mon.',
'--keyring=/var/lib/ceph/mon/mon{mon_id}/keyring'.format(
mon_id=mon_id,
),
'auth',
'get-or-create',
'client.admin',
'mon', 'allow *',
'osd', 'allow *',
'mds', 'allow',
'-o',
'/etc/ceph/keyring.admin',
run_as_root=True)
# Begin to create bootstrap keyrings.
utils.execute('mkdir',
'-p',
'/var/lib/ceph/bootstrap-osd',
run_as_root=True)
utils.execute('ceph',
'--cluster=ceph',
'auth',
'get-or-create',
'client.bootstrap-osd',
'mon',
'allow profile bootstrap-osd',
'-o',
'/var/lib/ceph/bootstrap-osd/ceph.keyring',
run_as_root=True)
# Begin to create bootstrap-mds
utils.execute('mkdir',
'-p',
'/var/lib/ceph/bootstrap-mds',
run_as_root=True)
utils.execute('ceph',
'--cluster=ceph',
'auth',
'get-or-create',
'client.bootstrap-mds',
'mon',
'allow profile bootstrap-mds',
'-o',
'/var/lib/ceph/bootstrap-mds/ceph.keyring',
run_as_root=True)
if self._is_systemctl():
utils.execute('chown', '-R',
'ceph:ceph',
'/var/lib/ceph',
run_as_root=True)
def stop_cluster(self,context):
"stop cluster"
LOG.info('agent/driver.py stop cluster')
utils.execute('service', 'ceph', '-a', 'stop', run_as_root=True)
return True
def start_cluster(self,context):
LOG.info('agent/driver.py start cluster')
utils.execute('service', 'ceph', '-a', 'start', run_as_root=True)
return True
def stop_server(self, context, node_id):
"""Stop server.
0. Remove monitor if it is a monitor
1. Get service_id by node_id
2. Get all osds for given service_id
3. Set osd noout
4. service ceph stop osd.$num
"""
LOG.info('agent/driver.py stop_server')
CephConfigSynchronizer().sync_before_read(FLAGS.ceph_conf) # not sure why these two calls are here; we haven't done
self.update_etc_fstab(context) # anything yet to change the state of the system...
LOG.info('Step 1. Scan the osds in db.')
res = self._conductor_rpcapi.init_node_get_by_id(context, node_id)
service_id = res.get('service_id', None)
osd_states = self._conductor_rpcapi.\
osd_state_get_by_service_id(context, service_id)
if not len(osd_states) > 0:
LOG.info("There is no osd on node %s; skipping osd shutdown." % node_id)
else:
LOG.info('Step 2. ceph osd set noout')
utils.execute('ceph', 'osd', 'set', 'noout', run_as_root=True)
for item in osd_states:
osd_name = item['osd_name']
LOG.info('>> Stop ceph %s' % osd_name)
# utils.execute('service', 'ceph', 'stop', osd_name,
# run_as_root=True)
self.stop_osd_daemon(context, osd_name.split(".")[1])
# self._operate_ceph_daemon("stop", "osd", id=osd_name.split(".")[1])
values = {'state': 'In-Down', 'osd_name': osd_name}
LOG.info('>> update status into db %s' % osd_name)
self._conductor_rpcapi.osd_state_update_or_create(context, values)
# Stop monitor service.
self.stop_monitor(context)
# Stop mds service.
self.stop_mds(context)
        # We really don't want to remove the mds here, just stop it.
#values = {'mds': 'no'}
#self._conductor_rpcapi.init_node_update(context, node_id, values)
self._conductor_rpcapi.init_node_update_status_by_id(context, node_id, 'Stopped')
return True
def ceph_upgrade(self, context, node_id, key_url, pkg_url,restart=True):
"""ceph_upgrade
"""
LOG.info('agent/driver.py ceph_upgrade')
err = 'success'
try:
out, err = utils.execute('vsm-ceph-upgrade',
run_as_root=True)
LOG.info("exec vsm-ceph-upgrade:%s--%s"%(out,err))
if restart:
self.stop_server(context, node_id)
self.start_server(context, node_id)
err = 'success'
except:
LOG.info("vsm-ceph-upgrade error:%s"%err)
err = 'error'
db.init_node_update_status_by_id(context, node_id, 'Ceph Upgrade:%s'%err)
pre_status = 'available'
if restart:
pre_status = 'Active'
ceph_ver = self.get_ceph_version()
LOG.info('get version--after ceph upgrade==%s'%ceph_ver)
db.init_node_update(context,node_id,{'ceph_ver':ceph_ver})
db.init_node_update_status_by_id(context, node_id, pre_status)
return ceph_ver
def get_ceph_health(self, context):
out, err = utils.execute('ceph',
'health',
run_as_root=True)
        if 'HEALTH_OK' not in out and 'HEALTH_WARN' not in out:
LOG.info('Failed to start ceph cluster: %s' % out)
try:
raise exception.StartCephFaild
except exception.StartCephFaild, e:
LOG.error("%s:%s" %(e.code, e.message))
return True
return True
def get_ceph_version(self):
try:
out, err = utils.execute('ceph',
'--version',
run_as_root=True)
out = out.split(' ')[2]
except:
out = ''
return out
def get_vsm_version(self):
try:
out, err = utils.execute('vsm',
'--version',
run_as_root=True)
except:
out = '2.0'
return out
def find_attr_start_line(self, lines, min_line=4, max_line=9):
"""
Return line number of the first real attribute and value.
The first line is 0. If the 'ATTRIBUTE_NAME' header is not
found, return the index after max_line.
"""
for idx, line in enumerate(lines[min_line:max_line]):
col = line.split()
if len(col) > 1 and col[1] == 'ATTRIBUTE_NAME':
return idx + min_line + 1
LOG.warn('ATTRIBUTE_NAME not found in second column of'
' smartctl output between lines %d and %d.'
% (min_line, max_line))
return max_line + 1
def parse_nvme_output(self, attributes, start_offset=0, end_offset=-1):
import string
att_list = attributes.split('\n')
att_list = att_list[start_offset:end_offset]
dev_info={}
for att in att_list:
att_kv = att.split(':')
if not att_kv[0]: continue
if len(att_kv) > 1:
dev_info[string.strip(att_kv[0])] = string.strip(att_kv[1])
else:
dev_info[string.strip(att_kv[0])] = ''
return dev_info
def get_nvme_smart_info(self, device):
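        """Collect identity and SMART data for an NVMe device.

        Uses the nvme CLI ('nvme id-ctrl', 'nvme smart-log' and
        'nvme smart-log-add') and returns a {'basic': {...}, 'smart': {...}}
        dict; any command failure downgrades the 'Drive Status' to 'WARN'.
        """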
smart_info_dict = {'basic':{},'smart':{}}
if "/dev/nvme" in device:
LOG.info("This is a nvme device : " + device)
dev_info = {}
dev_smart_log = {}
dev_smart_add_log = {}
import commands
# get nvme device meta data
attributes, err = utils.execute('nvme', 'id-ctrl', device, run_as_root=True)
if not err:
basic_info_dict = self.parse_nvme_output(attributes)
LOG.info("basic_info_dict=" + str(basic_info_dict))
smart_info_dict['basic']['Drive Family'] = basic_info_dict.get('mn') or ''
smart_info_dict['basic']['Serial Number'] = basic_info_dict.get('sn') or ''
smart_info_dict['basic']['Firmware Version'] = basic_info_dict.get('fr') or ''
smart_info_dict['basic']['Drive Status'] = 'PASSED'
else:
smart_info_dict['basic']['Drive Status'] = 'WARN'
LOG.warn("Fail to get device identification with error: " + str(err))
# get nvme devic smart data
attributes, err = utils.execute('nvme', 'smart-log', device, run_as_root=True)
if not err:
dev_smart_log_dict = self.parse_nvme_output(attributes, 1)
LOG.info("device smart log=" + str(dev_smart_log_dict))
for key in dev_smart_log_dict:
smart_info_dict['smart'][key] = dev_smart_log_dict[key]
else:
smart_info_dict['basic']['Drive Status'] = 'WARN'
LOG.warn("Fail to get device smart log with error: " + str(err))
# get nvme device smart additional data
attributes, err = utils.execute('nvme', 'smart-log-add', device, run_as_root=True)
if not err:
dev_smart_log_add_dict = self.parse_nvme_output(attributes, 2)
LOG.info("device additional smart log=" + str(dev_smart_log_add_dict))
smart_info_dict['smart']['<<< additional smart log'] = ' >>>'
for key in dev_smart_log_add_dict:
smart_info_dict['smart'][key] = dev_smart_log_add_dict[key]
else:
smart_info_dict['basic']['Drive Status'] = 'WARN'
LOG.warn("Fail to get device additional (vendor specific) smart log with error: " + str(err))
LOG.info(smart_info_dict)
return smart_info_dict
def get_smart_info(self, context, device):
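        """Return SMART health information for a device.

        NVMe devices are handled by get_nvme_smart_info(); everything else is
        queried with 'smartctl -A', '-i' and '-H' and condensed into the same
        {'basic': {...}, 'smart': {...}} structure.
        """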
LOG.info('retrieve device info for ' + str(device))
if "/dev/nvme" in device:
return self.get_nvme_smart_info(device)
attributes, err = utils.execute('smartctl', '-A', device, run_as_root=True)
attributes = attributes.split('\n')
start_line = self.find_attr_start_line(attributes)
smart_info_dict = {'basic':{},'smart':{}}
if start_line < 10:
for attr in attributes[start_line:]:
attribute = attr.split()
if len(attribute) > 1 and attribute[1] != "Unknown_Attribute":
smart_info_dict['smart'][attribute[1]] = attribute[9]
basic_info, err = utils.execute('smartctl', '-i', device, run_as_root=True)
basic_info = basic_info.split('\n')
basic_info_dict = {}
if len(basic_info)>=5:
for info in basic_info[4:]:
info_list = info.split(':')
if len(info_list) == 2:
basic_info_dict[info_list[0]] = info_list[1]
smart_info_dict['basic']['Drive Family'] = basic_info_dict.get('Device Model') or basic_info_dict.get('Vendor') or ''
smart_info_dict['basic']['Serial Number'] = basic_info_dict.get('Serial Number') or ''
smart_info_dict['basic']['Firmware Version'] = basic_info_dict.get('Firmware Version') or ''
status_info,err = utils.execute('smartctl', '-H', device, run_as_root=True)
status_info = status_info.split('\n')
smart_info_dict['basic']['Drive Status'] = ''
if len(status_info)>4:
status_list = status_info[4].split(':')
if len(status_list)== 2:
smart_info_dict['basic']['Drive Status'] = len(status_list[1]) < 10 and status_list[1] or ''
LOG.info("get_smart_info_dict:%s"%(smart_info_dict))
return smart_info_dict
def get_available_disks(self, context):
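        """Best-effort list of block devices that look unused.

        Parses 'blockdev --report', drops whole disks that have partitions
        listed, then removes anything that is currently mounted or already an
        LVM physical volume.
        """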
all_disk_info,err = utils.execute('blockdev','--report',run_as_root=True)
all_disk_info = all_disk_info.split('\n')
all_disk_name = []
disk_check = []
if len(all_disk_info)>1:
for line in all_disk_info[1:-1]:
LOG.info('line====%s'%line)
line_list = line.split(' ')
line_list.remove('')
LOG.info('line_list====%s'%line_list)
if int(line_list[-4]) <= 1024:
continue
if line_list[-1].find('-') != -1:
continue
if line_list[-9] and int(line_list[-9]) == 0:
disk_check.append(line_list[-1])
all_disk_name.append(line_list[-1])
for disk_check_cell in disk_check:
for disk in all_disk_name:
if disk != disk_check_cell and disk.find(disk_check_cell) == 0:
all_disk_name.remove(disk_check_cell)
break
mounted_disk_info,err = utils.execute('mount', '-l', run_as_root=True)
mounted_disk_info = mounted_disk_info.split('\n')
for mounted_disk in mounted_disk_info:
mounted_disk_list = mounted_disk.split(' ')
if mounted_disk_list[0].find('/dev/') != -1:
if mounted_disk_list[0] in all_disk_name:
all_disk_name.remove(mounted_disk_list[0])
pvs_disk_info,err = utils.execute('pvs', '--rows', run_as_root=True)
pvs_disk_info = pvs_disk_info.split('\n')
for line in pvs_disk_info:
line_list = line.split(' ')
if line_list[-1].find('/dev/') != -1 and line_list[-1] in all_disk_name:
all_disk_name.remove(line_list[-1])
return all_disk_name
def get_disks_name(self, context,disk_bypath_list):
disk_name_dict = {}
for bypath in disk_bypath_list:
out, err = utils.execute('ls',bypath,'-l',
run_as_root=True)
if len(out.split('../../'))>1:
disk_name_dict[bypath] = '/dev/%s'%(out.split('../../')[1][:-1])
return disk_name_dict
def get_disks_name_by_path_dict(self,disk_name_list):
disk_name_dict = {}
by_path_info,err = utils.execute('ls','-al','/dev/disk/by-path',run_as_root=True)
LOG.info('by_path_info===%s,err===%s'%(by_path_info,err))
for bypath in by_path_info.split('\n'):
bypath_list = bypath.split(' -> ../../')
if len(bypath_list) > 1:
disk_name_dict['/dev/%s'%(bypath_list[1])] = '/dev/disk/by-path/%s'%(bypath_list[0].split(' ')[-1])
return disk_name_dict
def get_disks_name_by_uuid_dict(self,disk_name_list):
disk_name_dict = {}
by_uuid_info,err = utils.execute('ls','-al','/dev/disk/by-uuid',run_as_root=True)
LOG.info('by_uuid_info===%s,err===%s'%(by_uuid_info,err))
for byuuid in by_uuid_info.split('\n'):
byuuid_list = byuuid.split(' -> ../../')
if len(byuuid_list) > 1:
disk_name_dict['/dev/%s'%(byuuid_list[1])] = '/dev/disk/by-path/%s'%(byuuid_list[0].split(' ')[-1])
return disk_name_dict
def run_add_disk_hook(self, context):
out, err = utils.execute('add_disk',
'll',
run_as_root=True)
LOG.info("run_add_disk_hook:%s--%s"%(out,err))
return out
def get_ceph_admin_keyring(self, context):
"""
read ceph keyring from CEPH_PATH
"""
with open(FLAGS.keyring_admin, "r") as fp:
keyring_str = fp.read()
return keyring_str
def save_ceph_admin_keyring(self, context, keyring_str):
"""
read ceph keyring from CEPH_PATH
"""
open(FLAGS.keyring_admin, 'w').write(keyring_str)
return True
def refresh_osd_number(self, context):
LOG.info("Start Refresh OSD number ")
config_dict = self.get_ceph_config(context)
osd_num_dict = {}
for section in config_dict:
if section.startswith("osd."):
host = config_dict[section]['host']
                if host not in osd_num_dict:
osd_num_dict.setdefault(host, 0)
osd_num_dict[host] += 1
LOG.info("Refresh OSD number %s " % osd_num_dict)
init_nodes = self._conductor_rpcapi.get_server_list(context)
init_node_dict = {}
for node in init_nodes:
init_node_dict.setdefault(node['host'], node)
for host in osd_num_dict:
values = {"data_drives_number": osd_num_dict[host]}
self._conductor_rpcapi.init_node_update(context,
init_node_dict[host],
values)
LOG.info("Refresh OSD number finish")
return True
def _remove_osd(self, context, osd_id, host, host_is_running=True):
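        """Completely remove one osd from the cluster.

        Marks the osd out and waits for the osd map to reflect it, stops the
        daemon (when the host is reachable), then removes the osd from the
        crushmap, the auth database and the osd map, and deletes its section
        from ceph.conf.
        """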
def _get_line(osd_id):
out = utils.execute('ceph',
'osd',
'dump',
'-f',
'json-pretty',
run_as_root=True)[0]
status = json.loads(out)
for x in status['osds']:
if int(x['osd']) == int(osd_id):
return x
return None
def _wait_osd_status(osd_id, key, value):
status = _get_line(osd_id)
if not status:
time.sleep(10)
return
try_times = 0
while str(status[key]) != str(value):
try_times = try_times + 1
if try_times > 120:
break
status = _get_line(osd_id)
if not status:
time.sleep(10)
return
time.sleep(5)
if try_times % 10 == 0:
LOG.info('Try %s: %s change key = %s to value = %s' % \
(try_times, osd_id, key, value))
# Step 1: out this osd.
LOG.info('>>> remove ceph osd osd_id %s' % osd_id)
LOG.info('>>> remove ceph osd step0 out osd %s' % osd_id)
utils.execute("ceph", "osd", "out", osd_id, run_as_root=True)
LOG.info('>>> remove ceph osd step0 out osd cmd over')
_wait_osd_status(osd_id, 'in', 0)
# Step 2: shutdown the process.
if host_is_running:
LOG.info('>>> remove ceph osd kill proc osd %s' % osd_id)
try:
self._operate_ceph_daemon("stop", "osd", id=osd_id,
ssh=True, host=host)
except:
utils.execute("service", "ceph", "-a", "stop", "osd.%s" % osd_id,
run_as_root=True)
_wait_osd_status(osd_id, 'up', 0)
# Step 3: Remove it from crushmap.
LOG.info('>>> remove ceph osd step1 osd_id %s' % osd_id)
utils.execute("ceph", "osd", "crush", "remove", "osd.%s" % osd_id,
run_as_root=True)
# Step 4: Remove it from auth list.
LOG.info('>>> remove ceph osd step2 osd_id %s' % osd_id)
utils.execute("ceph", "auth", "del", "osd.%s" % osd_id,
run_as_root=True)
# Step 5: rm it.
LOG.info('>>> remove ceph osd step3 osd_id %s' % osd_id)
utils.execute("ceph", "osd", "rm", osd_id, run_as_root=True)
# Step 6: Remove it from ceph.conf
LOG.info('>>> remove ceph osd step4 osd_id %s' % osd_id)
config = CephConfigParser(FLAGS.ceph_conf)
config.remove_osd(osd_id)
config.save_conf(FLAGS.ceph_conf)
def osd_remove(self, context, osd_id, device, osd_host, umount_path):
LOG.info('osd_remove osd_id = %s' % osd_id)
self._remove_osd(context, osd_id, osd_host)
utils.execute("umount",
umount_path,
check_exit_code=False,
run_as_root=True)
return True
def ceph_osd_stop(self, context, osd_name):
# utils.execute('service',
# 'ceph',
# '-a',
# 'stop',
# osd_name,
# run_as_root=True)
osd_id = osd_name.split('.')[-1]
self.stop_osd_daemon(context, osd_id)
#self._operate_ceph_daemon("stop", "osd", id=osd_name.split(".")[1],
# ssh=True, host=osd_host)
#osd_id = osd_name.split('.')[-1]
#values = {'state': 'Out-Down', 'osd_name': osd_name}
#ret = self._conductor_rpcapi.\
# osd_state_update_or_create(context, values)
def ceph_osd_start(self, context, osd_name):
osd_id = osd_name.split('.')[-1]
self.start_osd_daemon(context, osd_id)
#values = {'state': FLAGS.osd_in_up, 'osd_name': osd_name}
#ret = self._conductor_rpcapi.\
# osd_state_update_or_create(context, values)
def osd_restart(self, context, osd_id):
LOG.info('osd_restart osd_id = %s' % osd_id)
osd = db.get_zone_hostname_storagegroup_by_osd_id(context, osd_id)
osd=osd[0]
#stop
utils.execute('ceph', 'osd', 'set', 'noout', run_as_root=True)
self.ceph_osd_stop(context, osd['osd_name'])
#start
utils.execute('ceph', 'osd', 'unset', 'noout', run_as_root=True)
self.ceph_osd_start(context, osd['osd_name'])
time.sleep(10)
# utils.execute("ceph", "osd", "crush", "create-or-move", osd['osd_name'], osd['weight'],
# "host=%s_%s_%s" %(osd['service']['host'],osd['storage_group']['name'],osd['zone']['name']) ,
# run_as_root=True)
return True
def osd_restore(self, context, osd_id):
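        """Re-create a failed osd on its original data device.

        Allocates a fresh ceph osd id, re-formats and mounts the device,
        rebuilds the osd keyring, puts the osd back at its crush location,
        starts the daemon, marks it 'in' and finally updates the osd record
        in the DB.
        """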
LOG.info('osd_restore osd_id = %s' % osd_id)
osd = db.osd_get(context, osd_id)
init_node = db.init_node_get_by_service_id(context, osd['service_id'])
osd_conf_dict = {"host": FLAGS.host,
"primary_public_ip": init_node['primary_public_ip'],
"secondary_public_ip": init_node['secondary_public_ip'],
"cluster_ip": init_node['cluster_ip'],
"dev_name": osd['device']['name'],
"dev_journal": osd['device']['journal'],
"file_system": osd['device']['fs_type']}
LOG.debug('osd_conf_dict = %s' % osd_conf_dict)
stdout = utils.execute("ceph",
"osd",
"create",
run_as_root=True)[0]
osd_inner_id = str(int(stdout))
osd_name = 'osd.%s' % osd_inner_id
LOG.info("new osd_name = %s" % osd_name)
utils.execute("umount",
osd['device']['name'],
check_exit_code=False,
run_as_root=True)
file_system = 'xfs'
if osd['device']['fs_type']:
file_system = osd['device']['fs_type']
cluster = db.cluster_get_all(context)[0]
mkfs_option = cluster['mkfs_option']
if not mkfs_option:
mkfs_option = utils.get_fs_options(file_system)[0]
utils.execute("mkfs",
"-t", file_system,
mkfs_option,
osd['device']['name'],
run_as_root=True)
#osd_pth = '%sceph-%s' % (FLAGS.osd_data_path, osd_inner_id)
try:
osd_data_path = self.get_ceph_config(context)['osd']['osd data']
osd_pth = osd_data_path.replace('$id',osd_inner_id)
except:
osd_pth = os.path.join(FLAGS.osd_data_path, "osd" + osd_id)
LOG.info('osd restore osd_pth =%s'%osd_pth)
utils.ensure_tree(osd_pth)
cluster = db.cluster_get_all(context)[0]
mount_option = cluster['mount_option']
if not mount_option:
mount_option = utils.get_fs_options(file_system)[1]
utils.execute("mount",
"-t", file_system,
"-o", mount_option,
osd['device']['name'],
osd_pth,
run_as_root=True)
self._clean_dirs(osd_pth)
self._add_ceph_osd_to_config(context, osd_conf_dict, osd_inner_id)
utils.execute("ceph-osd",
"-i", osd_inner_id,
"--mkfs",
"--mkkey",
run_as_root=True)
utils.execute("ceph", "auth", "del", "osd.%s" % osd_inner_id,
run_as_root=True)
osd_keyring_pth = self.get_ceph_config(context)['osd']['keyring']
osd_keyring_pth = osd_keyring_pth.replace('$id',osd_inner_id).replace('$name','osd.%s'%osd_inner_id)
LOG.info('osd restore keyring path=%s'%osd_keyring_pth)
#osd_keyring_pth = "/etc/ceph/keyring.osd.%s" % osd_inner_id
utils.execute("ceph", "auth", "add", "osd.%s" % osd_inner_id,
"osd", "allow *", "mon", "allow rwx",
"-i", osd_keyring_pth,
run_as_root=True)
storage_group = osd['storage_group']['name']
#TODO change zone
if osd['osd_location']:
osd_location_str = ''
if osd['osd_location'].find('=') != -1:
osd_location_str = osd['osd_location']
else:
crushmap = self.get_crushmap_json_format()
osd_location_str = "%s=%s"%(crushmap._types[1]['name'],osd['osd_location'])
weight = "1.0"
utils.execute("ceph",
"osd",
"crush",
"add",
"osd.%s" % osd_inner_id,
weight,
osd_location_str,
run_as_root=True)
else:
zone = init_node['zone']['name']
crush_dict = {"root": 'vsm',
"storage_group":storage_group,
"zone": "_".join([zone, storage_group]),
"host": "_".join([FLAGS.host, storage_group, zone]),
}
weight = "1.0"
utils.execute("ceph",
"osd",
"crush",
"add",
"osd.%s" % osd_inner_id,
weight,
"root=%s" % crush_dict['root'],
"storage_group=%s" % crush_dict['storage_group'],
"zone=%s" % crush_dict['zone'],
"host=%s" % crush_dict['host'],
run_as_root=True)
#step1
self.start_osd_daemon(context, osd_inner_id)
#step2
utils.execute('ceph', 'osd', 'in', osd_name, run_as_root=True)
time.sleep(10)
# utils.execute("ceph", "osd", "crush", "create-or-move", "osd.%s" % osd_inner_id, weight,
# "host=%s" % crush_dict['host'],
# run_as_root=True)
#update db
value = {}
value['id'] = osd_id
value['osd_name'] = osd_name
value['operation_status'] = FLAGS.vsm_status_present
value['state'] = FLAGS.osd_in_up
db.osd_state_update(context, osd_id, value)
return True
def set_pool_pg_pgp_num(self, context, pool, pg_num, pgp_num):
self.set_pool_pg_num(context, pool, pg_num)
        # wait for the previous pg_num change to take effect before bumping pgp_num
time.sleep(120)
self.set_pool_pgp_num(context, pool, pgp_num)
def set_pool_pg_num(self, context, pool, pg_num):
args= ['ceph', 'osd', 'pool', 'set', pool, 'pg_num', pg_num]
utils.execute(*args, run_as_root=True)
def set_pool_pgp_num(self, context, pool, pgp_num):
args= ['ceph', 'osd', 'pool', 'set', pool, 'pgp_num', pgp_num]
utils.execute(*args, run_as_root=True)
def get_ec_profiles(self):
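        """List the erasure-code profiles known to the cluster.

        Combines 'ceph osd erasure-code-profile ls' and '... get <name>' and
        returns, per profile: name, plugin, plugin_path, pg_num (k + m) and
        the remaining key/value pairs as a JSON string.
        """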
DEFAULT_PLUGIN_PATH = "/usr/lib/ceph/erasure-code"
args = ['ceph', 'osd', 'erasure-code-profile', 'ls']
(out, err) = utils.execute(*args, run_as_root=True)
profile_names = out.splitlines()
profiles = []
for profile_name in profile_names:
args = ['ceph', 'osd', 'erasure-code-profile', 'get', profile_name]
(out, err) = utils.execute(*args, run_as_root=True)
profile = {}
profile['name'] = profile_name
profile['plugin_path'] = DEFAULT_PLUGIN_PATH
profile_kv = {}
for item in out.splitlines():
key, val = item.split('=')
if key == 'plugin':
profile['plugin'] = val
elif key == 'directory':
profile['plugin_path'] = val
else:
profile_kv[key] = val
profile['pg_num'] = int(profile_kv['k']) + int(profile_kv['m'])
profile['plugin_kv_pair'] = json.dumps(profile_kv)
profiles.append(profile)
return profiles
def get_osds_status(self):
args = ['ceph', 'osd', 'dump', '-f', 'json']
#args = ['hostname', '-I']
#(out, _err) = utils.execute(*args)
(out, _err) = utils.execute(*args, run_as_root=True)
if out != "":
#LOG.info("osd_status:%s", out)
return out
else:
return None
def get_osds_details(self):
args = ['ceph', 'osd', 'dump']
osd_dump = self._run_cmd_to_json(args)
if osd_dump:
return osd_dump['osds']
else:
return None
def get_osds_metadata(self):
args = ['ceph', 'report']
report = self._run_cmd_to_json(args)
if report:
return report['osd_metadata']
else:
return None
def get_ceph_health_list(self):
args = ['ceph', 'health']
out, _err = utils.execute(*args, run_as_root=True)
try:
k = out.find(" ")
status = out[:k]
health_list =[i.strip() for i in out[k:].split(";")]
return [status] + health_list
except:
return ["GET CEPH STATUS ERROR"]
def make_cmd(self, args):
h_list = list()
t_list = ['-f', 'json-pretty']
if isinstance(args, list):
h_list.extend(args)
h_list.extend(t_list)
else:
            h_list.append(args)
            h_list.extend(t_list)
return h_list
def _run_cmd_to_json(self, args, pretty=True):
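        """Run a ceph command and return its output parsed as JSON.

        When pretty is True the command is extended with '-f json-pretty';
        returns None (and logs an error) if the output cannot be parsed.
        """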
if pretty:
cmd = self.make_cmd(args)
else:
cmd = args
LOG.debug('command is %s' % cmd)
(out, err) = utils.execute(*cmd, run_as_root=True)
json_data = None
if out:
try:
json_data = json.loads(out)
except:
json_data = None
                LOG.error('Command output is not valid JSON. cmd: %s, output: %s' % (cmd, out))
return json_data
def get_osds_total_num(self):
args = ['ceph', 'osd', 'ls']
osd_list = self._run_cmd_to_json(args)
return len(osd_list)
def get_crushmap_nodes(self):
args = ['ceph', 'osd', 'tree']
node_dict = self._run_cmd_to_json(args)
node_list = []
if node_dict:
node_list = node_dict.get('nodes')
return node_list
def get_osds_tree(self):
return_list = list()
node_list = self.get_crushmap_nodes()
if node_list:
for node in node_list:
name = node.get('name')
id = node.get('id')
if name and name.startswith('osd.'):
#LOG.debug('node %s ' % node)
for node_2 in node_list:
if node_2.get('children') and id in node_2.get('children'):
osd_location = '%s=%s'%(node_2.get('type'),node_2.get('name'))
node['osd_location'] = osd_location
break
return_list.append(node)
#LOG.debug('osd list: %s' % return_list)
return return_list
def get_osd_capacity(self):
args = ['ceph', 'pg', 'dump', 'osds']
osd_dict = self._run_cmd_to_json(args)
#LOG.debug('osd list: %s' % osd_dict)
return osd_dict
def get_pool_status(self):
args = ['ceph', 'osd', 'dump']
dump_list = self._run_cmd_to_json(args)
if dump_list:
return dump_list.get('pools')
return None
def get_pool_usage(self):
args = ['ceph', 'pg', 'dump', 'pools']
return self._run_cmd_to_json(args)
def get_pool_stats(self):
args = ['ceph', 'osd', 'pool', 'stats']
return self._run_cmd_to_json(args)
def get_osd_lspools(self):
args = ['ceph', 'osd', 'lspools']
pool_list = self._run_cmd_to_json(args)
return pool_list
def get_rbd_lsimages(self, pool):
#args = ['rbd', 'ls', '-l', pool, \
# '--format', 'json', '--pretty-format']
args = ['rbd_ls', pool]
rbd_image_list = self._run_cmd_to_json(args, pretty=False)
return rbd_image_list
def get_rbd_image_info(self, image, pool):
args = ['rbd', '--image', \
image,\
'-p', pool, \
'--pretty-format',\
'--format', 'json', \
'info']
rbd_image_dict = self._run_cmd_to_json(args, pretty=False)
return rbd_image_dict
def get_rbd_status(self):
pool_list = self.get_osd_lspools()
if pool_list:
rbd_list = []
for pool in pool_list:
rbd_image_list = self.get_rbd_lsimages(pool['poolname'])
if rbd_image_list:
for rbd_image in rbd_image_list:
rbd_dict = {}
image_dict = self.get_rbd_image_info(\
rbd_image['image'], \
pool['poolname'])
if image_dict:
rbd_dict['pool'] = pool['poolname']
rbd_dict['image'] = rbd_image['image']
rbd_dict['size'] = rbd_image['size']
rbd_dict['format'] = rbd_image['format']
rbd_dict['objects'] = image_dict['objects']
rbd_dict['order'] = image_dict['order']
rbd_list.append(rbd_dict)
return rbd_list
else:
return None
def get_mds_dump(self):
args = ['ceph', 'mds', 'dump']
mds_dict = self._run_cmd_to_json(args)
return mds_dict
def get_mds_status(self):
mds_dict = self.get_mds_dump()
if mds_dict:
mds_list = []
for key in mds_dict['info'].keys():
dict = {}
item = mds_dict['info'][key]
dict['gid'] = item['gid']
dict['name'] = item['name']
dict['state'] = item['state']
dict['address'] = item['addr']
mds_list.append(dict)
return mds_list
else:
return
def get_pg_dump(self):
args = ['ceph', 'pg', 'dump', 'pgs_brief']
result = self._run_cmd_to_json(args)
return result
def get_pg_status(self):
val_list = self.get_pg_dump()
if val_list:
pg_list = []
for item in val_list:
dict = {}
dict['pgid'] = item['pgid']
dict['state'] = item['state']
dict['up'] = ','.join(str(v) for v in item['up'])
dict['acting'] = ','.join(str(v) for v in item['acting'])
pg_list.append(dict)
return pg_list
else:
return
def get_mon_health(self):
args = ['ceph', 'health']
return self._run_cmd_to_json(args)
def get_ceph_status(self):
args = ['ceph', 'status']
return self._run_cmd_to_json(args)
def get_crush_rule_dump_by_name(self, name):
args = ['ceph', 'osd', 'crush', 'rule', 'dump', name]
return self._run_cmd_to_json(args)
def get_summary(self, sum_type, sum_dict=None):
if sum_type in [FLAGS.summary_type_pg, FLAGS.summary_type_osd,
FLAGS.summary_type_mds, FLAGS.summary_type_mon,
FLAGS.summary_type_cluster, FLAGS.summary_type_vsm]:
if not sum_dict:
sum_dict = self.get_ceph_status()
# newer versions of 'ceph status' don't display mdsmap - use 'ceph mds dump' instead
# if not 'mdsmap' in sum_dict:
sum_dict['mdsmap'] = self._run_cmd_to_json(['ceph', 'mds', 'dump'])
if sum_dict:
if sum_type == FLAGS.summary_type_pg:
return self._pg_summary(sum_dict)
elif sum_type == FLAGS.summary_type_osd:
return self._osd_summary(sum_dict)
elif sum_type == FLAGS.summary_type_mds:
return self._mds_summary(sum_dict)
elif sum_type == FLAGS.summary_type_mon:
return self._mon_summary(sum_dict)
elif sum_type == FLAGS.summary_type_cluster:
return self._cluster_summary(sum_dict)
elif sum_type == FLAGS.summary_type_vsm:
return self._vsm_summary(sum_dict)
def _osd_summary(self, sum_dict):
if sum_dict:
osdmap = sum_dict.get('osdmap')
return json.dumps(osdmap)
return None
def _pg_summary(self, sum_dict):
if sum_dict:
pgmap = sum_dict.get('pgmap')
return json.dumps(pgmap)
return None
def _mds_summary(self, sum_dict):
if sum_dict:
sum_dict = sum_dict.get("mdsmap")
mdsmap = {}
mdsmap['max'] = sum_dict['max_mds']
mdsmap['up'] = len(sum_dict['up'])
mdsmap['epoch'] = sum_dict['epoch']
mdsmap['in'] = len(sum_dict['in'])
mdsmap['failed'] = len(sum_dict['failed'])
mdsmap['stopped'] = len(sum_dict['stopped'])
mdsmap['data_pools'] = sum_dict['data_pools']
mdsmap['metadata_pool'] = sum_dict['metadata_pool']
return json.dumps(mdsmap)
return None
def _mon_summary(self, sum_dict):
if sum_dict:
quorum_status = self.get_quorum_status()
quorum_leader_name = quorum_status.get('quorum_leader_name')
quorum_leader_rank = None
for mon in quorum_status.get('monmap').get('mons'):
if mon.get('name') == quorum_leader_name:
quorum_leader_rank = str(mon.get('rank'))
break
mon_data = {
'monmap_epoch': sum_dict.get('monmap').get('epoch'),
'monitors': len(sum_dict.get('monmap').get('mons')),
'election_epoch': sum_dict.get('election_epoch'),
'quorum': json.dumps(' '.join([str(i) for i in sum_dict.get('quorum')])).strip('"'),
'overall_status': json.dumps(sum_dict.get('health').get('overall_status')).strip('"'),
'quorum_leader_name':quorum_leader_name,
'quorum_leader_rank':quorum_leader_rank,
}
return json.dumps(mon_data)
def get_quorum_status(self):
args = ['ceph', 'quorum_status']
out = self._run_cmd_to_json(args)
return out
def _cluster_summary(self, sum_dict):
if sum_dict:
cluster_data = {
'cluster': sum_dict.get('fsid'),
'status': sum_dict.get('health').get('summary'),
'detail': sum_dict.get('health').get('detail'),
'health_list': sum_dict.get("health_list")
}
return json.dumps(cluster_data)
def _vsm_summary(self, sum_dict):
#TODO: run cmd uptime | cut -d ' ' -f2
try:
uptime = open("/proc/uptime", "r").read().strip().split(" ")[0]
except:
uptime = ""
ceph_version = self.get_ceph_version()
return json.dumps({
'uptime': uptime,
'ceph_version': ceph_version,
'vsm_version':" ",
})
def ceph_status(self):
is_active = True
try:
self.get_ceph_status()
except exception.ProcessExecutionError as e:
LOG.debug('exit_code: %s, stderr: %s' % (e.exit_code, e.stderr))
if e.exit_code == 1 and e.stderr.find('TimeoutError') != -1:
is_active = False
return json.dumps({
'is_ceph_active': is_active
})
def add_cache_tier(self, context, body):
storage_pool_name = db.pool_get(context, body.get("storage_pool_id")).get('name')
cache_pool_name = db.pool_get(context, body.get("cache_pool_id")).get('name')
cache_mode = body.get("cache_mode")
LOG.info("add cache tier start")
LOG.info("storage pool %s cache pool %s " % (storage_pool_name, cache_pool_name))
if body.get("force_nonempty"):
utils.execute("ceph", "osd", "tier", "add", storage_pool_name, \
cache_pool_name, "--force-nonempty", run_as_root=True)
else:
utils.execute("ceph", "osd", "tier", "add", storage_pool_name, \
cache_pool_name, run_as_root=True)
# for the latest ceph version(jewel), it needs the parameter
# --yes-i-really-mean-it to do the action.
cache_mode_args = ["ceph",
"osd",
"tier",
"cache-mode",
cache_pool_name,
cache_mode]
ceph_version_code = ceph_version_utils.get_ceph_version_code()
if ceph_version_code == constant.CEPH_JEWEL:
cache_mode_args.append("--yes-i-really-mean-it")
utils.execute(*cache_mode_args, run_as_root=True)
if cache_mode == "writeback":
utils.execute("ceph", "osd", "tier", "set-overlay", storage_pool_name, \
cache_pool_name, run_as_root=True)
db.pool_update(context, body.get("storage_pool_id"), {"cache_tier_status": "Storage pool for:%s" % cache_pool_name})
db.pool_update(context, body.get("cache_pool_id"), {
"cache_tier_status": "Cache pool for:%s" % storage_pool_name,
"cache_mode": cache_mode})
options = body.get("options")
self._configure_cache_tier(cache_pool_name, options)
LOG.info("add cache tier end")
return True
def _configure_cache_tier(self, cache_pool_name, options):
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "hit_set_type", options["hit_set_type"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "hit_set_count", options["hit_set_count"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "hit_set_period", options["hit_set_period_s"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "target_max_bytes", int(options["target_max_mem_mb"]) * 1000000, run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "cache_target_dirty_ratio", options["target_dirty_ratio"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "cache_target_full_ratio", options["target_full_ratio"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "target_max_objects", options["target_max_objects"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "cache_min_flush_age", options["target_min_flush_age_m"], run_as_root=True)
utils.execute("ceph", "osd", "pool", "set", cache_pool_name, "cache_min_evict_age", options["target_min_evict_age_m"], run_as_root=True)
def remove_cache_tier(self, context, body):
LOG.info("Remove Cache Tier")
LOG.info(body)
cache_pool = db.pool_get(context, body.get("cache_pool_id"))
cache_pool_name = cache_pool.get("name")
storage_pool_name = cache_pool.get("cache_tier_status").split(":")[1].strip()
LOG.info(cache_pool['name'])
cache_mode = cache_pool.get("cache_mode")
LOG.info(cache_mode)
if cache_mode == "writeback":
# for the latest ceph version(jewel), it needs the parameter
# --yes-i-really-mean-it to do the action.
cache_mode_args = ["ceph",
"osd",
"tier",
"cache-mode",
cache_pool_name,
"forward"]
ceph_version_code = ceph_version_utils.get_ceph_version_code()
if ceph_version_code == constant.CEPH_JEWEL:
cache_mode_args.append("--yes-i-really-mean-it")
utils.execute(*cache_mode_args, run_as_root=True)
utils.execute("rados", "-p", cache_pool_name, "cache-flush-evict-all", \
run_as_root=True)
utils.execute("ceph", "osd", "tier", "remove-overlay", storage_pool_name, \
run_as_root=True)
else:
utils.execute("ceph", "osd", "tier", "cache-mode", cache_pool_name, \
"none", run_as_root=True)
utils.execute("ceph", "osd", "tier", "remove", storage_pool_name, \
cache_pool_name, run_as_root=True)
db.pool_update(context, cache_pool.pool_id, {"cache_tier_status": None})
# TODO cluster id
if body.has_key('cluster_id') and body['cluster_id']:
cluster_id = body['cluster_id']
else:
cluster_id = db.cluster_get_all(context)[0]['id']
db.pool_update_by_name(context, storage_pool_name, cluster_id, {"cache_tier_status": None})
return True
def auth_caps(self, context, entity, **kwargs):
"""
update caps for <name> from caps specified in the command
:param context:
:param entity:
:param kwargs:
:return:
"""
caps_keys = kwargs.keys()
if "mon" in caps_keys:
caps_mon = kwargs['mon']
else:
caps_mon = ""
if "osd" in caps_keys:
caps_osd = kwargs['osd']
else:
caps_osd = ""
if "mds" in caps_keys:
caps_mds = kwargs['mds']
else:
caps_mds = ""
try:
if caps_mon and caps_osd and caps_mds:
utils.execute('ceph', 'auth', 'caps', entity, 'mds', caps_mds,
'mon', caps_mon, 'osd', caps_osd, run_as_root=True)
elif caps_mon and caps_osd:
utils.execute('ceph', 'auth', 'caps', entity, 'mon', caps_mon,
'osd', caps_osd, run_as_root=True)
elif caps_mon:
utils.execute('ceph', 'auth', 'caps', entity, 'mon', caps_mon,
run_as_root=True)
except:
LOG.error("Failed to update auth caps")
raise
def auth_get(self, context, entity):
"""
get auth info
:param entity: client.ce1032ba-9ae9-4a7f-b456-f80fd821dd7f
:return:
{
"entity":"client.ce1032ba-9ae9-4a7f-b456-f80fd821dd7f",
"key":"<KEY>
"caps":{
"mon":"allow r",
"osd":"allow class-read object_prefix rbd_children,allow rwx pool=testpool01,allow rwx pool=testpool02"
}
}
"""
out = utils.execute('ceph', 'auth', 'get', entity, '-f', 'plain',
run_as_root=True)[0].strip("\n").split("\n")
result = {}
result["caps"] = {}
for line in out:
line = line.strip(" ")
if len(line.split("=")) < 2:
result["entity"] = line.replace("[","").replace("]","")
else:
if "key" in line.split("=")[0]:
result["key"] = line.split("=")[1].strip()
elif "mon" in line.split("=")[0]:
result["caps"]["mon"] = "=".join(line.split("=")[1:]).strip()[1:-1]
elif "osd" in line.split("=")[0]:
result["caps"]["osd"] = "=".join(line.split("=")[1:]).strip()[1:-1]
elif "mds" in line.split("=")[0]:
result["caps"]["mds"] = "=".join(line.split("=")[1:]).strip()[1:-1]
return result
def delete_cinder_type(self, context, name, **kwargs):
"""
:param name: cinder type name
:param kwargs:
:return:
"""
username = kwargs.pop('username')
password = kwargs.pop('password')
tenant_name = kwargs.pop('tenant_name')
auth_url = kwargs.pop('auth_url')
region_name = kwargs.pop('region_name')
cinderclient = cc.Client(username,
password,
tenant_name,
auth_url,
region_name=region_name)
cinder_type_list = cinderclient.volume_types.list()
delete_type = None
for type in cinder_type_list:
if type.name == name:
delete_type = type
break
if delete_type:
cinderclient.volume_types.delete(delete_type)
else:
LOG.warn("Not found the cinder type %s" % name)
def revoke_storage_pool_from_cinder_conf(self, context, auth_host,
cinder_host, ssh_user,
pool_name):
"""
:param auth_host:
:param cinder_host:
:param ssh_user: ssh user
:param pool_name: pool name
:return:
"""
line, err = utils.execute("su", "-s", "/bin/bash", "-c",
"exec ssh %s ssh %s sudo sed -n '/^enabled_backends/=' /etc/cinder/cinder.conf" %
(auth_host, cinder_host),
ssh_user, run_as_root=True)
line = line.strip(" ").strip("\n")
search_str = str(line) + "p"
enabled_backends, err = utils.execute("su", "-s", "/bin/bash", "-c",
"exec ssh %s ssh %s sudo sed -n %s /etc/cinder/cinder.conf" %
(auth_host, cinder_host, search_str),
ssh_user, run_as_root=True)
enabled_backends = enabled_backends.strip(" ").strip("\n")
backends_list = enabled_backends.split("=")[1].strip(" ").split(",")
new_backends_list = []
for backend in backends_list:
if backend != pool_name:
new_backends_list.append(backend)
new_enabled_backends = "enabled_backends\\\\\\ =\\\\\\ " + str(",".join(new_backends_list))
utils.execute("su", "-s", "/bin/bash", "-c",
"exec ssh %s ssh %s sudo sed -i 's/^enabled_backends*.*/%s/g' /etc/cinder/cinder.conf" %
(auth_host, cinder_host, new_enabled_backends),
ssh_user, run_as_root=True)
search_str = '/rbd_pool\\\\\\ =\\\\\\ ' + pool_name + '/='
line, err = utils.execute("su", "-s", "/bin/bash", "-c",
"exec ssh %s ssh %s sudo sed -n \"%s\" /etc/cinder/cinder.conf" %
(auth_host, cinder_host, search_str),
ssh_user, run_as_root=True)
line = line.strip(" ").strip("\n")
# remove 10 lines total
start_line = int(line) - 2
line_after = 9
end_line = int(start_line) + line_after
utils.execute("su", "-s", "/bin/bash", "-c",
"exec ssh %s ssh %s sudo sed -i %s','%s'd' /etc/cinder/cinder.conf" %
(auth_host, cinder_host, start_line, end_line), ssh_user,
run_as_root=True)
try:
utils.execute("service", "cinder-api", "restart", run_as_root=True)
utils.execute("service", "cinder-volume", "restart", run_as_root=True)
LOG.info("Restart cinder-api and cinder-volume successfully")
except:
utils.execute("service", "openstack-cinder-api", "restart", run_as_root=True)
utils.execute("service", "openstack-cinder-volume", "restart", run_as_root=True)
LOG.info("Restart openstack-cinder-api and openstack-cinder-volume successfully")
def create_keyring_and_key_for_rgw(self, context, name, keyring):
try:
utils.execute("rm", keyring, run_as_root=True)
except:
pass
utils.execute("ceph-authtool", "--create-keyring", keyring,
run_as_root=True)
utils.execute("chmod", "+r", keyring, run_as_root=True)
try:
utils.execute("ceph", "auth", "del", "client." + name, run_as_root=True)
except:
pass
utils.execute("ceph-authtool", keyring, "-n", "client." + name,
"--gen-key", run_as_root=True)
utils.execute("ceph-authtool", "-n", "client." + name,
"--cap", "osd", "allow rwx", "--cap", "mon", "allow rw",
keyring, run_as_root=True)
utils.execute("ceph", "-k", FLAGS.keyring_admin, "auth", "add",
"client." + name, "-i", keyring, run_as_root=True)
def add_rgw_conf_into_ceph_conf(self, context, name, host, keyring,
log_file, rgw_frontends):
rgw_section = "client." + str(name)
config = CephConfigParser(FLAGS.ceph_conf)
config.add_rgw(rgw_section, host, keyring, log_file, rgw_frontends)
config.save_conf(FLAGS.ceph_conf)
LOG.info("+++++++++++++++end add_rgw_conf_into_ceph_conf")
def create_default_pools_for_rgw(self, context):
utils.execute("ceph", "osd", "pool", "create", ".rgw", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".rgw.control", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".rgw.gc", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".log", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".intent-log", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".usage", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".users", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".users.email", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".users.swift", 8, 8, run_as_root=True)
utils.execute("ceph", "osd", "pool", "create", ".users.uid", 8, 8, run_as_root=True)
class DbDriver(object):
"""Executes commands relating to TestDBs."""
def __init__(self, execute=utils.execute, *args, **kwargs):
pass
def init_host(self, host):
pass
def update_recipe_info(self, context):
LOG.info("DEBUG in update_recipe_info() in DbDriver()")
res = db.recipe_get_all(context)
recipe_id_list = []
for x in res:
recipe_id_list.append(int(x.recipe_id))
str0 = os.popen("ssh [email protected] \'ceph osd lspools\' ").read()
str = str0[0:-2]
LOG.info('DEBUG str from mon %s' % str)
items = str.split(',')
##
items.remove('5 -help')
LOG.info("DEBUG items %s" % items)
##
pool_name_list = []
attr_names = ['size', 'min_size', 'crash_replay_interval', 'pg_num',
'pgp_num', 'crush_ruleset',]
for item in items:
x = item.split()
pool_name_list.append(x[1])
pool_name = x[1]
pool_id = int(x[0])
values = {}
values['recipe_name'] = pool_name
for attr_name in attr_names:
val = os.popen("ssh [email protected] \'ceph osd pool\
get %s %s\'" % (pool_name, attr_name)).read()
LOG.info("DEBUG val from cmon %s" % val)
_list = val.split(':')
values[attr_name] = int(_list[1])
if pool_id in recipe_id_list:
LOG.info('DEBUG update pool: %s recipe values %s' % (pool_name, values))
db.recipe_update(context, pool_id, values)
else:
values['recipe_id'] = pool_id
LOG.info('DEBUG create pool: %s recipe values %s' % (pool_name, values))
db.recipe_create(context, values)
def update_pool_info(self, context):
LOG.info("DEBUG in update_pool_info() in DbDriver()")
attr_names = ['size', 'min_size', 'crash_replay_interval', 'pg_num',
'pgp_num', 'crush_ruleset',]
res = db.pool_get_all(context)
pool_list = []
for x in res:
pool_list.append(int(x.pool_id))
LOG.info('x.id = %s' % x.pool_id)
#str0 = "0 data,1 metadata,2 rbd,3 testpool_after_periodic"
str0 = os.popen("ssh [email protected] \'ceph osd lspools\' ").read()
str = str0[0:-2]
items = str.split(',')
LOG.info("DEBUG items %s pool_list %s" % (items, pool_list))
for i in items:
x = i.split()
values = {}
pool_id = int(x[0])
LOG.info('DEBUG x[0] %s' % pool_id)
pool_name = x[1]
for attr_name in attr_names:
val = os.popen("ssh [email protected] \'ceph osd pool\
get %s %s\'" % (pool_name, attr_name)).read()
LOG.info("DEBUG val from cmon %s" % val)
_list = val.split(':')
values[attr_name] = int(_list[1])
if pool_id in pool_list:
#pool_id = x[0]
values['name'] = x[1]
db.pool_update(context, pool_id, values)
else:
values['pool_id'] = pool_id
values['name'] = x[1]
values['recipe_id'] = pool_id
values['status'] = 'running'
db.pool_create(context, values)
return res
class CreateCrushMapDriver(object):
"""Create crushmap file"""
def __init__(self, execute=utils.execute, *args, **kwargs):
self.conductor_api = conductor.API()
self.conductor_rpcapi = conductor_rpcapi.ConductorAPI()
self.osd_num = 0
self._crushmap_path = "/var/run/vsm/crushmap"
fd = open(self._crushmap_path, 'w')
fd.write("")
fd.close()
def _write_to_crushmap(self, string):
fd = open(self._crushmap_path, 'a')
fd.write(string)
fd.close()
def add_new_zone(self, context, zone_name):
res = self.conductor_api.storage_group_get_all(context)
storage_groups = []
for i in res:
storage_groups.append(i["name"])
storage_groups = list(set(storage_groups))
for storage_group in storage_groups:
zone = zone_name + "_" + storage_group
utils.execute("ceph", "osd", "crush", "add-bucket", zone, "zone",'--keyring',FLAGS.keyring_admin,
run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", zone,
"storage_group=%s" % storage_group,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
values = {'name': zone_name,
'deleted': 0}
self.conductor_rpcapi.create_zone(context, values)
return True
def add_rule(self, name, type):
utils.execute("ceph", "osd", "crush", "rule", "create-simple", \
name, name, type,'--keyring',FLAGS.keyring_admin,)
def add_storage_group(self, storage_group, root, types=None):
if types is None:
utils.execute("ceph", "osd", "crush", "add-bucket", storage_group, \
"storage_group", '--keyring',FLAGS.keyring_admin,run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", storage_group,\
"root=%s" % root,'--keyring',FLAGS.keyring_admin, run_as_root=True)
else:
utils.execute("ceph", "osd", "crush", "add-bucket", storage_group, \
"%s"%types[3]['name'], '--keyring',FLAGS.keyring_admin,run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", storage_group,\
"%s=%s" %(types[-1]['name'],root),'--keyring',FLAGS.keyring_admin, run_as_root=True)
def add_zone(self, zone, storage_group,types=None):
if types is None:
utils.execute("ceph", "osd", "crush", "add-bucket", zone, \
"zone", run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", zone, \
"storage_group=%s" % storage_group, run_as_root=True)
else:
utils.execute("ceph", "osd", "crush", "add-bucket", zone, \
"%s"%types[2]['name'], run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", zone, \
"%s=%s" %(types[3]['name'],storage_group), run_as_root=True)
def add_host(self, host_name, zone,types=None):
if types is None:
utils.execute("ceph", "osd", "crush", "add-bucket", host_name, "host",'--keyring',FLAGS.keyring_admin,
run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", host_name,
"zone=%s" % zone,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
else:
utils.execute("ceph", "osd", "crush", "add-bucket", host_name, "%s"%types[1]['name'],'--keyring',FLAGS.keyring_admin,
run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", host_name,
"%s=%s" %(types[2]['name'],zone),'--keyring',FLAGS.keyring_admin,
run_as_root=True)
def remove_host(self, host_name):
utils.execute("ceph", "osd", "crush", "remove", host_name,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
def create_crushmap(self, context, server_list):
LOG.info("DEBUG Begin to create crushmap file in %s" % self._crushmap_path)
LOG.info("DEBUG in create_crushmap body is %s" % server_list)
service_id = []
for i in server_list:
if i["is_storage"]:
service_id.append(i["id"])
#service id is init node id
LOG.info("init node id list %s" % service_id)
osd_num = 0
for id in service_id:
res = self.conductor_api.osd_state_count_by_init_node_id(context, id)
osd_num = osd_num + int(res)
init_node = db.init_node_get(context, service_id[0])
zone_tag = True
zone_cnt = len(db.zone_get_all(context))
if init_node['zone']['name'] == FLAGS.default_zone or zone_cnt <= 1:
zone_tag = False
self._gen_crushmap_optimal()
self._gen_device_osd(osd_num)
self._gen_bucket_type()
self._gen_bucket(context, service_id)
self._generate_rule(context, zone_tag)
LOG.info('Create crushmap over')
return True
def set_crushmap(self, context):
LOG.info("DEBUG Begin to set crushmap")
utils.execute('crushtool', '-c', self._crushmap_path, '-o',
self._crushmap_path+"_compiled", run_as_root=True)
utils.execute('ceph', 'osd', 'setcrushmap', '-i',
self._crushmap_path+"_compiled", run_as_root=True)
        # The following is the zone version used to solve "active+remapped" issues etc. Don't delete it!
#utils.execute('crushtool', '-c', '/tmp/crushmap',
# '--enable-unsafe-tunables',
# '--set-choose-local-tries','0',
# '--set-choose-local-fallback-tries', '0',
# '--set-choose-total-tries', '50', '-o',
# '/tmp/compiled_crushmap', run_as_root=True)
#utils.execute('ceph', 'osd', 'setcrushmap', '-i',
# '/tmp/compiled_crushmap', run_as_root=True)
# TODO return success here.
return True
def _gen_crushmap_optimal(self):
optimal = "# begin crush map\n" \
"tunable choose_local_tries 0\n" \
"tunable choose_local_fallback_tries 0\n" \
"tunable choose_total_tries 50\n" \
"tunable chooseleaf_descend_once 1\n" \
"tunable chooseleaf_vary_r 1\n" \
"tunable straw_calc_version 1\n"
self._write_to_crushmap(optimal)
def _gen_device_osd(self, osd_num):
self._write_to_crushmap("\n# devices\n")
for i in range(0, osd_num):
string = "device " + str(i) + " osd." + str(i) + "\n"
self._write_to_crushmap(string)
def _gen_bucket_type(self):
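        # VSM's crush hierarchy, from leaf to root: osd -> host -> zone -> storage_group -> root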
string = "\n#types\ntype 0 osd\ntype 1 host\ntype 2 zone\
\ntype 3 storage_group\ntype 4 root\n\n"
self._write_to_crushmap(string)
def _gen_bucket(self, context, service_id):
res = self.conductor_api.storage_group_get_all(context)
storage_groups = []
for i in res:
storage_groups.append(i["name"])
storage_groups = list(set(storage_groups))
LOG.info("storage_groups is: %s " % storage_groups)
res = self.conductor_api.zone_get_all(context)
zones = []
for i in res:
zones.append(i["name"])
hosts = []
for id in service_id:
res = self.conductor_api.init_node_get_by_id(context, id)
hosts.append(res["host"])
node_info = []
LOG.info("DEB-YOU %s " % service_id)
for id in service_id:
res = self.conductor_api.\
ceph_node_info(context, id)
for j in res:
node_info.append(j)
LOG.info("AGENT node info %s" % node_info)
num = 0
host_bucket, num = self._get_host_dic(node_info, storage_groups,\
zones, service_id, num, context)
self._write_host_bucket(host_bucket)
zone_bucket, num = self._get_zone_dic(node_info, host_bucket,\
zones, storage_groups, num)
self._write_zone_bucket(zone_bucket)
storage_group_bucket, num = self._get_storage_group_bucket(storage_groups,\
zone_bucket, num)
self._write_storage_group_bucket(storage_group_bucket)
root_bucket, num = self._get_root_bucket(storage_group_bucket, num)
self._write_root_bucket(root_bucket)
def _get_host_dic(self, node_info, storage_groups, zones, service_id, num, context):
host = []
LOG.info("service id %s " % service_id)
for id in service_id:
res = self.conductor_api.init_node_get_by_id(context, id)
host_name = res["host"]
id2 = res["zone_id"]
res = self.conductor_api.zone_get_by_id(context, id2)
zone = res["name"]
for storage_group in storage_groups:
dic = {}
dic["name"] = host_name + "_" + storage_group + "_" + zone
dic["zone"] = zone
dic["storage_group"] = storage_group
dic["id"] = num - 1
num = num -1
items = []
weight = 0
for node in node_info:
if node["host"] == host_name and node["storage_group_name"] == storage_group:
items.append(node["osd_state_name"])
weight = weight + 1
dic["weight"] = (weight != 0 and weight or FLAGS.default_weight)
dic["item"] = items
if len(items) > 0:
host.append(dic)
return host, num
def _get_zone_dic(self, node_info, hosts, zones, storage_groups, num):
zone_bucket = []
for zone in zones:
for storage_group in storage_groups:
dic = {}
dic["name"] = zone + "_" + storage_group
dic["storage_group"] = storage_group
items = []
weight = 0
for host in hosts:
if host["zone"] == zone and host["storage_group"] == storage_group:
item = {}
item["weight"] = host["weight"]
item["host_name"] = host["name"]
items.append(item)
weight = weight + float(host["weight"])
dic["weight"] = (weight != 0 and weight or FLAGS.default_weight)
dic["item"] = items
num = num - 1
dic["id"] = num
if len(items) > 0:
zone_bucket.append(dic)
#LOG.info('zone_bucket----%s'%zone_bucket)
return zone_bucket, num
def _get_storage_group_bucket(self, storage_groups, zones, num):
storage_group_bucket = []
for storage_group in storage_groups:
dic = {}
dic["name"] = storage_group
items = []
weight = 0
for zone in zones:
if zone["storage_group"] == storage_group:
item = {}
item["weight"] = zone["weight"]
item["zone_name"] = zone["name"]
items.append(item)
weight = weight + float(zone["weight"])
dic["weight"] = (weight != 0 and weight or FLAGS.default_weight)
dic["item"] = items
num = num - 1
dic["id"] = num
if len(items) > 0:
storage_group_bucket.append(dic)
return storage_group_bucket, num
def _get_root_bucket(self, storage_groups, num):
root_bucket = []
dic = {}
dic["name"] = "vsm"
items = []
for storage_group in storage_groups:
if storage_group["weight"] != 0:
item = {}
item["weight"] = storage_group["weight"]
item["storage_group_name"] = storage_group["name"]
items.append(item)
dic["item"] = items
num = num - 1
dic["id"] = num
root_bucket.append(dic)
return root_bucket, num
def _write_host_bucket(self, hosts):
for host in hosts:
self._write_to_crushmap("host " + host["name"] + " {\n")
self._write_to_crushmap(" id " + str(host["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in host["item"]:
self._write_to_crushmap(" item " + item + " weight 1.00\n")
self._write_to_crushmap("}\n\n")
def _write_zone_bucket(self, zones):
for zone in zones:
self._write_to_crushmap("zone " + zone["name"] + " {\n")
self._write_to_crushmap(" id " + str(zone["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in zone["item"]:
self._write_to_crushmap(" item " + item["host_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _write_storage_group_bucket(self, storage_groups):
for storage_group in storage_groups:
self._write_to_crushmap("storage_group " + storage_group["name"] + " {\n")
self._write_to_crushmap(" id " + str(storage_group["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in storage_group["item"]:
self._write_to_crushmap(" item " + item["zone_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _write_root_bucket(self, roots):
for root in roots:
self._write_to_crushmap("root " + root["name"] + " {\n")
self._write_to_crushmap(" id " + str(root["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in root["item"]:
self._write_to_crushmap(" item " + item["storage_group_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _key_for_sort(self, dic):
return dic['rule_id']
def _generate_rule(self, context, zone_tag):
osds = self.conductor_api.osd_state_get_all(context)
storage_groups = [ osd['storage_group']['id'] for osd in osds if osd['storage_group']]
storage_groups = list(set(storage_groups))
if not storage_groups :#is None:
LOG.info("Error in getting storage_groups")
try:
raise exception.GetNoneError
except exception.GetNoneError, e:
LOG.error("%s:%s" %(e.code, e.message))
return False
LOG.info("DEBUG in generate rule begin")
LOG.info("DEBUG storage_groups from conductor %s " % storage_groups)
#sorted_storage_groups = sorted(storage_groups, key=self._key_for_sort)
#LOG.info("DEBUG storage_groups after sorted %s" % sorted_storage_groups)
sting_common = """ type replicated
min_size 0
max_size 10
"""
if zone_tag:
string_choose = """ step chooseleaf firstn 0 type zone
step emit
}
"""
else:
string_choose = """ step chooseleaf firstn 0 type host
step emit
}
"""
for storage_group_id in storage_groups:
storage_group = db.storage_group_get(context,storage_group_id)
storage_group_name = storage_group["name"]
rule_id = storage_group["rule_id"]
string = ""
string = string + "\nrule " + storage_group_name + " {\n"
string = string + " ruleset " + str(rule_id) + "\n"
string = string + sting_common
string = string + " step take " + storage_group_name + "\n"
string = string + string_choose
self._write_to_crushmap(string)
#if storage_group_name.find("value_") == -1:
# string = ""
# string = string + "\nrule " + storage_group_name + " {\n"
# string = string + " ruleset " + str(rule_id) + "\n"
# string = string + sting_common
# string = string + " step take " + storage_group_name + "\n"
# string = string + string_choose
# self._write_to_crushmap(string)
#else:
# string = ""
# string = string + "\nrule " + storage_group_name + " {\n"
# string = string + " ruleset " + str(rule_id) + "\n"
# string = string + " type replicated\n min_size 0\n"
# string = string + " max_size 10\n"
# string = string + " step take " + storage_group_name + "\n"
# if zone_tag:
# string = string + " step chooseleaf firstn 1 type zone\n"
# else:
# string = string + " step chooseleaf firstn 1 type host\n"
# string = string + " step emit\n"
# string = string + " step take " + \
# storage_group_name.replace('value_', '') + "\n"
# if zone_tag:
# string = string + " step chooseleaf firstn -1 type zone\n"
# else:
# string = string + " step chooseleaf firstn -1 type host\n"
# string = string + " step emit\n}\n"
# self._write_to_crushmap(string)
return True
def _gen_rule(self):
string = """\n# rules
rule capacity {
ruleset 0
type replicated
min_size 0
max_size 10
step take capacity
step chooseleaf firstn 0 type host
step emit
}
rule performance {
ruleset 1
type replicated
min_size 0
max_size 10
step take performance
step chooseleaf firstn 0 type host
step emit
}
rule high_performance {
ruleset 2
type replicated
min_size 0
max_size 10
step take high_performance
step chooseleaf firstn 0 type host
step emit
}
rule value_capacity {
ruleset 3
type replicated
min_size 0
max_size 10
step take value_capacity
step chooseleaf firstn 1 type host
step emit
step take capacity
step chooseleaf firstn -1 type host
step emit
}
rule value_performance {
ruleset 4
type replicated
min_size 0
max_size 10
step take value_performance
step chooseleaf firstn 1 type host
step emit
step take performance
step chooseleaf firstn -1 type host
step emit
}
# end crush map
"""
self._write_to_crushmap(string)
class DiamondDriver(object):
"""Create diamond file"""
def __init__(self, execute=utils.execute, *args, **kwargs):
self._diamond_config_path = "/etc/diamond/collectors/"
def change_collector_conf(self,collector,values):
'''
:param collector:
:param values: {'enabled':True,
'interval':15
}
:return:
'''
# try:
# out, err = utils.execute('kill_diamond',
# 'll',
# run_as_root=True)
# except:
# LOG.info("kill_diamond error:%s--%s"%(out,err))
config_file = '%s%s.conf'%(self._diamond_config_path,collector)
keys = values.keys()
content = []
for key in keys:
content.append('%s=%s'%(key,values[key]))
out, err = utils.execute('rm','-rf', config_file, run_as_root=True)
out, err = utils.execute('cp','/etc/vsm/vsm.conf', config_file, run_as_root=True)
for line in content:
out, err = utils.execute('sed','-i','1i\%s'%line, config_file, run_as_root=True)
out, err = utils.execute('sed','-i','%s,$d'%(len(content)+1), config_file, run_as_root=True)
out, err = utils.execute('service', 'diamond', 'restart', run_as_root=True)
return out
class ManagerCrushMapDriver(object):
"""Create crushmap file"""
def __init__(self, execute=utils.execute, *args, **kwargs):
self.conductor_api = conductor.API()
self.conductor_rpcapi = conductor_rpcapi.ConductorAPI()
self._crushmap_path = "/var/run/vsm/mg_crushmap"
def _write_to_crushmap(self, string):
utils.execute('chown', '-R', 'vsm:vsm', self._crushmap_path+'_decompiled',
run_as_root=True)
fd = open(self._crushmap_path+'_decompiled', 'a')
fd.write(string)
fd.close()
def get_crushmap(self):
LOG.info("DEBUG Begin to get crushmap")
utils.execute('ceph', 'osd', 'getcrushmap', '-o',
self._crushmap_path,'--keyring',FLAGS.keyring_admin, run_as_root=True)
utils.execute('crushtool', '-d', self._crushmap_path, '-o',
self._crushmap_path+'_decompiled', run_as_root=True)
return True
def set_crushmap(self):
LOG.info("DEBUG Begin to set crushmap")
utils.execute('crushtool', '-c', self._crushmap_path+'_decompiled', '-o',
self._crushmap_path, run_as_root=True)
utils.execute('ceph', 'osd', 'setcrushmap', '-i',
self._crushmap_path, run_as_root=True)
return True
def _generate_one_rule(self,rule_info):
'''
rule_info:{'rule_name':'test-rule',
'rule_id':None,
'type':'replicated',
'min_size':0,
'max_size':10,
'takes':[{'take_id':-12,
'choose_leaf_type':'host',
'choose_num':2,
},
]
}
:return:{'rule_id':3}
'''
crushmap = get_crushmap_json_format()
rule_id = rule_info.get('rule_id',None)
if rule_id is None:
rule_ids =[rule['rule_id'] for rule in crushmap._rules]
rule_ids.sort()
rule_id = rule_ids[-1]+1
types = crushmap._types
types.sort(key=operator.itemgetter('type_id'))
choose_leaf_type_default = types[1]['name']
rule_type = rule_info.get('type','replicated')
min_size = rule_info.get('min_size',0)
max_size = rule_info.get('max_size',10)
rule_name = rule_info.get('rule_name')
takes = rule_info.get('takes')
sting_common = """ type %s
min_size %s
max_size %s
"""%(rule_type,str(min_size),str(max_size))
string = ""
string = string + "\nrule " + rule_name + " {\n"
string = string + " ruleset " + str(rule_id) + "\n"
string = string + sting_common
for take in takes:
take_name = crushmap.get_bucket_by_id(int(take.get('take_id')))['name']
take_choose_leaf_type = take.get('choose_leaf_type',choose_leaf_type_default)
take_choose_num = take.get('choose_num',1)
string_choose = """ step chooseleaf firstn %s type %s
step emit
"""%(str(take_choose_num),take_choose_leaf_type)
string = string + " step take " + take_name + "\n" + string_choose
string = string +" }\n"
LOG.info('---string-----%s---'%string)
self.get_crushmap()
self._write_to_crushmap(string)
self.set_crushmap()
return {'rule_id':rule_id}
def _modify_takes_of_rule(self,rule_info):
'''
rule_info:{'rule_name':'test-rule',
'rule_id':None,
'type':'replicated',
'min_size':0,
'max_size':10,
'takes':[{'take_id':-12,
'choose_leaf_type':'host',
'choose_num':2,
},
]
}
:return:{'rule_id':3}
'''
crushmap = get_crushmap_json_format()
rule_name = rule_info.get('rule_name')
if crushmap.get_rules_by_name(name = rule_name ) is None:
return self._generate_one_rule(rule_info)
types = crushmap._types
types.sort(key=operator.itemgetter('type_id'))
choose_leaf_type_default = types[1]['name']
# rule_type = rule_info.get('type','')
# min_size = rule_info.get('min_size')
# max_size = rule_info.get('max_size')
takes = rule_info.get('takes')
self.get_crushmap()
fd = open(self._crushmap_path+'_decompiled', 'r')
rule_start_line = None
rule_end_line = None
insert_take_line = None
line_number = -1
lines = fd.readlines()
fd.close()
new_lines = []
LOG.info('rulename=====%s'%rule_name)
for line in lines:
line_number += 1
LOG.info('old lines=====%s----type=%s'%(line,type(line)))
if 'rule %s {'%rule_name in line:
rule_start_line = line_number
if rule_start_line is not None:
if rule_end_line is None and '}' in line:
rule_end_line = line_number
if rule_start_line is not None and rule_end_line is None:
if 'ruleset ' in line:
rule_id = line[0:-1].split(' ')[-1]
if 'step take' in line and insert_take_line is None:
insert_take_line = line_number
#LOG.info('pass--11-%s'%line)
continue
if 'step take' in line and insert_take_line is not None:
#LOG.info('pass--22-%s'%line)
continue
if 'step chooseleaf' in line and insert_take_line is not None:
#LOG.info('pass--22-%s'%line)
continue
if 'step emit' in line and insert_take_line is not None:
#LOG.info('pass--22-%s'%line)
continue
new_lines.append(line)
if insert_take_line is not None:
for take in takes:
take_name = crushmap.get_bucket_by_id(int(take.get('take_id')))['name']
take_choose_leaf_type = take.get('choose_leaf_type',choose_leaf_type_default)
take_choose_num = take.get('choose_num',1)
string = " step take " + take_name + "\n"
new_lines.insert(insert_take_line,string)
string_choose = """ step chooseleaf firstn %s type %s\n"""%(str(take_choose_num),take_choose_leaf_type)
new_lines.insert(insert_take_line+1,string_choose)
new_lines.insert(insert_take_line+2," step emit\n")
insert_take_line +=3
utils.execute('chown', '-R', 'vsm:vsm', self._crushmap_path+'_decompiled',
run_as_root=True)
fd = open(self._crushmap_path+'_decompiled', 'w')
LOG.info('new lines=====%s'%new_lines)
fd.writelines(new_lines)
fd.close()
self.set_crushmap()
return {'rule_id':rule_id}
def add_bucket_to_crushmap(self,bucket_name,bucket_type,parent_bucket_type,parent_bucket_name):
utils.execute("ceph", "osd", "crush", "add-bucket", bucket_name, bucket_type,'--keyring',FLAGS.keyring_admin,
run_as_root=True)
utils.execute("ceph", "osd", "crush", "move", bucket_name,
"%s=%s" % (parent_bucket_type,parent_bucket_name),'--keyring',FLAGS.keyring_admin,
run_as_root=True)
# def _generate_one_rule(self,rule_name,take_id_list,rule_id=None,choose_leaf_type=None,choose_num=None,type='replicated',min_size=0,max_size=10):
# crushmap = get_crushmap_json_format()
# if rule_id is None:
# rule_ids =[rule['rule_id'] for rule in crushmap._rules]
# rule_ids.sort()
# rule_id = rule_ids[-1]+1
# if choose_leaf_type is None:
# types = crushmap._types
# types.sort(key=operator.itemgetter('type_id'))
# choose_leaf_type = types[1]['name']
# sting_common = """ type %s
# min_size %s
# max_size %s
# """%(type,str(min_size),str(max_size))
# string_choose = """ step chooseleaf firstn 1 type %s
# step emit
# """%choose_leaf_type
# string = ""
# string = string + "\nrule " + rule_name + " {\n"
# string = string + " ruleset " + str(rule_id) + "\n"
# string = string + sting_common
# for take in take_id_list:
# take_name = crushmap.get_bucket_by_id(int(take))['name']
# string = string + " step take " + take_name + "\n" + string_choose
# string = string +" }\n"
# self.get_crushmap()
# self._write_to_crushmap(string)
# self.set_crushmap()
# return {'rule_id':rule_id}
#
# def _modify_takes_of_rule(self,rule_name,take_id_list,choose_leaf_type=None,choose_num_list=None):
# crushmap = get_crushmap_json_format()
# if choose_leaf_type is None:
# types = crushmap._types
# types.sort(key=operator.itemgetter('type_id'))
# choose_leaf_type = types[1]['name']
# string_choose = """ step chooseleaf firstn 1 type %s
# step emit
# }
# """%choose_leaf_type
# self.get_crushmap()
# fd = open(self._crushmap_path, 'r')
# rule_start_line = None
# rule_end_line = None
# insert_take_line = None
# line_number = -1
# lines = fd.readlines()
# fd.close()
# new_lines = []
# # LOG.info('rulename=====%s'%rule_name)
# # LOG.info('take_id_list=====%s'%take_id_list)
# # LOG.info('old lines=====%s'%lines)
# for line in lines:
# line_number += 1
# if 'rule %s {'%rule_name in line:
# rule_start_line = line_number
# if rule_start_line is not None:
# if rule_end_line is None and '}' in line:
# rule_end_line = line_number
# if rule_start_line is not None and rule_end_line is None:
# if 'ruleset ' in line:
# rule_id = line[0:-1].split(' ')[-1]
# if 'step take' in line and insert_take_line is None:
# insert_take_line = line_number
# #LOG.info('pass--11-%s'%line)
# continue
# if 'step take' in line and insert_take_line is not None:
# #LOG.info('pass--22-%s'%line)
# continue
# if 'step chooseleaf' in line and insert_take_line is not None:
# #LOG.info('pass--22-%s'%line)
# continue
# if 'step emit' in line and insert_take_line is not None:
# #LOG.info('pass--22-%s'%line)
# continue
# new_lines.append(line)
# if insert_take_line is not None:
# for take in take_id_list:
# take_name = crushmap.get_bucket_by_id(int(take))['name']
# string = " step take " + take_name + "\n"
# new_lines.insert(insert_take_line,string)
# string_choose = """ step chooseleaf firstn 1 type %s\n"""%choose_leaf_type
# new_lines.insert(insert_take_line+1,string_choose)
# new_lines.insert(insert_take_line+2," step emit\n")
# insert_take_line +=3
# fd = open(self._crushmap_path, 'w')
# LOG.info('new lines=====%s'%new_lines)
# fd.writelines(new_lines)
# fd.close()
# self.set_crushmap()
# return {'rule_id':rule_id}
#
#
def get_crushmap_json_format(keyring=None):
'''
:return:
'''
if keyring:
json_crushmap,err = utils.execute('ceph', 'osd', 'crush', 'dump','--keyring',keyring, run_as_root=True)
else:
json_crushmap,err = utils.execute('ceph', 'osd', 'crush', 'dump', run_as_root=True)
crushmap = CrushMap(json_context=json_crushmap)
return crushmap
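# --- hedged usage sketch (not part of the original module) ---
# crushmap = get_crushmap_json_format()               # wraps `ceph osd crush dump`
# bucket = crushmap.get_bucket_by_id(-1)              # CrushMap helper used by the rule code above
# rule = crushmap.get_rules_by_name(name='capacity')  # returns None if no such rule exists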
``` |
{
"source": "01pooja10/adapt-BOT",
"score": 3
} |
#### File: adapt-BOT/chatbot/reply.py
```python
import torch
import torch.nn as nn
import json
import random
from model import BotModel
from chatbot import bag_of_words
import nltk
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#path=r'ausm_intents.json'
path = input("Enter path to dataset: ")
with open(path,'rb') as f:
data=json.load(f,strict=False)
#file1 = 'nn2_model.pth'
file1 = input("Enter path to model's weights: ")
model = torch.load(file1)
input_size = model['input_size']
hidden_size = model['hidden_size']
output_size = model['output_size']
words = model['words']
labels = model['tags']
mod_st = model['model_state']
modela = BotModel(input_size, output_size, hidden_size).to(device)
modela.load_state_dict(mod_st)
modela.eval()
name = 'Han'
def bot_reply(modela,labels,data):
print(name + ': Welcome')
while True:
sent = input('User: ')
if sent == 'exit':
break
pre = nltk.word_tokenize(sent)
b = bag_of_words(pre,words)
pred = b.reshape(1,b.shape[0])
pred = torch.from_numpy(pred).to(dtype=torch.float32).to(device)
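        # NOTE: 137 appears to be the bag-of-words length (vocabulary size) this saved model was
        # trained with; the vector is reshaped to (batch=1, seq=1, input_size) for BotModel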
pred = pred.reshape(1,1,137)
h = (torch.zeros(1,hidden_size), torch.zeros(1,hidden_size))
outp = modela(pred,h)
i,j = torch.max(outp,dim=1)
tag = labels[j.item()]
for i in data['intents']:
if tag == i['tag']:
resp = i['responses']
break
print('Han: '+ random.choice(resp))
bot_reply(modela,labels,data)
``` |
{
"source": "01pooja10/SimpleGAN",
"score": 3
} |
#### File: SimpleGAN/code/model.py
```python
import torch
import torchvision
import torch.nn as nn
#discriminator model
class Discriminator(nn.Module):
def __init__(self,in_size):
super().__init__()
self.dmodel = nn.Sequential(
nn.Linear(in_size,512),
nn.LeakyReLU(0.2),
nn.Dropout(0.3),
nn.Linear(512,256),
nn.LeakyReLU(0.2),
nn.Dropout(0.3),
nn.Linear(256,128),
nn.LeakyReLU(0.2),
nn.Dropout(0.3),
nn.Linear(128,1))
def forward(self,x):
out = self.dmodel(x)
return out
#generator model
class Generator(nn.Module):
def __init__(self,noise_dim,in_size):
super().__init__()
self.gmodel = nn.Sequential(
nn.Linear(noise_dim,128),
nn.LeakyReLU(0.2),
nn.Linear(128,256),
nn.LeakyReLU(0.2),
nn.Linear(256,512),
nn.LeakyReLU(0.2),
nn.Linear(512,in_size))
def forward(self,x):
out = self.gmodel(x)
return out
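# --- hedged usage sketch (not part of the original file) ---
# Assuming MNIST-style flattened 28x28 images (in_size=784) and a 64-dim noise vector:
#   gen = Generator(noise_dim=64, in_size=784)
#   disc = Discriminator(in_size=784)
#   z = torch.randn(16, 64)
#   fake = gen(z)          # -> (16, 784) unbounded outputs
#   logits = disc(fake)    # -> (16, 1) raw logits; pair with nn.BCEWithLogitsLoss during training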
``` |
{
"source": "01pooja10/Sketch-to-Shoe",
"score": 3
} |
#### File: Sketch-to-Shoe/code/dataset.py
```python
import os
import PIL
from PIL import Image
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset
class ShoeData(Dataset):
def __init__(self,root_directory,transforms):
super(ShoeData,self).__init__()
self.root = root_directory
self.files = os.listdir(root_directory)
self.transform_img = transforms
def __len__(self):
return len(self.files)
def __getitem__(self,idx):
imfile = self.files[idx]
impath = os.path.join(self.root,imfile)
img = Image.open(impath)
img = np.array(img)
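        # each dataset image holds the pair side by side (as in pix2pix edges2shoes data):
        # the left 256 columns are the input sketch, the right 256 columns the target shoe photo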
inp = img[:,:256,:]
out = img[:,256:,:]
inp = Image.fromarray(inp)
out = Image.fromarray(out)
inp = self.transform_img(inp)
out = self.transform_img(out)
return inp,out
r'''
img = np.array(Image.open(r'C:\Users\Pooja\Documents\ML_projects\Sketch-to-Shoe\data\28_AB.jpg'))
ipimg = img[:,:256,:]
opimg = img[:,256:,:]
ipimg = Image.fromarray(ipimg)
ipimg.show()
opimg = Image.fromarray(opimg)
opimg.show()
'''
```
#### File: Sketch-to-Shoe/code/train.py
```python
import torch
#empty the cache up front to reduce the chance of CUDA out-of-memory errors
torch.cuda.empty_cache()
import torchvision.transforms as transforms
from model import Discriminator, Generator
from dataset import ShoeData
from save import save_samples,save_model
import torch.nn as nn
import config
import torch.optim as optim
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast,GradScaler
def train(d,g,optd,optg,dscaler,gscaler):
'''
parameters for train function:
d - discriminator model
g - generator model
optd - discriminator optimizer
optg - generator optimizer
dscaler - gradient scaler for discriminator
gscaler - gradient scaler for generator
'''
transforms_list = transforms.Compose([
transforms.ColorJitter(brightness=0.2,saturation=0.3),
transforms.ToTensor(),
transforms.Normalize((0.5,),(0.5,))
])
train_data = ShoeData(root_directory=config.train_root, transforms=transforms_list)
train_loader = DataLoader(train_data, batch_size=config.batch_size, shuffle=True)
bce_loss = nn.BCEWithLogitsLoss()
l1_loss = nn.L1Loss()
for epoch in range(config.epochs):
for idx, (x,y) in enumerate(tqdm(train_loader)):
x = x.to(config.device)
y = y.to(config.device)
#training the discriminator
with autocast():
yfake = g(x)
dreal = d(x,y)
dfake = d(x,yfake.detach())
drloss = bce_loss(dreal,torch.ones_like(dreal))
                dfloss = bce_loss(dfake,torch.zeros_like(dfake))  # fake samples are labelled 0 for the discriminator
dloss = (drloss+dfloss)/2
optd.zero_grad()
dscaler.scale(dloss).backward()
dscaler.step(optd)
dscaler.update()
#training the generator
with autocast():
dfake = d(x,yfake)
gfloss = bce_loss(dfake,torch.ones_like(dfake))
l1 = l1_loss(yfake,y)*(config.l1_lambda)
gloss = gfloss+l1
optg.zero_grad()
gscaler.scale(gloss).backward()
gscaler.step(optg)
gscaler.update()
print('Epoch: ',str(epoch),'Disc Loss: ',str(dloss.item()), 'Gen Loss: ',str(gloss.item()))
#loading validation results as images into a separate folder
transform_val = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,),(0.5,))
])
val_data = ShoeData(root_directory=config.val_root,transforms=transform_val)
val_loader = DataLoader(val_data,batch_size=config.batch_size,shuffle=False)
save_samples(g,val_loader,config.save_folder,epoch)
print('Discriminator Loss: ',str(dloss.item()), 'Generator Loss: ',str(gloss.item()))
save_model(d,optd,config.dmodel_path)
save_model(g,optg,config.gmodel_path)
print('Model saved :)')
def main():
gen = Generator(in_channels=3).to(config.device)
disc = Discriminator(in_channels=3).to(config.device)
optd = optim.Adam(disc.parameters(),lr=config.lr)
optg = optim.Adam(gen.parameters(),lr=config.lr)
dscaler = GradScaler()
gscaler = GradScaler()
train(disc,gen,optd,optg,dscaler,gscaler)
print('Training Complete!')
if __name__=='__main__':
main()
``` |
{
"source": "01pooja10/ViT_pytorch",
"score": 3
} |
#### File: ViT_pytorch/src/embedding.py
```python
import torch
import torch.nn as nn
class Embed(nn.Module):
def __init__(self, img_size, patch_size, channels=3, emb_dim=768):
super(Embed,self).__init__()
self.img_s = img_size
self.patch_s = patch_size
self.chn = channels
self.emb = emb_dim
self.npatches = (img_size//patch_size) ** 2
self.project = nn.Conv2d(channels, emb_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.project(x)
x = x.flatten(2)
x = x.transpose(1,2)
return x
```
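The projection in `Embed.forward` is easiest to follow as a shape check. The sketch below is not part of the repository and assumes the corrected `Embed` class above:
```python
import torch
from src.embedding import Embed

emb = Embed(img_size=384, patch_size=16, channels=3, emb_dim=768)
x = torch.randn(2, 3, 384, 384)   # dummy batch of two RGB images
out = emb(x)
# Conv2d(kernel=stride=16): (2, 768, 24, 24) -> flatten(2): (2, 768, 576) -> transpose: (2, 576, 768)
print(out.shape, emb.npatches)    # torch.Size([2, 576, 768]) 576
```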
#### File: ViT_pytorch/src/vit.py
```python
import torch
import torch.nn as nn
from src.embedding import Embed
from src.blocks import Blocks
class ViT(nn.Module):
def __init__(self, img_size=384,
patch_size=16,
channels=3,
nclasses=100,
emb_dim=768,
depth=12,
nheads=12,
mlp=4,
kqv=True,
projp=0,
attnp=0
):
        super(ViT, self).__init__()
self.pe = Embed(img_size, patch_size, channels, emb_dim)
self.ctoken = nn.Parameter(torch.zeros(1, 1, emb_dim))
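        # learnable positional embeddings: one per patch plus one for the prepended class token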
self.pos = nn.Parameter(torch.zeros(1, 1+self.pe.npatches, emb_dim))
self.pdrop = nn.Dropout(projp)
self.blocks = nn.ModuleList([
Blocks(emb_dim, nheads,
mlp, kqv, projp, attnp)
for _ in range(depth)
])
self.norm = nn.LayerNorm(emb_dim, eps=1e-6)
self.flin = nn.Linear(emb_dim, nclasses)
def forward(self, x):
""" x: (batch_size, channels, img_size, img_size) """
bs = x.shape[0]
x = self.pe(x)
ctoken = self.ctoken.expand(bs, -1, -1) #(batch_size, 1, emb_dim)
x = torch.cat((ctoken, x),dim=1) #(batch_size, 1+npatches, emb_dim)
x = x + self.pos
x = self.pdrop(x)
for b in self.blocks:
x = b(x)
x = self.norm(x)
cl = x[:, 0] #select only class embedding
cl = self.flin(cl)
return cl
``` |
{
"source": "01programs/Intellinet_163682_IP_smart_PDU_API",
"score": 2
} |
#### File: 01programs/Intellinet_163682_IP_smart_PDU_API/api.py
```python
import requests
from lxml import etree as et
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlunsplit
""" WARNING - WARNING - WARNING - WARNING - WARNING - WARNING - WARNING
I STRONGLY DISCOURAGE YOU FROM USING THIS PDU IN PRODUCTION.
    ITS SECURITY IS VIRTUALLY NON-EXISTENT AND I FOUND MULTIPLE
EXPLOITABLE VULNERABILITIES JUST WHILE WRITING THIS API WRAPPER
WARNING - WARNING - WARNING - WARNING - WARNING - WARNING - WARNING """
class IPU():
"""This class is represents a api wrapper for the Intellinet IP smart PDU API [163682].
It provides all the functionality of the web interface it is based on.
Class-Attributes:
DEFAULT_CREDS (:obj:`tuple` of :obj:`str`): default username/password of pdu
DEFAULT_ENDCODING (str): default encoding of pdu
DEFAULT_SCHEMA (str): default schema of pdu
"""
DEFAULT_SCHEMA = "http"
DEFAULT_ENDCODING = "gb2312"
DEFAULT_CREDS = ("admin", "admin")
def __init__(self, host, auth=None, charset=None, schema=None):
"""
Args:
host (str): IP addr of pdu/ipu
auth (:obj:`tuple` of :obj:`str`, optional): (username, password). Defaults to DEFAULT_CREDS
charset (str): charset used by the pdu. Defaults to DEFAULT_ENDCODING
schema (str, optional): 'http' or 'https'. Defaults to DEFAULT_SCHEMA
"""
self.host = host
self.schema = schema or self.DEFAULT_SCHEMA
self.charset = charset or self.DEFAULT_ENDCODING
self.credentials = auth or self.DEFAULT_CREDS
self.auth = self._auth(self.credentials)
self.endpoints = {
# Information
"status": "status.xml",
"pdu": "info_PDU.htm",
"system": "info_system.htm",
# Control
"outlet": "control_outlet.htm",
# Config
"config_pdu": "config_PDU.htm",
"thresholds": "config_threshold.htm",
"users": "config_user.htm",
"network": "config_network.htm",
}
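    # --- hedged usage sketch (not part of the original module); the host below is a placeholder ---
    #   ipu = IPU("192.168.0.100", auth=("admin", "admin"))
    #   print(ipu.status())   # temperature, humidity, amps and per-outlet on/off states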
# api helper functions
def print_help(self):
""" Prints all available endpoints in a quick and dirty format.
"""
print(self.endpoints)
def _get_request(self, page, params=None):
"""Internal wrapper around requests get method and the pdus available endpoints.
Args:
page (str): endpoint / page that is requested
            params (dict, optional): GET parameters to be sent along with the request. Used for updating settings.
Returns:
:obj:`requests.models.Response`: The raw object returned by the requests lib.
"""
url = urlunsplit([self.schema, self.host, page, None, None])
return requests.get(url, auth=self.auth, params=params)
def _post_request(self, page, data):
"""Internal wrapper around requests post method and the pdus available endpoints.
Args:
page (str): See: self._get_request()
data (dict): post data
"""
url = urlunsplit([self.schema, self.host, page, None, None])
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return requests.post(url, auth=self.auth, data=data, headers=headers)
def _decode_response(self, resp):
"""simple helper to decode requests responses.
Args:
resp (:obj:`requests.models.Response`): The raw object returned by the requests lib.
Returns:
str: decoded string that was contained in the response from the api.
"""
return resp.content.decode(self.charset)
def _parse_resp_content(self, raw_resp_content):
"""simple wrapper around lxml that automatically uses the correct xml/html parser.
Args:
raw_resp_content (str): the decoded response from the api.
Returns:
:obj:`lxml.etree._Element`: searchable etree of the response string passed to the function.
"""
        # dynamically select parser for html and xml responses based on the keyword 'html' in the resp. content.
if 'html' in raw_resp_content.lower():
parser = et.HTML
else:
parser = et.XML
return parser(raw_resp_content)
def _api_request(self, page, params=None, data=None):
"""One strop shop helper for api requests. Hightes level wrapper which requests, decodes and parses in one step.
Args:
page (str): endpoint to be used
            params (dict, optional): optional GET parameters to be sent along with the request.
data (dict, optional): will cause the api call to be performed as HTTP POST request with `data` as payload.
In this case `params` will be ignored.
Returns:
:obj:`lxml.etree._Element`: See: self._parse_resp_content
"""
if data:
resp = self._post_request(page, data=data)
else:
resp = self._get_request(page, params=params)
return self._parse_resp_content(self._decode_response(resp))
def _auth(self, creds):
"""Don't even bother... The PDU only requests a http auth on the / page.
All other pages/endpoints (including settings updates und file uploads)
are unprotected.
Args:
creds (:obj:`tuple` of :obj:`str`): (username, password).
Returns:
            :obj:`requests.auth.HTTPBasicAuth`: requests auth class.
"""
return requests.auth.HTTPBasicAuth(*creds)
def _extract_value(self, etree, xml_element_name):
"""simple weapper around lxml value extration.
Args:
etree (:obj:`lxml.etree._Element`): a lxml etree
            xml_element_name (str): the name of the value's corresponding element.
Returns:
str: the value belonging to `xml_element_name`
"""
return etree.find(xml_element_name).text
# public api
def status(self):
"""gives you basic status/health of the device.
        Values: temperature in deg. C, outlet states [on/off], status [read: are there warnings?], humidity in percent, current in amperes.
Returns:
dict: containing the aforementioned stats.
e.g. {'degree_celcius': '26', 'outlet_states': ['on', 'on', 'off', 'on', 'on', 'on', 'on', 'on'],
'stat': 'normal', 'humidity_percent': '27', 'current_amperes': '0.5'}
"""
endpoint = self.endpoints["status"]
e = self._api_request(endpoint)
return {
"current_amperes": self._extract_value(e, "cur0"),
"degree_celcius": self._extract_value(e, "tempBan"),
"humidity_percent": self._extract_value(e, "humBan"),
"stat": self._extract_value(e, "stat0"),
"outlet_states": [self._extract_value(e, "outletStat{}".format(i)) for i in range(0,8)]
}
def pdu_config(self, outlet_configs=None):
""" Getter/setter for outlet configs.
Allows you to name the outlets as well as set turn on/off delays
to prevent overloading or create boot/shutdown orders.
Args:
            outlet_configs (dict, optional): if present, the pdu config will be updated to match the given dict. Format:
                {'outlet1': {'name': 'outlet3', 'turn_on_delay': 3, 'turn_off_delay': 3},
'outlet2': ... }
Returns:
:obj:`dict` of :obj:`dict` or None: Keys: `turn_on_delay`, `turn_off_delay`, `name`
"""
if outlet_configs:
self._set_config_pdu(outlet_configs)
return self._get_config_pdu()
def _set_config_pdu(self, outlet_configs):
"""Setter for self.pdu_config()
Args:
outlet_configs (dict): dict that is formatted like the output of self._get_config_pdu()
"""
endpoint = self.endpoints['config_pdu']
translation_table = {'turn_on_delay': 'ondly', 'turn_off_delay': 'ofdly', 'name': 'otlt'}
settings = {}
for k, v in outlet_configs.items():
otl_nr = k.replace('outlet', '')
for _k, _v in v.items():
new_key = translation_table[_k] + otl_nr
settings[new_key] = _v
etree = self._api_request(endpoint, data=settings)
def _get_config_pdu(self):
"""Getter for self.pdu_config()
Returns:
:obj:`dict` of :obj:`dict`: e.g.
{
'outlet5': {'turn_on_delay': 9, 'turn_off_delay': 9, 'name': 'GINA'},
'outlet2': {'turn_on_delay': 6, 'turn_off_delay': 6, 'name': 'Steckdose2'},
'outlet7': {'turn_on_delay': 11, 'turn_off_delay': 11, 'name': 'Steckdose7'},
'outlet1': {'turn_on_delay': 5, 'turn_off_delay': 5, 'name': 'PACS'},
'outlet6': {'turn_on_delay': 10, 'turn_off_delay': 10, 'name': 'GINA Router'},
'outlet3': {'turn_on_delay': 7, 'turn_off_delay': 7, 'name': 'Steckdose3'},
'outlet8': {'turn_on_delay': 12, 'turn_off_delay': 12, 'name': 'UPC Modem'},
'outlet4': {'turn_on_delay': 8, 'turn_off_delay': 8, 'name': 'Steckdose4'}
}
"""
endpoint = self.endpoints['config_pdu']
etree = self._api_request(endpoint)
xpath_input_field_values = './/td/input/@value' # get the value of the value attribute in the input tag which is within a td tag
xpath_input_fields = './/tr[td/input/@value]' # get every tr tag which has at least one td tag which has at least one input tag with a value attribute
config = {}
for idx, outlet in enumerate(etree.xpath(xpath_input_fields)):
values = outlet.xpath(xpath_input_field_values)
config['outlet{}'.format(idx)] = {
'name': values[0],
'turn_on_delay': int(values[1]),
'turn_off_delay': int(values[2])
}
return config
def control_outlets(self, list_of_outlet_ids=None, state=None):
list_of_outlet_ids = list_of_outlet_ids or [i for i in range(0, 8)]
if state:
            return self._set_outlet_states(list_of_outlet_ids, state)
        return self._get_outlet_states(list_of_outlet_ids)
def _get_outlet_states(self, list_of_outlet_ids):
"""wrapper around self.status() returns only on/off for the given outlet_ids.
Args:
            list_of_outlet_ids (:obj:`list` of `int`): the ids of the outlets you want to see.
Returns:
:obj:`list` of `str`: e.g. ['on', 'off', 'off', 'off', 'on']
"""
status = self.status()
return list(status['outlet_states'][i] for i in list_of_outlet_ids)
def _set_outlet_states(self, list_of_outlet_ids, state):
"""A `list_of_outlet_ids` will be set to a given `state`.
Args:
list_of_outlet_ids (:obj:`list` of `int`): the ids of the outlets you want to change.
state (str): One of ['on', 'off', 'power_cycle_off_on']
Returns:
:obj:`lxml.etree._Element`: the api response
"""
endpoint = self.endpoints['outlet']
translation_table = {'on': 0, 'off': 1, 'power_cycle_off_on': 2}
outlet_states = {'outlet{}'.format(k):1 for k in list_of_outlet_ids}
outlet_states['op'] = translation_table[state]
outlet_states['submit'] = 'Anwenden'
return self._api_request(endpoint, params=outlet_states)
def enable_outlets(self, list_of_outlet_ids):
"""Wrapper around self._set_outlet_states() to enable all given outlets
Args:
list_of_outlet_ids (:obj:`list` of `int`): See: self._set_outlet_states()
Returns:
:obj:`lxml.etree._Element`: See: self._set_outlet_states()
"""
return self._set_outlet_states(list_of_outlet_ids, 'on')
def disable_outlets(self, list_of_outlet_ids):
"""Wrapper around self._set_outlet_states() to disable all given outlets
Args:
list_of_outlet_ids (:obj:`list` of `int`): See: self._set_outlet_states()
Returns:
:obj:`lxml.etree._Element`: See: self._set_outlet_states()
"""
return self._set_outlet_states(list_of_outlet_ids, 'off')
def power_cycle_outlets(self, list_of_outlet_ids):
"""Wrapper around self._set_outlet_states() to perform a power cycle on all given outlets
Args:
list_of_outlet_ids (:obj:`list` of `int`): See: self._set_outlet_states()
Returns:
:obj:`lxml.etree._Element`: See: self._set_outlet_states()
"""
return self._set_outlet_states(list_of_outlet_ids, 'power_cycle_off_on')
def outlet_names(self):
"""Simply get a list of outlet names
Returns:
list_of_outlet_ids (:obj:`tuple` of `str`): ('machine_name', 'human_name')
"""
config = self.pdu_config()
names = [(k, v['name']) for k,v in config.items()]
return sorted(names, key=lambda x: x[0])
def config_network(self):
raise NotImplementedError
def config_user(self):
raise NotImplementedError
def config_threshold(self):
raise NotImplementedError
def info_pdu(self):
raise NotImplementedError
def info_system(self):
# this really should be called control/config_system
raise NotImplementedError
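# ------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# "Pdu" below stands in for whatever name the class above is defined
# with; the host, credentials and outlet numbers are example values.
#
#   pdu = Pdu("192.168.1.50", auth=("admin", "admin"))
#   print(pdu.status()["outlet_states"])   # e.g. ['on', 'on', 'off', ...]
#   pdu.disable_outlets([2])               # switch outlet 2 off
#   pdu.power_cycle_outlets([7])           # off/on cycle for outlet 7
#   print(pdu.outlet_names())              # [('outlet0', 'PACS'), ...]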
``` |
{
"source": "01protocol/zo-sdk-py",
"score": 2
} |
#### File: zo-sdk-py/zo/config.py
```python
from dataclasses import dataclass
from solana.publickey import PublicKey
from .types import PerpType
@dataclass(kw_only=True)
class Config:
CLUSTER_URL: str
ZO_PROGRAM_ID: PublicKey
ZO_DEX_ID: PublicKey
ZO_STATE_ID: PublicKey
SERUM_DEX_ID: PublicKey
configs = {
"devnet": Config(
CLUSTER_URL="https://api.devnet.solana.com",
ZO_PROGRAM_ID=PublicKey("<KEY>"),
ZO_DEX_ID=PublicKey("<KEY>"),
ZO_STATE_ID=PublicKey("<KEY>"),
SERUM_DEX_ID=PublicKey("<KEY>"),
),
"mainnet": Config(
CLUSTER_URL="https://api.mainnet-beta.solana.com",
ZO_PROGRAM_ID=PublicKey("<KEY>"),
ZO_DEX_ID=PublicKey("<KEY>"),
ZO_STATE_ID=PublicKey("<KEY>"),
SERUM_DEX_ID=PublicKey("<KEY>"),
),
}
def taker_fee(t: PerpType, /) -> float:
if t == "future":
return 10 / 10000
if t == "calloption" or t == "putoption":
return 10 / 10000
if t == "square":
return 15 / 10000
raise LookupError(f"invalid perp type {t}")
```
#### File: zo-sdk-py/zo/util.py
```python
from typing import *
import math
from anchorpy import Program, Context
from solana.publickey import PublicKey
from solana.keypair import Keypair
from solana.rpc.commitment import Finalized
from solana.rpc.types import TxOpts
from solana.sysvar import SYSVAR_RENT_PUBKEY
import solana.system_program
CONTROL_ACCOUNT_SIZE = 8 + 4482
def decode_symbol(s) -> str:
s = s.data
i = s.index(0)
return bytes(s[:i]).decode("utf-8")
def decode_wrapped_i80f48(n) -> float:
return n.data / (2**48)
def div_to_float(a: int, b: int) -> float:
q, r = divmod(a, b)
gcd = math.gcd(r, b)
return float(q) + (r // gcd) / (b // gcd)
def big_to_small_amount(n: int | float, /, *, decimals: int) -> int:
shift = 10 ** abs(decimals)
if decimals >= 0:
integral = int(n) * shift
fractional = int((n % 1) * shift)
return integral + fractional
else:
return int(n) // shift
def small_to_big_amount(n: int | float, /, *, decimals: int):
return n / 10**decimals
def price_to_lots(
n: int | float,
/,
*,
base_decimals: int,
quote_decimals: int,
base_lot_size: int,
quote_lot_size: int,
) -> int:
return round(
float(n)
* base_lot_size
/ quote_lot_size
* 10 ** (quote_decimals - base_decimals)
)
def lots_to_price(
n: int,
/,
*,
base_decimals: int,
quote_decimals: int,
base_lot_size: int,
quote_lot_size: int,
) -> float:
n *= quote_lot_size * 10 ** (base_decimals - quote_decimals)
return div_to_float(n, base_lot_size)
def size_to_lots(n: float, /, *, decimals: int, lot_size: int) -> int:
return round(n * 10**decimals) // lot_size
def lots_to_size(n: int, /, *, decimals: int, lot_size: int) -> float:
return div_to_float(n * lot_size, 10**decimals)
def margin_pda(
*,
owner: PublicKey,
state: PublicKey,
program_id: PublicKey,
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[
owner.__bytes__(),
state.__bytes__(),
bytes("marginv1", "utf-8"),
],
program_id,
)
def open_orders_pda(
*, control: PublicKey, dex_market: PublicKey, program_id: PublicKey
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[control.__bytes__(), dex_market.__bytes__()], program_id
)
def state_signer_pda(
*,
state: PublicKey,
program_id: PublicKey,
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[
state.__bytes__(),
],
program_id,
)
async def create_margin(
*, program: Program, state: PublicKey, key: PublicKey, nonce: int
) -> str:
control = Keypair()
control_lamports = (
await program.provider.connection.get_minimum_balance_for_rent_exemption(
CONTROL_ACCOUNT_SIZE
)
)["result"]
return await program.rpc["create_margin"](
nonce,
ctx=Context(
accounts={
"state": state,
"authority": program.provider.wallet.public_key,
"payer": program.provider.wallet.public_key,
"margin": key,
"control": control.public_key,
"rent": SYSVAR_RENT_PUBKEY,
"system_program": solana.system_program.SYS_PROGRAM_ID,
},
pre_instructions=[
solana.system_program.create_account(
solana.system_program.CreateAccountParams(
from_pubkey=program.provider.wallet.public_key,
new_account_pubkey=control.public_key,
lamports=control_lamports,
space=CONTROL_ACCOUNT_SIZE,
program_id=program.program_id,
)
)
],
signers=[control],
options=TxOpts(
max_retries=5,
preflight_commitment=Finalized,
skip_confirmation=False,
skip_preflight=False,
),
),
)
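# Conversion round-trip sketch (illustrative, not part of the original module;
# the decimals and lot sizes are made-up example values, not real market params):
#   params = dict(base_decimals=9, quote_decimals=6,
#                 base_lot_size=100_000, quote_lot_size=100)
#   price_to_lots(100.0, **params)                      # -> 100
#   lots_to_price(100, **params)                        # -> 100.0
#   size_to_lots(1.5, decimals=9, lot_size=100_000)     # -> 15_000
#   lots_to_size(15_000, decimals=9, lot_size=100_000)  # -> 1.5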
```
#### File: zo-sdk-py/zo/zo.py
```python
from typing import *
import asyncio
import os
import json
from datetime import datetime, timezone as tz
from anchorpy import Idl, Program, Provider, Context, Wallet
from anchorpy.error import AccountDoesNotExistError
from solana.publickey import PublicKey
from solana.keypair import Keypair
from solana.rpc.commitment import Commitment, Finalized
from solana.rpc.async_api import AsyncClient
from solana.rpc.types import TxOpts
from solana.sysvar import SYSVAR_RENT_PUBKEY
from solana.system_program import SYS_PROGRAM_ID
from spl.token.instructions import get_associated_token_address
from spl.token.constants import TOKEN_PROGRAM_ID
from . import util, types, config
from .config import configs, Config
from .types import (
Side,
OrderType,
CollateralInfo,
FundingInfo,
MarketInfo,
PositionInfo,
)
from .dex import Market, Orderbook, Order
T = TypeVar("T")
class ZoIndexer(Generic[T]):
def __init__(self, d: dict[str, T], m: Callable[[str | int | PublicKey], str]):
self.d = d
self.m = m
def __repr__(self):
return self.d.__repr__()
def __iter__(self):
return self.d.items().__iter__()
def __len__(self):
return len(self.d)
def __getitem__(self, i: str | int | PublicKey) -> T:
return self.d[self.m(i)]
class Zo:
__program: Program
__config: Config
__markets: dict[str, MarketInfo]
__collaterals: dict[str, CollateralInfo]
__orderbook: dict[str, Orderbook]
__balance: dict[str, float]
__position: dict[str, float]
__dex_markets: dict[str, Market]
__orders: dict[str, list[Order]]
__markets_map: dict[str | int, str]
__collaterals_map: dict[str | int, str]
_zo_state: Any
_zo_state_signer: PublicKey
_zo_cache: Any
_zo_margin: Any
_zo_margin_key: None | PublicKey
_zo_control: Any
def __init__(
self,
*,
_program,
_config,
_state,
_state_signer,
_margin,
_margin_key,
):
self.__program = _program
self.__config = _config
self._zo_state = _state
self._zo_state_signer = _state_signer
self._zo_margin = _margin
self._zo_margin_key = _margin_key
@classmethod
async def new(
cls,
*,
cluster: Literal["devnet", "mainnet"],
payer: Keypair | None = None,
url: str | None = None,
load_margin: bool = True,
create_margin: bool = True,
tx_opts: TxOpts = TxOpts(
max_retries=None,
preflight_commitment=Finalized,
skip_confirmation=False,
skip_preflight=False,
),
):
"""Create a new client instance.
Args:
cluster: Which cluster to connect to.
payer: The transaction payer and margin owner. Defaults to
the local transaction payer.
url: URL for the RPC endpoint.
load_margin: Whether to load the associated margin account.
If `False`, any transaction requiring a margin will fail.
create_margin: Whether to create the associated margin
account if it doesn't already exist.
tx_opts: The transaction options.
"""
if cluster not in configs.keys():
raise TypeError(f"`cluster` must be one of: {configs.keys()}")
config = configs[cluster]
if url is None:
url = config.CLUSTER_URL
idl_path = os.path.join(os.path.dirname(__file__), "idl.json")
with open(idl_path) as f:
raw_idl = json.load(f)
idl = Idl.from_json(raw_idl)
conn = AsyncClient(url)
wallet = Wallet(payer) if payer is not None else Wallet.local()
provider = Provider(conn, wallet, opts=tx_opts)
program = Program(idl, config.ZO_PROGRAM_ID, provider=provider)
state = await program.account["State"].fetch(config.ZO_STATE_ID)
state_signer, state_signer_nonce = util.state_signer_pda(
state=config.ZO_STATE_ID, program_id=config.ZO_PROGRAM_ID
)
if state.signer_nonce != state_signer_nonce:
raise ValueError(
f"Invalid state key ({config.ZO_STATE_ID}) for program id ({config.ZO_PROGRAM_ID})"
)
margin = None
margin_key = None
if load_margin:
margin_key, nonce = util.margin_pda(
owner=wallet.public_key,
state=config.ZO_STATE_ID,
program_id=config.ZO_PROGRAM_ID,
)
try:
margin = await program.account["Margin"].fetch(margin_key)
except AccountDoesNotExistError as e:
if not create_margin:
raise e
await util.create_margin(
program=program,
state=config.ZO_STATE_ID,
key=margin_key,
nonce=nonce,
)
margin = await program.account["Margin"].fetch(margin_key)
zo = cls(
_config=config,
_program=program,
_state=state,
_state_signer=state_signer,
_margin=margin,
_margin_key=margin_key,
)
await zo.refresh(commitment=Finalized)
return zo
@property
def program(self) -> Program:
return self.__program
@property
def provider(self) -> Provider:
return self.program.provider
@property
def connection(self) -> AsyncClient:
return self.provider.connection
@property
def wallet(self) -> Wallet:
return self.provider.wallet
def _collaterals_map(self, k: str | int | PublicKey) -> str:
if isinstance(k, PublicKey):
for i, c in enumerate(self._zo_state.collaterals):
if c.mint == k:
return self.__collaterals_map[i]
raise ValueError("")
else:
return self.__collaterals_map[k]
def _markets_map(self, k: str | int | PublicKey) -> str:
if isinstance(k, PublicKey):
for i, m in enumerate(self._zo_state.perp_markets):
if m.dex_market == k:
return self.__markets_map[i]
raise ValueError("")
else:
return self.__markets_map[k]
@property
def collaterals(self):
"""List of collaterals and their metadata."""
return ZoIndexer(self.__collaterals, lambda k: self._collaterals_map(k))
@property
def markets(self):
"""List of collaterals and markets metadata."""
return ZoIndexer(self.__markets, lambda k: self._markets_map(k))
@property
def orderbook(self):
"""Current state of the orderbook."""
return ZoIndexer(self.__orderbook, lambda k: self._markets_map(k))
@property
def balance(self):
"""Current account balance."""
return ZoIndexer(self.__balance, lambda k: self._collaterals_map(k))
@property
def position(self):
"""Current position."""
return ZoIndexer(self.__position, lambda k: self._markets_map(k))
@property
def orders(self):
"""Currently active orders."""
return ZoIndexer(self.__orders, lambda k: self._markets_map(k))
def _get_open_orders_info(self, key: int | str, /):
if isinstance(key, str):
for k, v in self.__markets_map.items():
if v == key and isinstance(k, int):
key = k
break
else:
ValueError("")
o = self._zo_control.open_orders_agg[key]
return o if o.key != PublicKey(0) else None
def __reload_collaterals(self):
map = {}
collaterals = {}
for i, c in enumerate(self._zo_state.collaterals):
if c.mint == PublicKey(0):
break
symbol = util.decode_symbol(c.oracle_symbol)
map[symbol] = symbol
map[i] = symbol
collaterals[symbol] = CollateralInfo(
mint=c.mint,
oracle_symbol=symbol,
decimals=c.decimals,
weight=c.weight,
liq_fee=c.liq_fee,
is_borrowable=c.is_borrowable,
optimal_util=c.optimal_util,
optimal_rate=c.optimal_rate,
max_rate=c.max_rate,
og_fee=c.og_fee,
is_swappable=c.is_swappable,
serum_open_orders=c.serum_open_orders,
max_deposit=c.max_deposit,
dust_threshold=c.dust_threshold,
vault=self._zo_state.vaults[i],
)
self.__collaterals_map = map
self.__collaterals = collaterals
def __reload_markets(self):
map = {}
markets = {}
for i, m in enumerate(self._zo_state.perp_markets):
if m.dex_market == PublicKey(0):
break
symbol = util.decode_symbol(m.symbol)
map[symbol] = symbol
map[i] = symbol
oracle = None
for o in reversed(self._zo_cache.oracles):
if util.decode_symbol(m.oracle_symbol) == util.decode_symbol(o.symbol):
oracle = o
break
else:
raise IndexError(f"oracle for market {symbol} not found")
mark = self._zo_cache.marks[i]
price_adj = 10 ** (m.asset_decimals - 6)
index_price = util.decode_wrapped_i80f48(oracle.price) * price_adj
mark_price = util.decode_wrapped_i80f48(mark.price) * price_adj
if types.perp_type_to_str(m.perp_type, program=self.program) == "square":
index_price = index_price**2 / m.strike
funding_sample_start = datetime.fromtimestamp(
mark.twap.last_sample_start_time, tz=tz.utc
)
            cumul_avg = util.decode_wrapped_i80f48(mark.twap.cumul_avg)
            if abs(cumul_avg) == 0 or funding_sample_start.minute == 0:
                funding_info = None
            else:
                daily_funding = cumul_avg / funding_sample_start.minute
                funding_info = FundingInfo(
                    daily=daily_funding,
                    hourly=daily_funding / 24,
                    apr=daily_funding * 100 * 365,
                )
markets[symbol] = MarketInfo(
address=m.dex_market,
symbol=symbol,
oracle_symbol=util.decode_symbol(m.oracle_symbol),
perp_type=types.perp_type_to_str(m.perp_type, program=self.program),
base_decimals=m.asset_decimals,
base_lot_size=m.asset_lot_size,
quote_decimals=6,
quote_lot_size=m.quote_lot_size,
strike=m.strike,
base_imf=m.base_imf,
liq_fee=m.liq_fee,
index_price=index_price,
mark_price=mark_price,
funding_sample_start_time=funding_sample_start,
funding_info=funding_info,
)
self.__markets_map = map
self.__markets = markets
def __reload_balances(self):
if self._zo_margin is None:
return
balances = {}
for i, c in enumerate(self._zo_margin.collateral):
if i not in self.__collaterals_map:
break
decimals = self.collaterals[i].decimals
c = util.decode_wrapped_i80f48(c)
m = self._zo_cache.borrow_cache[i]
m = m.supply_multiplier if c >= 0 else m.borrow_multiplier
m = util.decode_wrapped_i80f48(m)
balances[self.__collaterals_map[i]] = util.small_to_big_amount(
c * m, decimals=decimals
)
self.__balance = balances
def __reload_positions(self):
if self._zo_margin is None:
return
positions = {}
for s, m in self.markets:
if (oo := self._get_open_orders_info(s)) is not None:
positions[s] = PositionInfo(
size=util.small_to_big_amount(
abs(oo.pos_size), decimals=m.base_decimals
),
value=util.small_to_big_amount(
abs(oo.native_pc_total), decimals=m.quote_decimals
),
realized_pnl=util.small_to_big_amount(
oo.realized_pnl, decimals=m.base_decimals
),
funding_index=util.small_to_big_amount(
oo.funding_index, decimals=m.quote_decimals
),
side="long" if oo.pos_size >= 0 else "short",
)
else:
positions[s] = PositionInfo(
size=0, value=0, realized_pnl=0, funding_index=1, side="long"
)
self.__position = positions
async def __reload_dex_markets(self, *, commitment: None | Commitment = None):
ks = [
m.dex_market
for m in self._zo_state.perp_markets
if m.dex_market != PublicKey(0)
]
res: Any = await self.connection.get_multiple_accounts(
ks, encoding="base64", commitment=commitment
)
res = res["result"]["value"]
self.__dex_markets = {
self.__markets_map[i]: Market.from_base64(res[i]["data"][0])
for i in range(len(self.__markets))
}
async def __reload_orders(self, *, commitment: None | Commitment = None):
ks = []
for i in range(len(self.__markets)):
mkt = self.__dex_markets[self.__markets_map[i]]
ks.extend((mkt.bids, mkt.asks))
res: Any = await self.connection.get_multiple_accounts(
ks, encoding="base64", commitment=commitment
)
res = res["result"]["value"]
orders = self._zo_margin and {}
orderbook = {}
for i in range(len(self.__markets)):
mkt = self.__dex_markets[self.__markets_map[i]]
ob = mkt._decode_orderbook_from_base64(
res[2 * i]["data"][0], res[2 * i + 1]["data"][0]
)
orderbook[self.__markets_map[i]] = ob
if self._zo_margin is not None:
os = []
for slab in [ob.bids, ob.asks]:
for o in slab:
if o.control == self._zo_margin.control:
os.append(o)
orders[self.__markets_map[i]] = os
self.__orderbook = orderbook
self.__orders = orders
async def __refresh_margin(self, *, commitment: None | Commitment = None):
if self._zo_margin_key is not None:
self._zo_margin, self._zo_control = await asyncio.gather(
self.program.account["Margin"].fetch(self._zo_margin_key, commitment),
self.program.account["Control"].fetch(
self._zo_margin.control, commitment
),
)
async def refresh(self, *, commitment: Commitment = Finalized):
"""Refresh the loaded accounts to see updates."""
self._zo_state, self._zo_cache, _ = await asyncio.gather(
self.program.account["State"].fetch(self.__config.ZO_STATE_ID, commitment),
self.program.account["Cache"].fetch(self._zo_state.cache, commitment),
self.__refresh_margin(),
)
self.__reload_collaterals()
self.__reload_markets()
self.__reload_balances()
self.__reload_positions()
await self.__reload_dex_markets(commitment=commitment)
await self.__reload_orders(commitment=commitment)
async def deposit(
self,
amount: float,
/,
*,
mint: PublicKey,
repay_only: bool = False,
token_account: None | PublicKey = None,
) -> str:
"""Deposit collateral into the margin account.
Args:
amount: The amount to deposit, in big units (e.g.: 1.5 SOL, 0.5 BTC).
mint: Mint of the collateral being deposited.
repay_only: If true, will only deposit up to the amount borrowed.
token_account: The token account to deposit from, defaulting to
the associated token account.
Returns:
The transaction signature.
"""
if token_account is None:
token_account = get_associated_token_address(self.wallet.public_key, mint)
decimals = self.collaterals[mint].decimals
amount = util.big_to_small_amount(amount, decimals=decimals)
return await self.program.rpc["deposit"](
repay_only,
amount,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"token_account": token_account,
"vault": self.collaterals[mint].vault,
"token_program": TOKEN_PROGRAM_ID,
}
),
)
async def withdraw(
self,
amount: float,
/,
*,
mint: PublicKey,
allow_borrow: bool = False,
token_account: None | PublicKey = None,
) -> str:
"""Withdraw collateral from the margin account.
Args:
amount: The amount to withdraw, in big units (e.g.: 1.5 SOL, 0.5 BTC).
mint: The mint of the collateral.
            allow_borrow: If true, allow borrowing. If false, only up to the
                amount deposited can be withdrawn; in that case the amount
                parameter can be set to an arbitrarily large number to ensure
                that all deposits are fully withdrawn.
            token_account: The token account to withdraw into, defaulting to
                the associated token account.
Returns:
The transaction signature.
"""
if token_account is None:
token_account = get_associated_token_address(self.wallet.public_key, mint)
decimals = self.collaterals[mint].decimals
amount = util.big_to_small_amount(amount, decimals=decimals)
return await self.program.rpc["withdraw"](
allow_borrow,
amount,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"token_account": token_account,
"vault": self.collaterals[mint].vault,
"token_program": TOKEN_PROGRAM_ID,
}
),
)
async def place_order(
self,
size: float,
price: float,
side: Side,
*,
symbol: str,
order_type: OrderType,
limit: int = 10,
client_id: int = 0,
) -> str:
"""Place an order on the orderbook.
Args:
size: The maximum amount of big base units to buy or sell.
price: The limit price in big quote units per big base
units, e.g. 50000 USD/SOL.
side: Whether to place a bid or an ask.
symbol: The market symbol, e.g. "BTC-PERP".
order_type: The order type.
limit: If this order is taking, the limit sets the number of
maker orders the fill will go through, until stopping and
posting. If running into compute unit issues, then set
this number lower.
client_id: Used to tag an order with a unique id, which can
be used to cancel this order through
                cancel_order_by_client_id. For optimal use, make sure
                the id of every order is unique.
Returns:
The transaction signature.
"""
mkt = self.__dex_markets[symbol]
info = self.markets[symbol]
is_long = side == "bid"
price = util.price_to_lots(
price,
base_decimals=info.base_decimals,
quote_decimals=info.quote_decimals,
base_lot_size=info.base_lot_size,
quote_lot_size=info.quote_lot_size,
)
order_type_: Any = types.order_type_from_str(order_type, program=self.program)
taker_fee = config.taker_fee(info.perp_type)
fee_multiplier = 1 + taker_fee if is_long else 1 - taker_fee
base_qty = util.size_to_lots(
size, decimals=info.base_decimals, lot_size=info.base_lot_size
)
quote_qty = round(price * fee_multiplier * base_qty * info.quote_lot_size)
pre_ixs = []
oo_key = None
oo_info = self._get_open_orders_info(symbol)
if oo_info is not None:
oo_key = oo_info.key
else:
oo_key, _ = util.open_orders_pda(
control=self._zo_margin.control,
dex_market=info.address,
program_id=self.program.program_id,
)
pre_ixs = [
self.program.instruction["create_perp_open_orders"](
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"authority": self.wallet.public_key,
"payer": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo_key,
"dex_market": info.address,
"dex_program": self.__config.ZO_DEX_ID,
"rent": SYSVAR_RENT_PUBKEY,
"system_program": SYS_PROGRAM_ID,
                        },
                    )
                )
            ]
return await self.program.rpc["place_perp_order"](
is_long,
price,
base_qty,
quote_qty,
order_type_,
limit,
client_id,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo_key,
"dex_market": info.address,
"req_q": mkt.req_q,
"event_q": mkt.event_q,
"market_bids": mkt.bids,
"market_asks": mkt.asks,
"dex_program": self.__config.ZO_DEX_ID,
"rent": SYSVAR_RENT_PUBKEY,
                },
                pre_instructions=pre_ixs,
            ),
)
async def __cancel_order(
self,
*,
symbol: str,
side: None | Side = None,
order_id: None | int = None,
client_id: None | int = None,
):
mkt = self.__dex_markets[symbol]
oo = self._get_open_orders_info(symbol)
if oo is None:
raise ValueError("open orders account is uninitialized")
return await self.program.rpc["cancel_perp_order"](
order_id,
side == "bid",
client_id,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo.key,
"dex_market": mkt.own_address,
"market_bids": mkt.bids,
"market_asks": mkt.asks,
"event_q": mkt.event_q,
"dex_program": self.__config.ZO_DEX_ID,
}
),
)
async def cancel_order_by_order_id(
self, order_id: int, side: Side, *, symbol: str
) -> str:
"""Cancel an order on the orderbook using the `order_id`.
Args:
order_id: The order id of the order to cancel. To get the
order_id, see the `orders` field.
side: Whether the order is a bid or an ask.
symbol: The market symbol, e.g. "BTC-PERP".
Returns:
The transaction signature.
"""
return await self.__cancel_order(symbol=symbol, order_id=order_id, side=side)
async def cancel_order_by_client_id(self, client_id: int, *, symbol: str) -> str:
"""Cancel an order on the orderbook using the `client_id`.
Args:
client_id: The client id that was assigned to the order
                when it was placed.
symbol: The market symbol, e.g. "BTC-PERP".
Returns:
The transaction signature.
"""
return await self.__cancel_order(symbol=symbol, client_id=client_id)
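# ------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). Assumes a funded
# devnet keypair is available to anchorpy's Wallet.local(); the symbol and the
# order_type literal are examples and must match types.OrderType in practice.
#
#   import asyncio
#   from zo import Zo   # or zo.zo, depending on how the package exports it
#
#   async def main():
#       zo = await Zo.new(cluster="devnet")
#       print(zo.markets["BTC-PERP"].mark_price)
#       await zo.place_order(0.01, 50_000, "bid",
#                            symbol="BTC-PERP", order_type="limit")
#
#   asyncio.run(main())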
``` |
{
"source": "01remi/network_programs",
"score": 3
} |
#### File: network_programs/python/1_4_finding_service_name.py
```python
import socket
def find_service_name():
protocol_name='tcp'
for port in [80,25]:
print "Port: %s,Service name: %s "%(port,socket.getservbyport(port,protocol_name))
print "port: %s, Service name: %s"%(53,socket.getservbyport(53,'udp'))
if __name__=='__main__':
find_service_name()
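# Expected output (illustrative; names come from the standard services database):
#   Port: 80,Service name: http
#   Port: 25,Service name: smtp
#   port: 53, Service name: domain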
```
#### File: network_programs/python/1_6_socket_timeout.py
```python
import socket
def set_socket_timeout():
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print "Default socket timeout: %s" %s.gettimeout()
s.settimeout(100)
print "Current socket timeout: %s" %s.gettimeout()
if __name__=='__main__':
set_socket_timeout()
``` |
{
"source": "01ritvik/dsa-Q",
"score": 4
} |
#### File: 01ritvik/dsa-Q/graph.py
```python
class Node():
def __init__(self,value):
self.value = value
self.adjecent_list = []
self.visited = False
class graph():
def BFS(self,node):
queue = []
queue.append(node)
node.visited = True
traversal = []
while queue:
actualnode = queue.pop(0)
traversal.append(actualnode.value)
for i in actualnode.adjecent_list:
if i.visited is False:
queue.append(i)
i.visited = True
return traversal
def DFS(self,node,trav):
node.visited = True
trav.append(node.value)
for element in node.adjecent_list:
if element.visited is False:
self.DFS(element, trav)
return trav
node1 = Node("A")
node2 = Node("B")
node3 = Node("C")
node4 = Node("D")
node5 = Node("E")
node6 = Node("F")
node7 = Node("G")
node1.adjecent_list.append(node2)
node1.adjecent_list.append(node3)
node1.adjecent_list.append(node4)
node2.adjecent_list.append(node5)
node2.adjecent_list.append(node6)
node4.adjecent_list.append(node7)
Graph = graph()
print(Graph.DFS(node1,[]))
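# Note (illustrative addition): the DFS above visits A, B, E, F, C, D, G. Because
# the `visited` flags persist on the nodes, rebuild the graph (or reset the flags)
# before calling Graph.BFS(node1), which would then return
# ['A', 'B', 'C', 'D', 'E', 'F', 'G'].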
```
#### File: 01ritvik/dsa-Q/invert_tree.py
```python
class treenode:
def __init__(self, val = 0, left = None, right = None):
self.val = val
self.left = left
self.right = right
class solution:
def inverttree(self,root):
if root is None:
return None
root.left , root.right = root.right, root.left
self.inverttree(root.left)
self.inverttree(root.right)
return root
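# Illustrative check (not part of the original file): inverting a 3-node tree
# swaps the children in place.
#   root = treenode(1, treenode(2), treenode(3))
#   solution().inverttree(root)
#   # now root.left.val == 3 and root.right.val == 2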
```
#### File: 01ritvik/dsa-Q/linked list.py
```python
class node:
def __init__(self,val):
self.val = val
self.next = None
self.prev = None
class mylinkedlist:
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def get(self,index):
if index < 0 or index >= self.size:
return -1
cur = self.head
while index != 0:
cur = cur.next
index = index -1
return cur.val
def addathead(self,val):
new_node = node(val)
if self.head is None:
self.head = new_node
self.tail = new_node
else:
new_node.next = self.head
self.head.prev = new_node
self.head = new_node
self.size = self.size + 1
def addatTail(self,val):
new_node = node(val)
if self.tail is None:
self.head = new_node
self.tail = new_node
else:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
self.size = self.size + 1
def addatIndex(self,index,val):
        if index < 0 or index > self.size:
return -1
elif index ==0:
self.addathead(val)
elif index == self.size:
self.addatTail(val)
else:
cur = self.head
while index-1 != 0:
cur = cur.next
index -= 1
new_node = node(val)
new_node.next = cur.next
cur.next.prev = new_node
cur.next = new_node
new_node.prev = cur
self.size = self.size +1
def deleteatIndex(self,index,val):
if index < 0 or index >= self.size:
return -1
elif index == 0:
cur = self.head.next
if cur:
cur.prev = None
self.head = self.head.next
self.size -= 1
if self.size == 0:
self.tail = None
elif index == self.size-1:
cur = self.tail.prev
if cur:
cur.next = None
self.tail = self.tail.prev
            self.size -= 1
if self.size == 0:
self.head = None
else:
cur = self.head
while index-1 != 0:
cur = cur.next
index -= 1
cur.next = cur.next.next
cur.next.prev = cur
self.size -=1
```
#### File: 01ritvik/dsa-Q/nodes.py
```python
class node:
def __init__(self,value = None , next = None , prev = None):
self.value = value
self.next = next
self.prev = prev
class linkedlist:
def __init__(self):
self.head = None
LL = linkedlist()
n1 = node(3)
n2 = node(5)
n3 = node(7)
n4 = node(9)
LL.head = n1
n1.next = n2
n2.next = n3
n3.next = n4
n2.prev = n1
```
#### File: 01ritvik/dsa-Q/two_sum.py
```python
class solution:
def twosums(self, nums, target):
store = {}
for i in range (len(nums)):
if nums[i] in store:
return [store[nums[i]] , i]
else:
store[target-nums[i]] = i
obj = solution()
result = obj.twosums([2,7,11,15] , 9)
print(result) #leetcode question
``` |
{
"source": "01studio-lab/MicroPython_Examples",
"score": 3
} |
#### File: 4.拓展模块/2.舵机/servo.py
```python
from machine import Timer,PWM
import time
# PWM is configured via a hardware timer and attached to pin IO17
tim = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PWM)
S1 = PWM(tim, freq=50, duty=0, pin=17)
'''
Description: servo control function.
180-degree servo: angle -90 to 90 sets the corresponding angle.
360-degree (continuous-rotation) servo: angle -90 to 90 sets rotation direction and speed.
duty: duty-cycle value in the range 0-100.
'''
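# Worked example of the duty-cycle mapping below (illustrative): at 50 Hz the PWM
# period is 20 ms, so angle 0 gives (0+90)/180*10+2.5 = 7.5 % duty (a 1.5 ms pulse),
# -90 gives 2.5 % (0.5 ms) and +90 gives 12.5 % (2.5 ms).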
def Servo(servo,angle):
    servo.duty((angle+90)/180*10+2.5)
while True:
    # -90 degrees
Servo(S1,-90)
time.sleep(1)
    # -45 degrees
Servo(S1,-45)
time.sleep(1)
    # 0 degrees
Servo(S1,0)
time.sleep(1)
    # 45 degrees
Servo(S1,45)
time.sleep(1)
    # 90 degrees
Servo(S1,90)
time.sleep(1)
```
#### File: 4.WiFi模块/2.Socket通信/tcp_client.py
```python
import network, usocket,pyb
# WiFi credentials
SSID='01Studio' # Network SSID
KEY='88888888' # Network key
# Flag set by the timer callback to signal that socket data should be handled
socket_node = 0
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
# Create the socket connection; once connected, send 'Hello 01Studio!' to the server.
client=usocket.socket()
addr=('192.168.1.116',10000) # Server IP address and port
client.connect(addr)
client.send('Hello 01Studio!')
# Start a 10 Hz timer (100 ms period) that repeatedly triggers the socket receive task
def fun(tim):
global socket_node
socket_node = 1
pyb.LED(3).toggle()
tim = pyb.Timer(4,freq=10)
tim.callback(fun)
while True:
if socket_node:
        text=client.recv(128) # Receive at most 128 bytes per call
if text == '':
pass
        else: # The received data is bytes; decode('utf-8') converts it to a string
print(text)
client.send('I got:'+text.decode('utf-8'))
socket_node=0
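# --- Illustrative counterpart (not part of the original example) ---
# A minimal PC-side test server, assuming the PC holds the 192.168.1.116
# address used above:
#   import socket
#   srv = socket.socket(); srv.bind(('0.0.0.0', 10000)); srv.listen(1)
#   conn, _ = srv.accept()
#   print(conn.recv(128))            # b'Hello 01Studio!'
#   conn.send(b'hi from the PC')     # the board answers with 'I got:hi from the PC'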
```
#### File: 3.MQTT通信/2.订阅者(subscribe)/main.py
```python
import time, network
from mqtt import MQTTClient
SSID='01Studio' # Network SSID
KEY='88888888' # Network key
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
# MQTT callback, executed whenever a message is received
def MQTT_callback(topic, msg):
print('topic: {}'.format(topic))
print('msg: {}'.format(msg))
SERVER = 'mqtt.p2hp.com'
PORT = 1883
CLIENT_ID = '01Studio-OpenMV' # Client ID
TOPIC = '/public/01Studio/1' # Topic name
client = MQTTClient(CLIENT_ID, SERVER, PORT)
client.set_callback(MQTT_callback) # Register the callback
client.connect()
client.subscribe(TOPIC) # Subscribe to the topic
while (True):
    client.check_msg() # Check whether a message has arrived; if so, the callback prints it
    time.sleep(300) # Receive polling interval
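# To test the subscription (illustrative, not part of the original example),
# publish to the same topic from a PC, e.g. with the mosquitto client tools:
#   mosquitto_pub -h mqtt.p2hp.com -t /public/01Studio/1 -m "hello"
# MQTT_callback above then prints the topic and the message.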
```
#### File: lib/adafruit_onewire/bus.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
import busio
from micropython import const
_SEARCH_ROM = const(0xF0)
_MATCH_ROM = const(0x55)
_SKIP_ROM = const(0xCC)
_MAX_DEV = const(10)
class OneWireError(Exception):
"""A class to represent a 1-Wire exception."""
class OneWireAddress:
"""A class to represent a 1-Wire address."""
def __init__(self, rom):
self._rom = rom
@property
def rom(self):
"""The unique 64 bit ROM code."""
return self._rom
@property
def crc(self):
"""The 8 bit CRC."""
return self._rom[7]
@property
def serial_number(self):
"""The 48 bit serial number."""
return self._rom[1:7]
@property
def family_code(self):
"""The 8 bit family code."""
return self._rom[0]
class OneWireBus:
"""A class to represent a 1-Wire bus."""
def __init__(self, pin):
# pylint: disable=no-member
self._ow = busio.OneWire(pin)
self._readbit = self._ow.read_bit
self._writebit = self._ow.write_bit
self._maximum_devices = _MAX_DEV
@property
def maximum_devices(self):
"""The maximum number of devices the bus will scan for. Valid range is 1 to 255.
It is an error to have more devices on the bus than this number. Having less is OK.
"""
return self._maximum_devices
@maximum_devices.setter
def maximum_devices(self, count):
if not isinstance(count, int):
raise ValueError("Maximum must be an integer value 1 - 255.")
if count < 1 or count > 0xFF:
raise ValueError("Maximum must be an integer value 1 - 255.")
self._maximum_devices = count
def reset(self, required=False):
"""
Perform a reset and check for presence pulse.
:param bool required: require presence pulse
"""
reset = self._ow.reset()
if required and reset:
raise OneWireError("No presence pulse found. Check devices and wiring.")
return not reset
def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
if end is None:
end = len(buf)
for i in range(start, end):
buf[i] = self._readbyte()
def write(self, buf, *, start=0, end=None):
"""
Write the bytes from ``buf`` to the device.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buffer[start:end]``. This will not cause an allocation like
``buffer[start:end]`` will so it saves memory.
:param bytearray buf: buffer containing the bytes to write
:param int start: Index to start writing from
:param int end: Index to read up to but not include
"""
if end is None:
end = len(buf)
for i in range(start, end):
self._writebyte(buf[i])
def scan(self):
"""Scan for devices on the bus and return a list of addresses."""
devices = []
diff = 65
rom = False
count = 0
for _ in range(0xFF):
rom, diff = self._search_rom(rom, diff)
if rom:
count += 1
if count > self.maximum_devices:
raise RuntimeError(
"Maximum device count of {} exceeded.".format(
self.maximum_devices
)
)
devices.append(OneWireAddress(rom))
if diff == 0:
break
return devices
def _readbyte(self):
val = 0
for i in range(8):
val |= self._ow.read_bit() << i
return val
def _writebyte(self, value):
for i in range(8):
bit = (value >> i) & 0x1
self._ow.write_bit(bit)
def _search_rom(self, l_rom, diff):
if not self.reset():
return None, 0
self._writebyte(_SEARCH_ROM)
if not l_rom:
l_rom = bytearray(8)
rom = bytearray(8)
next_diff = 0
i = 64
for byte in range(8):
r_b = 0
for bit in range(8):
b = self._readbit()
if self._readbit():
if b: # there are no devices or there is an error on the bus
return None, 0
else:
if not b: # collision, two devices with different bit meaning
if diff > i or ((l_rom[byte] & (1 << bit)) and diff != i):
b = 1
next_diff = i
self._writebit(b)
r_b |= b << bit
i -= 1
rom[byte] = r_b
return rom, next_diff
@staticmethod
def crc8(data):
"""
Perform the 1-Wire CRC check on the provided data.
:param bytearray data: 8 byte array representing 64 bit ROM code
"""
crc = 0
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x01:
crc = (crc >> 1) ^ 0x8C
else:
crc >>= 1
crc &= 0xFF
return crc
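# Usage sketch (illustrative; the pin name is an assumption, not part of this module):
#   import board
#   bus = OneWireBus(board.D5)
#   for adr in bus.scan():
#       # a valid ROM CRCs to zero over all 8 bytes
#       assert OneWireBus.crc8(adr.rom) == 0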
```
#### File: adafruit_ble/advertising/__init__.py
```python
import struct
def to_hex(seq):
"""Pretty prints a byte sequence as hex values."""
return " ".join("{:02x}".format(v) for v in seq)
def to_bytes_literal(seq):
"""Prints a byte sequence as a Python bytes literal that only uses hex encoding."""
return 'b"' + "".join("\\x{:02x}".format(v) for v in seq) + '"'
def decode_data(data, *, key_encoding="B"):
"""Helper which decodes length encoded structures into a dictionary with the given key
encoding."""
i = 0
data_dict = {}
key_size = struct.calcsize(key_encoding)
while i < len(data):
item_length = data[i]
i += 1
if item_length == 0:
break
key = struct.unpack_from(key_encoding, data, i)[0]
value = data[i + key_size : i + item_length]
if key in data_dict:
if not isinstance(data_dict[key], list):
data_dict[key] = [data_dict[key]]
data_dict[key].append(value)
else:
data_dict[key] = value
i += item_length
return data_dict
def compute_length(data_dict, *, key_encoding="B"):
"""Computes the length of the encoded data dictionary."""
value_size = 0
for value in data_dict.values():
if isinstance(value, list):
for subv in value:
value_size += len(subv)
else:
value_size += len(value)
return len(data_dict) + len(data_dict) * struct.calcsize(key_encoding) + value_size
def encode_data(data_dict, *, key_encoding="B"):
"""Helper which encodes dictionaries into length encoded structures with the given key
encoding."""
length = compute_length(data_dict, key_encoding=key_encoding)
data = bytearray(length)
key_size = struct.calcsize(key_encoding)
i = 0
for key, value in data_dict.items():
if isinstance(value, list):
value = b"".join(value)
item_length = key_size + len(value)
struct.pack_into("B", data, i, item_length)
struct.pack_into(key_encoding, data, i + 1, key)
data[i + 1 + key_size : i + 1 + item_length] = bytes(value)
i += 1 + item_length
return data
class AdvertisingDataField:
"""Top level class for any descriptor classes that live in Advertisement or its subclasses."""
# pylint: disable=too-few-public-methods,unnecessary-pass
pass
class AdvertisingFlag:
"""A single bit flag within an AdvertisingFlags object."""
def __init__(self, bit_position):
self._bitmask = 1 << bit_position
def __get__(self, obj, cls):
return (obj.flags & self._bitmask) != 0
def __set__(self, obj, value):
if value:
obj.flags |= self._bitmask
else:
obj.flags &= ~self._bitmask
class AdvertisingFlags(AdvertisingDataField):
"""Standard advertising flags"""
limited_discovery = AdvertisingFlag(0)
"""Discoverable only for a limited time period."""
general_discovery = AdvertisingFlag(1)
"""Will advertise until discovered."""
le_only = AdvertisingFlag(2)
"""BR/EDR not supported."""
# BR/EDR flags not included here, since we don't support BR/EDR.
def __init__(self, advertisement, advertising_data_type):
self._advertisement = advertisement
self._adt = advertising_data_type
self.flags = 0
if self._adt in self._advertisement.data_dict:
self.flags = self._advertisement.data_dict[self._adt][0]
def __len__(self):
return 1
def __bytes__(self):
return bytes([self.flags])
def __str__(self):
parts = []
for attr in dir(self.__class__):
attribute_instance = getattr(self.__class__, attr)
if issubclass(attribute_instance.__class__, AdvertisingFlag):
if getattr(self, attr):
parts.append(attr)
return "<AdvertisingFlags {} >".format(" ".join(parts))
class String(AdvertisingDataField):
"""UTF-8 encoded string in an Advertisement.
Not null terminated once encoded because length is always transmitted."""
def __init__(self, *, advertising_data_type):
self._adt = advertising_data_type
def __get__(self, obj, cls):
if obj is None:
return self
if self._adt not in obj.data_dict:
return None
return str(obj.data_dict[self._adt], "utf-8")
def __set__(self, obj, value):
obj.data_dict[self._adt] = value.encode("utf-8")
class Struct(AdvertisingDataField):
"""`struct` encoded data in an Advertisement."""
def __init__(self, struct_format, *, advertising_data_type):
self._format = struct_format
self._adt = advertising_data_type
def __get__(self, obj, cls):
if obj is None:
return self
if self._adt not in obj.data_dict:
return None
return struct.unpack(self._format, obj.data_dict[self._adt])[0]
def __set__(self, obj, value):
obj.data_dict[self._adt] = struct.pack(self._format, value)
class LazyObjectField(AdvertisingDataField):
"""Non-data descriptor useful for lazily binding a complex object to an advertisement object."""
def __init__(self, cls, attribute_name, *, advertising_data_type, **kwargs):
self._cls = cls
self._attribute_name = attribute_name
self._adt = advertising_data_type
self._kwargs = kwargs
def __get__(self, obj, cls):
if obj is None:
return self
# Return None if our object is immutable and the data is not present.
if not obj.mutable and self._adt not in obj.data_dict:
return None
# Instantiate the object.
bound_obj = self._cls(obj, advertising_data_type=self._adt, **self._kwargs)
setattr(obj, self._attribute_name, bound_obj)
obj.data_dict[self._adt] = bound_obj
return bound_obj
@property
def advertising_data_type(self):
"""Return the data type value used to indicate this field."""
return self._adt
# TODO: Add __set_name__ support to CircuitPython so that we automatically tell the descriptor
# instance the attribute name it has and the class it is on.
class Advertisement:
"""Core Advertisement type"""
prefix = b"\x00" # This is an empty prefix and will match everything.
flags = LazyObjectField(AdvertisingFlags, "flags", advertising_data_type=0x01)
short_name = String(advertising_data_type=0x08)
"""Short local device name (shortened to fit)."""
complete_name = String(advertising_data_type=0x09)
"""Complete local device name."""
tx_power = Struct("<b", advertising_data_type=0x0A)
"""Transmit power level"""
# DEVICE_ID = 0x10
# """Device identifier."""
# SLAVE_CONN_INTERVAL_RANGE = 0x12
# """Slave connection interval range."""
# PUBLIC_TARGET_ADDRESS = 0x17
# """Public target address."""
# RANDOM_TARGET_ADDRESS = 0x18
# """Random target address (chosen randomly)."""
# APPEARANCE = 0x19
appearance = Struct("<H", advertising_data_type=0x19)
"""Appearance."""
# DEVICE_ADDRESS = 0x1B
# """LE Bluetooth device address."""
# ROLE = 0x1C
# """LE Role."""
#
# MAX_LEGACY_DATA_SIZE = 31
# """Data size in a regular BLE packet."""
def __init__(self):
"""Create an advertising packet."""
self.data_dict = {}
self.address = None
self._rssi = None
self.connectable = False
self.mutable = True
self.scan_response = False
@classmethod
def from_entry(cls, entry):
"""Create an Advertisement based on the given ScanEntry. This is done automatically by
`BLERadio` for all scan results."""
self = cls()
self.data_dict = decode_data(entry.advertisement_bytes)
self.address = entry.address
self._rssi = entry.rssi # pylint: disable=protected-access
self.connectable = entry.connectable
self.scan_response = entry.scan_response
self.mutable = False
return self
@property
def rssi(self):
"""Signal strength of the scanned advertisement. Only available on Advertisements returned
from `BLERadio.start_scan()`. (read-only)"""
return self._rssi
@classmethod
def matches(cls, entry):
"""Returns true if the given `_bleio.ScanEntry` matches all portions of the Advertisement
type's prefix."""
if not hasattr(cls, "prefix"):
return True
return entry.matches(cls.prefix)
def __bytes__(self):
"""The raw packet bytes."""
return encode_data(self.data_dict)
def __str__(self):
parts = []
for attr in dir(self.__class__):
attribute_instance = getattr(self.__class__, attr)
if issubclass(attribute_instance.__class__, AdvertisingDataField):
if (
issubclass(attribute_instance.__class__, LazyObjectField)
and not attribute_instance.advertising_data_type in self.data_dict
):
# Skip uninstantiated lazy objects; if we get
# their value, they will be be instantiated.
continue
value = getattr(self, attr)
if value is not None:
parts.append("{}={}".format(attr, str(value)))
return "<{} {} >".format(self.__class__.__name__, " ".join(parts))
def __len__(self):
return compute_length(self.data_dict)
def __repr__(self):
return "Advertisement(data={})".format(
to_bytes_literal(encode_data(self.data_dict))
)
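# Round-trip sketch for the helpers above (illustrative, not part of the library):
#   raw = encode_data({0x09: b"Zo"})       # bytearray(b'\x03\x09Zo')
#   assert decode_data(raw)[0x09] == b"Zo"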
```
#### File: 3.LCD液晶屏显示/2.4_ILI9341/colors.py
```python
BLACK = (0, 0, 0 ) # 0, 0, 0
NAVY = (0, 0, 15) # 0, 0, 128
DARKGREEN = (0, 31, 0 ) # 0, 128, 0
DARKCYAN = (0, 31, 15) # 0, 128, 128
MAROON = (15, 0, 0 ) # 128, 0, 0
PURPLE = (15, 0, 15) # 128, 0, 128
OLIVE = (15, 31, 0 ) # 128, 128, 0
LIGHTGREY = (23, 47, 23) # 192, 192, 192
DARKGREY = (15, 31, 15) # 128, 128, 128
BLUE = (0, 0, 31) # 0, 0, 255
GREEN = (0, 63, 0 ) # 0, 255, 0
CYAN = (0, 63, 31) # 0, 255, 255
RED = (31, 0, 0 ) # 255, 0, 0
MAGENTA = (31, 0, 31) # 255, 0, 255
YELLOW = (31, 63, 0 ) # 255, 255, 0
WHITE = (31, 63, 31) # 255, 255, 255
ORANGE = (31, 39, 0 ) # 255, 165, 0
GREENYELLOW = (18, 63, 4 ) # 173, 255, 47
def rgbTo565(r,g,b):
""" Transform a RGB888 color color to RGB565 color tuple. """
return (r//8, g//4, b//8)
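# Illustrative check (not part of the original file): pure red in RGB888 maps to
# the RED tuple defined above.
#   assert rgbTo565(255, 0, 0) == RED   # (31, 0, 0)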
```
#### File: 3.LCD液晶屏显示/2.4_ILI9341/lcd.py
```python
import os
import struct
import math
import array
import pyb, micropython
from pyb import SPI, Pin
from decorators import dimensions
from registers import regs
from colors import *
micropython.alloc_emergency_exception_buf(100)
imgcachedir = 'images/cache'
if 'cache' not in os.listdir('images'):
try:
os.mkdir(imgcachedir)
except OSError: pass
rate = 42000000
class ILI:
_cnt = 0
_regs = dict()
_spi = object()
_rst = object()
_csx = object()
_dcx = object()
_portrait = True
_tftwidth = 240 # TFT width Constant
_tftheight = 320 # TFT height Constant
_curwidth = 240 # Current TFT width
_curheight = 320 # Current TFT height
def __init__(self, rstPin='Y4', csxPin='Y5', dcxPin='Y3', port=2, rate=rate,
chip='ILI9341', portrait=True):
if ILI._cnt == 0:
ILI._regs = regs[chip]
ILI._spi = SPI(port, SPI.MASTER, baudrate=rate, polarity=1, phase=1)
ILI._rst = Pin(rstPin, Pin.OUT_PP) # Reset Pin
ILI._csx = Pin(csxPin, Pin.OUT_PP) # CSX Pin
ILI._dcx = Pin(dcxPin, Pin.OUT_PP) # D/Cx Pin
self.reset()
self._initILI()
self.setPortrait(portrait)
ILI._cnt += 1
def reset(self):
ILI._rst.low() #
pyb.delay(1) # RESET LCD SCREEN
ILI._rst.high() #
def setPortrait(self, portrait):
if ILI._portrait != portrait:
ILI._portrait = portrait
self._setWH()
def _setWH(self):
if ILI._portrait:
ILI._curheight = self.TFTHEIGHT = ILI._tftheight
ILI._curwidth = self.TFTWIDTH = ILI._tftwidth
else:
ILI._curheight = self.TFTHEIGHT = ILI._tftwidth
ILI._curwidth = self.TFTWIDTH = ILI._tftheight
self._graph_orientation()
def _initILI(self):
self._write_cmd(ILI._regs['LCDOFF']) # Display OFF
pyb.delay(10)
self._write_cmd(ILI._regs['SWRESET']) # Reset SW
pyb.delay(50)
self._graph_orientation()
self._write_cmd(ILI._regs['PTLON']) # Partial mode ON
self._write_cmd(ILI._regs['PIXFMT']) # Pixel format set
#self._write_data(0x66) # 18-bit/pixel
self._write_data(0x55) # 16-bit/pixel
self._write_cmd(ILI._regs['GAMMASET'])
self._write_data(0x01)
self._write_cmd(ILI._regs['ETMOD']) # Entry mode set
self._write_data(0x07)
self._write_cmd(ILI._regs['SLPOUT']) # sleep mode OFF
pyb.delay(10)
self._write_cmd(ILI._regs['LCDON'])
pyb.delay(10)
self._write_cmd(ILI._regs['RAMWR'])
def _write(self, word, dc, recv, recvsize=2):
dcs = ['cmd', 'data']
DCX = dcs.index(dc) if dc in dcs else None
ILI._csx.low()
ILI._dcx.value(DCX)
if recv:
fmt = '>B{0}'.format('B' * recvsize)
recv = bytearray(1+recvsize)
            data = ILI._spi.send_recv(struct.pack(fmt, word), recv=recv)
ILI._csx.high()
return data
ILI._spi.send(word)
ILI._csx.high()
def _write_cmd(self, word, recv=None):
data = self._write(word, 'cmd', recv)
return data
def _write_data(self, word):
self._write(word, 'data', recv=None)
def _write_words(self, words):
wordL = len(words)
wordL = wordL if wordL > 1 else ""
fmt = '>{0}B'.format(wordL)
words = struct.pack(fmt, *words)
self._write_data(words)
def _graph_orientation(self):
self._write_cmd(ILI._regs['MADCTL']) # Memory Access Control
# Portrait:
# | MY=0 | MX=1 | MV=0 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
# OR Landscape:
# | MY=0 | MX=0 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
data = 0x48 if ILI._portrait else 0x28
self._write_data(data)
def _char_orientation(self):
self._write_cmd(ILI._regs['MADCTL']) # Memory Access Control
# Portrait:
# | MY=1 | MX=1 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
# OR Landscape:
# | MY=0 | MX=1 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
data = 0xE8 if ILI._portrait else 0x58
self._write_data(data)
def _image_orientation(self):
self._write_cmd(ILI._regs['MADCTL']) # Memory Access Control
# Portrait:
# | MY=0 | MX=1 | MV=0 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
# OR Landscape:
# | MY=0 | MX=1 | MV=0 | ML=1 | BGR=1 | MH=0 | 0 | 0 |
data = 0xC8 if ILI._portrait else 0x68
self._write_data(data)
def _set_window(self, x0, y0, x1, y1):
# Column Address Set
self._write_cmd(ILI._regs['CASET'])
self._write_words(((x0>>8) & 0xFF, x0 & 0xFF, (y0>>8) & 0xFF, y0 & 0xFF))
# Page Address Set
self._write_cmd(ILI._regs['PASET'])
self._write_words(((x1>>8) & 0xFF, x1 & 0xFF, (y1>>8) & 0xFF, y1 & 0xFF))
# Memory Write
self._write_cmd(ILI._regs['RAMWR'])
def _get_Npix_monoword(self, color):
if color == WHITE:
word = 0xFFFF
elif color == BLACK:
word = 0
else:
R, G, B = color
word = (R<<11) | (G<<5) | B
word = struct.pack('>H', word)
return word
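# Worked example of the RGB565 packing above (illustrative): RED from colors.py is
# (31, 0, 0), so (31 << 11) | (0 << 5) | 0 == 0xF800, which is sent big-endian
# as b'\xf8\x00'.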
class BaseDraw(ILI):
def __init__(self, **kwargs):
super(BaseDraw, self).__init__(**kwargs)
def _set_ortho_line(self, width, length, color):
pixels = width * length
word = self._get_Npix_monoword(color) * pixels
self._write_data(word)
def drawPixel(self, x, y, color, pixels=4):
if pixels not in [1, 4]:
raise ValueError("Pixels count must be 1 or 4")
self._set_window(x, x+1, y, y+1)
self._write_data(self._get_Npix_monoword(color) * pixels)
def drawVline(self, x, y, length, color, width=1):
if length > self.TFTHEIGHT: length = self.TFTHEIGHT
if width > 10: width = 10
self._set_window(x, x+(width-1), y, y+length)
self._set_ortho_line(width, length, color)
def drawHline(self, x, y, length, color, width=1):
if length > self.TFTWIDTH: length = self.TFTWIDTH
if width > 10: width = 10
self._set_window(x, x+length, y, y+(width-1))
self._set_ortho_line(width, length, color)
    # Method written by MCHobby https://github.com/mchobby
# TODO:
# 1. support border > 1
def drawLine(self, x, y, x1, y1, color):
if x==x1:
self.drawVline( x, y if y<=y1 else y1, abs(y1-y), color )
elif y==y1:
self.drawHline( x if x<=x1 else x1, y, abs(x-x1), color )
else:
# keep positive range for x
if x1 < x:
x,x1 = x1,x
y,y1 = y1,y
r = (y1-y)/(x1-x)
# select ratio > 1 for fast drawing (and thin line)
if abs(r) >= 1:
for i in range( x1-x+1 ):
if (i==0): # first always a point
self.drawPixel( x+i, math.trunc(y+(r*i)), color )
else:
                    # r may be negative when drawing the wrong way > fix it when drawing
self.drawVline( x+i, math.trunc(y+(r*i)-r)+(0 if r>0 else math.trunc(r)), abs(math.trunc(r)), color )
else:
# keep positive range for y
if y1 < y:
x,x1 = x1,x
y,y1 = y1,y
# invert the ratio (should be close of r = 1/r)
r = (x1-x)/(y1-y)
for i in range( y1-y+1 ):
if( i== 0): # starting point is always a point
self.drawPixel( math.trunc(x+(r*i)), y+i, color )
else:
# r may be negative when drawing the wrong way > fix it to draw positive
self.drawHline( math.trunc(x+(r*i)-r)+(0 if r>0 else math.trunc(r)), y+i, abs(math.trunc(r)), color )
def drawRect(self, x, y, width, height, color, border=1, fillcolor=None):
border = 10 if border > 10 else border
if width > self.TFTWIDTH: width = self.TFTWIDTH
if height > self.TFTHEIGHT: height = self.TFTHEIGHT
if border:
if border > width//2:
border = width//2-1
X, Y = x, y
for i in range(2):
Y = y+height-(border-1) if i == 1 else y
self.drawHline(X, Y, width, color, border)
if border > 1:
Y = y+1
H = height
else:
Y = y
H = height + 1
X = x+width-(border-1) if i == 1 else x
self.drawVline(X, Y, H, color, border)
else:
fillcolor = color
if fillcolor:
xsum = x+border
ysum = y+border
dborder = border*2
self._set_window(xsum, xsum+width-dborder, ysum, ysum+height-dborder)
pixels = width * 8
word = self._get_Npix_monoword(fillcolor) * pixels
part = 1 if height < 20 else 7
i=0
while i < (height//part):
self._write_data(word)
i+=1
def fillMonocolor(self, color, margin=0):
margin = 80 if margin > 80 else margin
width = self.TFTWIDTH-margin*2
height = self.TFTHEIGHT-margin*2
self.drawRect(margin, margin, width, height, color, border=0)
def _get_x_perimeter_point(self, x, degrees, radius):
sin = math.sin(math.radians(degrees))
x = int(x+(radius*sin))
return x
def _get_y_perimeter_point(self, y, degrees, radius):
cos = math.cos(math.radians(degrees))
y = int(y-(radius*cos))
return y
def drawCircleFilled(self, x, y, radius, color):
tempY = 0
for i in range(180):
xNeg = self._get_x_perimeter_point(x, 360-i, radius-1)
xPos = self._get_x_perimeter_point(x, i, radius)
if i > 89:
Y = self._get_y_perimeter_point(y, i, radius-1)
else:
Y = self._get_y_perimeter_point(y, i, radius+1)
if i == 90: xPos = xPos-1
if tempY != Y and tempY > 0:
length = xPos+1
self.drawHline(xNeg, Y, length-xNeg, color, width=4)
tempY = Y
def drawCircle(self, x, y, radius, color, border=1, degrees=360, startangle=0):
border = 5 if border > 5 else border
# adding startangle to degrees
if startangle > 0:
degrees += startangle
if border > 1:
x = x - border//2
y = y - border//2
radius = radius-border//2
for i in range(startangle, degrees):
X = self._get_x_perimeter_point(x, i, radius)
Y = self._get_y_perimeter_point(y, i, radius)
if i == 90: X = X-1
elif i == 180: Y = Y-1
self.drawRect(X, Y, border, border, color, border=0)
def drawOvalFilled(self, x, y, xradius, yradius, color):
tempY = 0
for i in range(180):
xNeg = self._get_x_perimeter_point(x, 360-i, xradius)
xPos = self._get_x_perimeter_point(x, i, xradius)
Y = self._get_y_perimeter_point(y, i, yradius)
if i > 89: Y = Y-1
if tempY != Y and tempY > 0:
length = xPos+1
self.drawHline(xNeg, Y, length-xNeg, color, width=4)
tempY = Y
class BaseChars(ILI, BaseDraw):
def __init__(self, color=BLACK, font=None, bgcolor=WHITE, scale=1,
bctimes=7, **kwargs):
super(BaseChars, self).__init__(**kwargs)
self._fontColor = color
if font:
self._font = font
else:
raise ValueError("""Font not defined. Define font using argument:
lcd.initCh(font=fontname, **kwargs)""")
self._bgcolor = bgcolor
self._fontscale = scale
self._bctimes = bctimes # blink carriage times
def initCh(self, **kwargs):
ch = BaseChars(portrait=ILI._portrait, **kwargs)
return ch
@staticmethod
@micropython.asm_thumb
def _asm_get_charpos(r0, r1, r2):
mul(r0, r1)
adc(r0, r2)
def _set_word_length(self, word):
return bin(word)[3:]
def _fill_bicolor(self, data, x, y, width, height, scale=None):
if not scale:
scale = self._fontscale
bgcolor = self._bgcolor
color = self._fontColor
self._set_window(x, x+(height*scale)-1, y, y+(width*scale)-1)
bgpixel = self._get_Npix_monoword(bgcolor) * scale
pixel = self._get_Npix_monoword(color) * scale
words = ''.join(map(self._set_word_length, data))
words = bytes(words, 'ascii').replace(b'0', bgpixel).replace(b'1', pixel)
self._write_data(words)
def printChar(self, char, x, y, cont=False, scale=None):
if not scale:
scale = self._fontscale
font = self._font
scale = 3 if scale > 3 else scale
index = ord(char)
chrwidth = len(font[index])
height = font['height']
data = font[index]
X = self.TFTHEIGHT - y - (height*scale)+scale
Y = x
self._char_orientation()
self._fill_bicolor(data, X, Y, chrwidth, height, scale=scale)
if not cont:
self._graph_orientation()
def printLn(self, string, x, y, bc=False, scale=None):
if not scale:
scale = self._fontscale
font = self._font
X, Y = x, y
scale = 3 if scale > 3 else scale
for word in string.split(' '):
lnword = len(word)
if (x + lnword*7*scale) >= (self.TFTWIDTH-10):
x = X
y += (font['height']+2)*scale
for i in range(lnword):
chpos = scale-(scale//2)
chrwidth = len(font[ord(word[i])])
cont = False if i == len(word)-1 else True
self.printChar(word[i], x, y, cont=cont, scale=scale)
if chrwidth == 1:
chpos = scale+1 if scale > 2 else scale-1
x += self._asm_get_charpos(chrwidth, chpos, 3)
x += self._asm_get_charpos(len(font[32]), chpos, 3)
if bc: # blink carriage
if (x + 2 * scale) >= (self.TFTWIDTH - 10):
x = X
y += (font['height']+2) * scale
else:
x -= 4 * scale//2
self._blinkCarriage(x, y, scale=scale)
    # Blinking rectangular carriage at the end of a line
def _blinkCarriage(self, x, y, scale=None):
if not scale:
scale = self._fontscale
font = self._font
bgcolor = self._bgcolor
color = self._fontColor
times = self._bctimes
height = font['height'] * scale
width = 2 * scale
i = 0
while i != times:
self.drawVline(x, y, height, color, width=width)
pyb.delay(500)
self.drawVline(x, y, height, bgcolor, width=width)
pyb.delay(500)
i+=1
class BaseImages(ILI):
def __init__(self, **kwargs):
super(BaseImages, self).__init__(**kwargs)
# solution from forum.micropython.org
    # Needs to be understood
@staticmethod
@micropython.asm_thumb
def _reverse(r0, r1): # bytearray, len(bytearray)
b(loopend)
label(loopstart)
ldrb(r2, [r0, 0])
ldrb(r3, [r0, 1])
strb(r3, [r0, 0])
strb(r2, [r0, 1])
add(r0, 2)
label(loopend)
sub (r1, 2) # End of loop?
bpl(loopstart)
def _set_image_headers(self, f):
headers = list()
if f.read(2) != b'BM':
raise OSError('Not a valid BMP image')
for pos in (10, 18, 22): # startbit, width, height
f.seek(pos)
headers.append(struct.unpack('<H', f.read(2))[0]) # read double byte
return headers
def _get_image_points(self, pos, width, height):
if isinstance(pos, (list, tuple)):
x, y = pos
else:
x = 0 if width == self.TFTWIDTH else (self.TFTWIDTH-width)//2
y = 0 if height == self.TFTHEIGHT else (self.TFTHEIGHT-height)//2
return x, y
    # Used in the renderBmp method
def _render_bmp_image(self, filename, pos):
path = 'images/'
memread = 480
with open(path + filename, 'rb') as f:
startbit, width, height = self._set_image_headers(f)
if width < self.TFTWIDTH:
width -= 1
x, y = self._get_image_points(pos, width, height)
self._set_window(x, (width)+x, y, (height)+y)
f.seek(startbit)
while True:
try:
data = bytearray(f.read(memread))
self._reverse(data, len(data))
self._write_data(data)
except OSError: break
    # Used in the renderBmp method
def _render_bmp_cache(self, filename, pos):
filename = filename + '.cache'
startbit = 8
memread = 512
with open(imgcachedir + '/' + filename, 'rb') as f:
width = struct.unpack('H', f.readline())[0]
height = struct.unpack('H', f.readline())[0]
if width < self.TFTWIDTH:
width -= 1
x, y = self._get_image_points(pos, width, height)
self._set_window(x, (width)+x, y, (height)+y)
f.seek(startbit)
while True:
try:
self._write_data(f.read(memread))
except OSError: break
# TODO:
# 1. resize large images to screen resolution
    # 2. if part of the image goes out of the screen, only the displayed
    #    part must be rendered
def renderBmp(self, filename, pos=None, cached=True, bgcolor=None):
self._image_orientation()
if bgcolor:
self.fillMonocolor(bgcolor)
if filename + '.cache' not in os.listdir('images/cache'):
cached = False
if cached:
self._render_bmp_cache(filename, pos)
else:
self._render_bmp_image(filename, pos)
self._graph_orientation()
def clearImageCache(self, path):
for obj in os.listdir(path):
if obj.endswith('.cache'):
os.remove(path + '/' + obj)
# TODO:
# 1. resize large images to screen resolution
def cacheImage(self, image):
self.fillMonocolor(BLACK)
strings = self.initCh(DARKGREY, bgcolor=BLACK)
strings.printLn("Caching:", 25, 25)
strings.printLn(image + '...', 45, 45)
memread = 480
path = 'images/cache/'
with open('images/' + image, 'rb') as f:
startbit, width, height = self._set_image_headers(f)
c = open(path + image + '.cache', 'ab')
for val in [width, height]:
c.write(bytes(array.array('H', [val])) + b"\n")
f.seek(startbit)
data = '1'
while len(data) != 0:
try:
data = bytearray(f.read(memread))
self._reverse(data, len(data))
c.write(data)
except OSError: break
c.close()
print('Cached:', image)
class BaseTests(BaseDraw, BaseChars, BaseImages):
def __init__(self, **kwargs):
super(BaseTests, self).__init__(**kwargs)
def charsTest(self, color, font=None, bgcolor=WHITE, scale=1):
ch = self.initCh(color=color, font=font, bgcolor=bgcolor, scale=scale)
scale = 2 if scale > 1 else 1
x = y = 7 * scale
for i in range(33, 256):
try: chrwidth = len(font[i])
except KeyError: break
cont = False if i == 127 else True
ch.printChar(chr(i), x, y, cont=cont, scale=scale)
x += self._asm_get_charpos(chrwidth, scale, 3)
if x > (self.TFTWIDTH-10):
x = 10
y = self._asm_get_charpos(font['height'], scale, y)
def renderImageTest(self, cached=True, path='images', cpath='cache'): # images/cache path
starttime = pyb.micros()//1000
for image in os.listdir(path):
if image != cpath and image.endswith('bmp'):
self.renderBmp(image, cached=cached, bgcolor=BLACK)
return (pyb.micros()//1000-starttime)/1000
class BaseWidgets(BaseTests):
def __init__(self, **kwargs):
super(BaseWidgets, self).__init__(**kwargs)
class BaseObjects(BaseWidgets):
def __init__(self, **kwargs):
super(BaseObjects, self).__init__(**kwargs)
class LCD(BaseObjects):
def __init__(self, **kwargs):
super(LCD, self).__init__(**kwargs)
def reset(self):
super(LCD, self).reset()
def setPortrait(self, *args):
super(LCD, self).setPortrait(*args)
def drawPixel(self, *args, **kwargs):
super(LCD, self).drawPixel(*args, **kwargs)
def drawVline(self, *args, **kwargs):
super(LCD, self).drawVline(*args, **kwargs)
def drawHline(self, *args, **kwargs):
super(LCD, self).drawHline(*args, **kwargs)
def drawLine(self, *args, **kwargs):
super(LCD, self).drawLine(*args, **kwargs)
def drawRect(self, *args, **kwargs):
super(LCD, self).drawRect(*args, **kwargs)
def fillMonocolor(self, *args, **kwargs):
super(LCD, self).fillMonocolor(*args, **kwargs)
def drawCircleFilled(self, *args, **kwargs):
super(LCD, self).drawCircleFilled(*args, **kwargs)
def drawCircle(self, *args, **kwargs):
super(LCD, self).drawCircle(*args, **kwargs)
def drawOvalFilled(self, *args, **kwargs):
super(LCD, self).drawOvalFilled(*args, **kwargs)
def initCh(self, **kwargs):
return super(LCD, self).initCh(**kwargs)
def printChar(self, *args, **kwargs):
super(LCD, self).printChar(*args, **kwargs)
def printLn(self, *args, **kwargs):
super(LCD, self).printLn(*args, **kwargs)
def renderBmp(self, *args, **kwargs):
"""
Usage:
With position definition:
obj.renderBmp(f, [(tuple or list of x, y), cached or not, bgcolor or None])
Without position definition image renders in center of screen:
obj.renderBmp(f, [cached or not, bgcolor or None])
        By default the method renders the cached image, but only if the BMP
        image was cached before. For image caching see: lcd.cacheImage()
"""
super(LCD, self).renderBmp(*args, **kwargs)
def clearImageCache(self, *args, **kwargs):
super(LCD, self).clearImageCache(*args, **kwargs)
def cacheImage(self, *args, **kwargs):
super(LCD, self).cacheImage(*args, **kwargs)
def charsTest(self, *args, **kwargs):
super(LCD, self).charsTest(*args, **kwargs)
def renderImageTest(self, *args, **kwargs):
return super(LCD, self).renderImageTest(*args, **kwargs)
if __name__ == '__main__':
from fonts.arial_14 import Arial_14
from fonts.vera_14 import Vera_14
starttime = pyb.micros()//1000
d = LCD() # or d = LCD(portrait=False) for landscape
d.fillMonocolor(GREEN)
d.drawRect(5, 5, 230, 310, BLUE, border=10, fillcolor=ORANGE)
d.drawOvalFilled(120, 160, 60, 120, BLUE)
d.drawCircleFilled(120, 160, 55, RED)
d.drawCircle(120, 160, 59, GREEN, border=5)
c = d.initCh(color=BLACK, bgcolor=ORANGE, font=Vera_14) # define string obj
p = d.initCh(color=BLACK, bgcolor=RED, font=Arial_14, scale=2) # define string obj
c.printChar('@', 30, 30)
c.printLn('Hello BaseChar class', 30, 290)
p.printLn('Python3', 89, 155)
d.setPortrait(False) # Changing mode to landscape
d.renderBmp("test.bmp", (0, 0))
# last time executed in: 1.379 seconds
print('executed in:', (pyb.micros()//1000-starttime)/1000, 'seconds')
```
#### File: 3.MQTT通信/2.订阅者(subscribe)/main.py
```python
import network,pyb,time
from machine import I2C,Pin
from ssd1306 import SSD1306_I2C
from simple import MQTTClient
#初始化OLED
i2c = I2C(sda=Pin('Y10'), scl=Pin('Y9'))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#初始化以太网模块
nic = network.WIZNET5K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4)
nic.active(True)
nic.ifconfig('dhcp')
#设置MQTT回调函数,有信息时候执行
def MQTT_callback(topic, msg):
print('topic: {}'.format(topic))
print('msg: {}'.format(msg))
#判断网络是否连接成功
if nic.isconnected():
print(nic.ifconfig()) #打印IP信息
#OLED数据显示
oled.fill(0) #清屏背景黑色
oled.text('IP/Subnet/GW:',0,0)
oled.text(nic.ifconfig()[0], 0, 20)
oled.text(nic.ifconfig()[1],0,38)
oled.text(nic.ifconfig()[2],0,56)
oled.show()
SERVER = 'mqtt.p2hp.com'
PORT = 1883
CLIENT_ID = '01Studio-pyBoard' # 客户端ID
TOPIC = '/public/01Studio/1' # TOPIC名称
client = MQTTClient(CLIENT_ID, SERVER, PORT)
client.set_callback(MQTT_callback) #配置回调函数
client.connect()
client.subscribe(TOPIC) #订阅主题
while (True):
client.check_msg() #检测是否收到信息,收到则执行回调函数打印。
time.sleep_ms(300) #接收间隔
```
#### File: 1.基础实验/5.定时器/main.py
```python
from machine import Pin,Timer
led=Pin(25, Pin.OUT)
def fun(tim):
led.toggle()
#构建定时器
tim = Timer()
tim.init(period=1000, mode=Timer.PERIODIC,callback=fun) #周期为1000ms
```
#### File: 1.基础实验/3.外部中断/main.py
```python
from machine import Pin
import time
LED=Pin(2,Pin.OUT) #构建LED对象,开始熄灭
KEY=Pin(0,Pin.IN,Pin.PULL_UP) #构建KEY对象
state=0 #LED引脚状态
#LED状态翻转函数
def fun(KEY):
global state
time.sleep_ms(10) #消除抖动
if KEY.value()==0: #确认按键被按下
state = not state
LED.value(state)
KEY.irq(fun,Pin.IRQ_FALLING) #定义中断,下降沿触发
```
#### File: 2.传感器实验/4.光敏传感器/main.py
```python
from machine import Pin,I2C,ADC,Timer
from ssd1306 import SSD1306_I2C
#初始化相关模块
i2c = I2C(sda=Pin(13), scl=Pin(14))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#初始化ADC,Pin=32,11DB衰减,测量电压0-3.3V
Light = ADC(Pin(32))
Light.atten(ADC.ATTN_11DB)
#中断回调函数
def fun(tim):
oled.fill(0) # 清屏显示黑色背景
oled.text('01Studio', 0, 0) # 首行显示01Studio
oled.text('Light test:', 0, 15) # 次行显示实验名称
value=Light.read() #获取ADC数值
#显示数值
oled.text(str(value)+' (4095)',0,40)
#计算电压值,获得的数据0-4095相当于0-3.3V,('%.2f'%)表示保留2位小数
oled.text(str('%.2f'%(value/4095*3.3))+' V',0,55)
#判断光照强度,分3档显示。
if 0 < value <=1365:
oled.text('Bright', 60, 55)
if 1365 < value <= 2730:
oled.text('Normal', 60, 55)
if 2730 < value <= 4095:
oled.text('Weak ', 60, 55)
oled.show()
#开启RTOS定时器
tim = Timer(-1)
tim.init(period=1000, mode=Timer.PERIODIC, callback=fun) #周期1s
```
#### File: 2.传感器实验/8.超声波传感器/main.py
```python
from HCSR04 import HCSR04 #子文件夹下的调用方式
from machine import Pin,I2C,Timer
from ssd1306 import SSD1306_I2C
#初始化OLED
i2c = I2C(sda=Pin(13), scl=Pin(14))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#初始化接口 trig=17,echo=16
trig = Pin(17,Pin.OUT)
echo = Pin(16,Pin.IN)
HC=HCSR04(trig,echo)
#中断回调函数
def fun(tim):
oled.fill(0) # 清屏,背景黑色
oled.text('01Studio', 0, 0)
oled.text('Distance test:', 0, 15)
Distance = HC.getDistance() #测量距离
# OLED显示距离
oled.text(str(Distance) + ' CM', 0, 35)
oled.show()
#串口打印
print(str(Distance)+' CM')
#开启RTOS定时器
tim = Timer(-1)
tim.init(period=1000, mode=Timer.PERIODIC, callback=fun) #周期1s
```
#### File: 3.拓展实验/2.RGB灯带/main.py
```python
import time
from machine import Pin,Timer
from neopixel import NeoPixel
#定义红、绿、蓝三种颜色
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
#22引脚连接灯带,灯珠数量30
pin = Pin(22, Pin.OUT)
np = NeoPixel(pin, 30)
#设置灯珠颜色,本实验供30个灯珠
def Color_buf(color):
for i in range(30):
np[i]=color
while True:
Color_buf(RED) #红色
np.write() # 写入数据
time.sleep(1)
    Color_buf(GREEN) #绿色
np.write() # 写入数据
time.sleep(1)
    Color_buf(BLUE) #蓝色
np.write() # 写入数据
time.sleep(1)
```
#### File: 3.拓展实验/3.RGB彩灯(Neopixel)/main.py
```python
import time
from machine import Pin,Timer
from neopixel import NeoPixel
#定义红、绿、蓝三种颜色
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
#灯珠数量,板子1个灯珠,01Studio RGB灯带的数量为30
LED_NUM=1
#pyWiFi-ESP32-C3引脚8连接彩灯
pin = Pin(8, Pin.OUT)
np = NeoPixel(pin, LED_NUM)
#设置灯珠颜色
def Color_buf(color):
for i in range(LED_NUM):
np[i]=color
while True:
Color_buf(RED) #红色
np.write() # 写入数据
time.sleep(1)
    Color_buf(GREEN) #绿色
np.write() # 写入数据
time.sleep(1)
    Color_buf(BLUE) #蓝色
np.write() # 写入数据
time.sleep(1)
```
#### File: 2.传感器实验/4.光敏传感器/main.py
```python
from machine import Pin,SoftI2C,ADC,Timer
from ssd1306 import SSD1306_I2C
#初始化相关模块
i2c = SoftI2C(sda=Pin(40), scl=Pin(38))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#初始化ADC,Pin=10,11DB衰减,测量电压0-3.3V
Light = ADC(Pin(10))
Light.atten(ADC.ATTN_11DB)
#中断回调函数
def fun(tim):
oled.fill(0) # 清屏显示黑色背景
oled.text('01Studio', 0, 0) # 首行显示01Studio
oled.text('Light test:', 0, 15) # 次行显示实验名称
value=Light.read() #获取ADC数值
#显示数值
oled.text(str(value)+' (8191)',0,40)
    #计算电压值,获得的数据0-8191相当于0-3.3V,('%.2f'%)表示保留2位小数
oled.text(str('%.2f'%(value/8191*3.3))+' V',0,55)
#判断光照强度,分3档显示。
if 0 < value <=2730:
oled.text('Bright', 60, 55)
if 2730 < value <= 5460:
oled.text('Normal', 60, 55)
if 5460 < value <= 8191:
oled.text('Weak ', 60, 55)
oled.show()
#开启RTOS定时器
tim = Timer(1)
tim.init(period=1000, mode=Timer.PERIODIC, callback=fun) #周期1s
```
#### File: 4.拓展实验/3.RGB灯带/main.py
```python
import time
from machine import Pin,Timer
from neopixel import NeoPixel
#定义红、绿、蓝三种颜色
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
LED_NUM=1 #灯珠数量
#板上灯珠连接到引脚3
pin = Pin(3, Pin.OUT)
np = NeoPixel(pin, LED_NUM)
#设置灯珠颜色,本实验供LED_NUM个灯珠
def Color_buf(color):
for i in range(LED_NUM):
np[i]=color
while True:
Color_buf(RED) #红色
np.write() # 写入数据
time.sleep(1)
    Color_buf(GREEN) #绿色
np.write() # 写入数据
time.sleep(1)
    Color_buf(BLUE) #蓝色
np.write() # 写入数据
time.sleep(1)
```
#### File: 5.USB摄像头/1.拍照/main.py
```python
from machine import Pin
import esp_usb,time
KEY=Pin(0,Pin.IN,Pin.PULL_UP) #构建KEY对象
#摄像头初始化,默认帧大小 QVGA 320X240
cam = esp_usb.CAM(framesize = esp_usb.CAM.QVGA)
cam.display() #LCD显示
num=0 #用于命名图片
cam_flag = 0 #拍照标志位
##############################
# USR按键 拍照并保存
##############################
def fun(KEY):
global cam_flag
cam_flag = 1
#中断初始化
KEY.irq(fun,Pin.IRQ_FALLING) #定义中断,下降沿触发
while True:
#收到拍照命令
if cam_flag == 1:
#拍照并保存图片
cam.snapshot("/"+str(num)+".jpg")
num=num+1 #照片名称
cam_flag=0 #清空标志位
```
#### File: 2.传感器实验/2.温湿度传感器DHT11/main.py
```python
from machine import Pin,I2C,Timer
from ssd1306 import SSD1306_I2C
import dht,time
#初始化相关模块
i2c = I2C(sda=Pin(13), scl=Pin(14))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
#创建DTH11对象
d = dht.DHT11(Pin(5)) #传感器连接到引脚5
time.sleep(1) #首次启动停顿1秒让传感器稳定
def dht_get(tim):
d.measure() #温湿度采集
#OLED显示温湿度
oled.fill(0) #清屏背景黑色
oled.text('01Studio', 0, 0)
oled.text('DHT11 test:',0,15)
oled.text(str(d.temperature() )+' C',0,40) #温度显示
oled.text(str(d.humidity())+' %',48,40) #湿度显示
oled.show()
#开启RTOS定时器,编号为-1
tim = Timer(-1)
tim.init(period=2000, mode=Timer.PERIODIC,callback=dht_get) #周期为2000ms
```
#### File: 1.基础实验/5.外部中断/main.py
```python
from pyb import Pin,ExtInt,LED
#定义回调函数,需要将ext中断号传递进来
def fun1(ext):
LED(4).toggle()
ext = ExtInt(Pin('A0'), ExtInt.IRQ_FALLING, Pin.PULL_UP, fun1) #下降沿触发,打开上拉电阻
```
#### File: 1.基础实验/9.触摸屏按钮/main.py
```python
from tftlcd import LCD43M
from touch import GT1151
from pyb import LED,Timer
import gui,time
#定义常用颜色
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
ORANGE =(0xFF,0x7F,0x00) #橙色
#LCD初始化
d = LCD43M() #默认方向
d.fill(WHITE) #填充白色
#触摸屏初始化
t = GT1151()#默认方向
#####################
#定义2个按键和回调函数
#####################
def fun1(B1):
LED(3).toggle() #LED3状态翻转
def fun2(B2):
LED(4).toggle() #LED4状态翻转
B1 = gui.TouchButton(140,200,200,100,ORANGE,'LED3',WHITE,fun1)
B2 = gui.TouchButton(140,500,200,100,BLUE,'LED4',WHITE,fun2)
#############################
#### 定时器用于触发按钮事件 ##
#############################
tim_flag = 0
def count(tim):
global tim_flag
tim_flag = 1
tim = Timer(1,freq=50) #20ms刷新一次
tim.callback(count)
while True:
#执行按钮触发的任务
if tim_flag == 1:
t.tick_inc()
gui.task_handler()
tim_flag = 0
```
#### File: 3.通讯实验/5.CAN/main.py
```python
from pyb import CAN,Switch,delay,LED
send_flag = 0
def send():
global send_flag
#消除抖动,sw按下返回1,松开返回0。
if sw.value()==1:
delay(10)
if sw.value()==1:
send_flag = 1
sw = Switch() #定义按键对象名字为sw
sw.callback(send) #当按键被按下时,执行函数send()
can=CAN(1, CAN.NORMAL) #设置CAN1为普通模式(RX-->PB8,TX-->PB9)
#设置接收相关配置 id=123, 124, 125 和 126
can.setfilter(0, CAN.LIST16, 0, (123, 124, 125, 126))
can.send('message!', 123) #发送id=123的信息
num=0
while True:
#判断有无收到信息
if can.any(0):
text=can.recv(0) #读取数据
print(text) #通过REPL打印串口3接收的数据
if send_flag == 1:
can.send(str(num), 123) #发送id=123的信息
num=num+1
send_flag = 0
```
#### File: 3.音频播放/01-物理按键版/main.py
```python
import audio,time
from pyb import Switch
from machine import Pin
#构建音频对象
wm=audio.WM8978()
vol = 80 #音量初始化,80
######################
# 播放 USR按键
######################
play_flag = 0
def music_play():
global play_flag
play_flag = 1
sw =Switch()
sw.callback(music_play)
######################
# 音量加 A0按键
######################
VOL_U = Pin('A0',Pin.IN,Pin.PULL_UP) #构建按键A0
vol_up_flag = 0
def vol_up(VOL_U):
global vol
#消除按键抖动
if VOL_U.value() == 0:
time.sleep_ms(10)
if VOL_U.value() == 0:
vol=vol+10
if vol > 100:
vol = 100
wm.volume(vol)
VOL_U.irq(vol_up,Pin.IRQ_FALLING, hard=1) #定义中断,下降沿触发
######################
# 音量减 E3按键
######################
VOL_D = Pin('E3',Pin.IN,Pin.PULL_UP) #构建按键E3
vol_down_flag = 0
def vol_down(VOL_D):
global vol
#消除按键抖动
if VOL_D.value() == 0:
time.sleep_ms(10)
if VOL_D.value() == 0:
vol=vol-10
if vol < 10:
vol = 10
wm.volume(vol)
VOL_D.irq(vol_down,Pin.IRQ_FALLING, hard=1) #定义中断,下降沿触发
#加载音乐
wm.load('/flash/music/Seasons In The Sun.mp3')
while True:
#播放音乐
if play_flag == 1:
wm.play()
play_flag = 0
```
#### File: 1.基础实验/4.外部中断/main.py
```python
from machine import Pin
import time
#构建LED对象
led=Pin('C7', Pin.OUT)
#配置按键
key = Pin('A0', Pin.IN, Pin.PULL_DOWN)
state=0 #LED 引脚状态
#LED 状态翻转函数
def fun(key):
global state
time.sleep_ms(10) #消除抖动
if key.value()==1: #确认按键被按下,开发板按下拉高
state = not state
led.value(state)
key.irq(fun,Pin.IRQ_RISING) #定义中断,上升沿触发
```
#### File: 9.触摸屏按钮/7寸RGB屏/main.py
```python
from tftlcd import LCD7R
from touch import GT911
from machine import LED,Timer
import gui,time
#定义常用颜色
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
ORANGE =(0xFF,0x7F,0x00) #橙色
#LCD初始化
d = LCD7R() #默认方向
d.fill(WHITE) #填充白色
#触摸屏初始化
t = GT911() #默认方向
#####################
#定义4个按键和回调函数
#####################
def fun1(B1):
    LED(1).toggle() #LED1状态翻转
def fun2(B2):
    LED(2).toggle() #LED2状态翻转
def fun3(B3):
LED(3).toggle() #LED3状态翻转
def fun4(B4):
LED(4).toggle() #LED4状态翻转
B4 = gui.TouchButton(40,50,150,80,BLUE,'LED4',WHITE,fun4)
B3 = gui.TouchButton(230,50,150,80,ORANGE,'LED3',WHITE,fun3)
B2 = gui.TouchButton(420,50,150,80,GREEN,'LED2',WHITE,fun2)
B1 = gui.TouchButton(610,50,150,80,RED,'LED1',WHITE,fun1)
#############################
#### 定时器用于扫描按钮触发事件 ##
#############################
tim_flag = 0
def count(tim):
global tim_flag
tim_flag = 1
#构建软件定时器,编号1
tim = Timer(1)
tim.init(period=20, mode=Timer.PERIODIC,callback=count) #周期为20ms
while True:
#执行按钮触发的任务
if tim_flag == 1:
t.tick_inc()
gui.task_handler()
tim_flag = 0
``` |
{
"source": "01-vyom/mRNA-Vaccine-Degradation-Prediction",
"score": 2
} |
#### File: src/graph_transformer/train.py
```python
import gc
import os
from matplotlib import pyplot as plt
import pandas as pd
from src.graph_transformer.data_processing import *
from src.graph_transformer.model_utils import *
import tensorflow as tf
from sklearn.model_selection import KFold
gc.enable()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Code for training the auto encoder
def train_auto_encoder(
X_node,
X_node_pub,
X_node_pri,
adjacency_matrix,
adjacency_matrix_pub,
adjacency_matrix_pri,
epochs,
epochs_each,
batch_size,
save_path,
):
base = get_base(X_node, adjacency_matrix)
ae_model = get_ae_model(base, X_node, adjacency_matrix)
    # Iterate epochs // epochs_each times over the three datasets
for i in range(epochs // epochs_each):
print(f"------ {i} ------")
# Using the training dataset
ae_model.fit(
[X_node, adjacency_matrix],
[X_node[:, 0]],
epochs=epochs_each,
batch_size=batch_size,
)
gc.collect()
print("--- public ---")
# using the public dataset
ae_model.fit(
[X_node_pub, adjacency_matrix_pub],
[X_node_pub[:, 0]],
epochs=epochs_each,
batch_size=batch_size,
)
print("--- private ---")
# Use the private dataset
ae_model.fit(
[X_node_pri, adjacency_matrix_pri],
[X_node_pri[:, 0]],
epochs=epochs_each,
batch_size=batch_size,
)
gc.collect()
print("****** save ae model ******")
base.save_weights(save_path)
'''
# Train the GCN model, takes as input the
# @X_node: It is the train node features,
# @adjacency_matrix: It is the edge feature in adjacency matrix form
# @seq_len_target: It is the sequence length of the input data
# @epochs: The number of epochs to run the training
# @batch_size: The batch size to be used for the training
# @model_path: The path where the trained model is to be saved.
# @ae_model_path: Specifies the location of the pretrained autoencoder model.
# If None, no pretrained model is used.
# @plt_name: Name of the plot
# @n_fold: The number of folds to use for training
# @validation_frequency: Frequency in epochs after which the validation is to be run
# @y: The ground truth target
'''
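# A hypothetical call, shown for illustration only (the argument values below are
# assumptions, not taken verbatim from the project):
#   train_gcn(X_node, adjacency_matrix, seq_len_target, epochs=30, batch_size=16,
#             model_path="./model_without_ae.h5", ae_model_path=None, n_fold=5, y=y)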
def train_gcn(
X_node,
adjacency_matrix,
seq_len_target,
epochs,
batch_size,
model_path,
ae_model_path=None,
plt_name="withhout_ae",
n_fold=5,
validation_frequency=1,
y=None,
):
kfold = KFold(n_fold, shuffle=True, random_state=42)
legends = []
for fold, (tr_idx, va_idx) in enumerate(kfold.split(X_node, adjacency_matrix)):
gc.collect()
tf.keras.backend.clear_session()
print("Fold ", fold + 1, "/", n_fold, ": Training Started...")
X_node_tr = X_node[tr_idx]
X_node_va = X_node[va_idx]
As_tr = adjacency_matrix[tr_idx]
As_va = adjacency_matrix[va_idx]
y_tr = y[tr_idx]
y_va = y[va_idx]
base = get_base(X_node, adjacency_matrix)
if ae_model_path:
base.load_weights(ae_model_path)
model = get_model(base, X_node, adjacency_matrix, seq_len_target)
filepath_list = model_path.split(".")
filepath = f"{filepath_list[0]}_{fold}.{filepath_list[1]}"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=True,
monitor="val_loss",
mode="min",
save_best_only=True,
)
history = model.fit(
[X_node_tr, As_tr],
[y_tr],
validation_data=([X_node_va, As_va], [y_va]),
epochs=epochs,
batch_size=batch_size,
validation_freq=validation_frequency,
callbacks=[
tf.keras.callbacks.ReduceLROnPlateau(),
model_checkpoint_callback,
],
)
print(
f"Min training loss={min(history.history['loss'])}, min validation loss={min(history.history['val_loss'])}"
)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
legends.append(f"loss_fold_{fold}")
legends.append(f"val_loss_fold_{fold}")
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(
legends,
bbox_to_anchor=(1.02, 1),
loc="upper left",
borderaxespad=0,
fontsize="x-small",
)
plt.savefig("./result/GCN_Model_" + plt_name + ".png", bbox_inches="tight", dpi=600)
plt.show()
def __main__():
denoise = True # if True, use train data whose signal_to_noise > 1
allocate_gpu_memory()
path = "."
aug_data = "./data/augmented_data.csv"
n_fold = 7
aug_df = pd.read_csv(aug_data)
train = pd.read_json(f"{path}/train.json", lines=True)
train = augmentation(train, aug_df)
if denoise:
train = train[train.signal_to_noise > 1].reset_index(drop=True)
test = pd.read_json(f"{path}/test.json", lines=True)
test_pub = test[test["seq_length"] == 107]
test_pri = test[test["seq_length"] == 130]
sub = pd.read_csv(f"{path}/sample_submission.csv")
targets = list(sub.columns[1:])
print(targets)
y_train = []
seq_len = train["seq_length"].iloc[0]
seq_len_target = train["seq_scored"].iloc[0]
ignore = -10000
ignore_length = seq_len - seq_len_target
for target in targets:
y = np.vstack(train[target])
dummy = np.zeros([y.shape[0], ignore_length]) + ignore
y = np.hstack([y, dummy])
y_train.append(y)
y = np.stack(y_train, axis=2)
y.shape
adjacency_matrix = get_adj_matrix(train)
adjacency_matrix_pub = get_adj_matrix(test_pub)
adjacency_matrix_pri = get_adj_matrix(test_pri)
X_node = get_node_features(train)
X_node_pub = get_node_features(test_pub)
X_node_pri = get_node_features(test_pri)
epochs_list = [30, 10, 3, 3, 5, 5]
batch_size_list = [8, 16, 32, 64, 128, 256]
# Train model without auto encoder
epochs = epochs_list[0]
batch_size = batch_size_list[1]
model_path = "./model_without_ae.h5"
train_gcn(
X_node,
adjacency_matrix,
seq_len_target,
epochs,
batch_size,
model_path,
n_fold=n_fold,
y=y,
)
# Train model with auto encoder
ae_epochs = 30 # epoch of training of denoising auto encoder
ae_epochs_each = 10 # epoch of training of denoising auto encoder each time.
ae_batch_size = 32
ae_path = "./base_ae"
train_auto_encoder(
X_node,
X_node_pub,
X_node_pri,
adjacency_matrix,
adjacency_matrix_pub,
adjacency_matrix_pri,
ae_epochs,
ae_epochs_each,
ae_batch_size,
ae_path,
)
epochs_list = [30, 10, 3, 3, 5, 5]
batch_size_list = [8, 16, 32, 64, 128, 256]
epochs = epochs_list[0]
batch_size = batch_size_list[1]
model_path = "./model_with_ae.h5"
train_gcn(
X_node,
adjacency_matrix,
seq_len_target,
epochs,
batch_size,
model_path,
ae_model_path=ae_path,
n_fold=n_fold,
y=y,
)
```
#### File: src/GRU_LSTM/data_processing.py
```python
import numpy as np
pred_cols = ["reactivity", "deg_Mg_pH10", "deg_pH10", "deg_Mg_50C", "deg_50C"]
token2int = {x: i for i, x in enumerate("().<KEY>")}
def preprocess_inputs(df, cols=["sequence", "structure", "predicted_loop_type"]):
base_fea = np.transpose(
np.array(
df[cols].applymap(lambda seq: [token2int[x] for x in seq]).values.tolist()
),
(0, 2, 1),
)
bpps_sum_fea = np.array(df["bpps_sum"].to_list())[:, :, np.newaxis]
bpps_max_fea = np.array(df["bpps_max"].to_list())[:, :, np.newaxis]
bpps_nb_fea = np.array(df["bpps_nb"].to_list())[:, :, np.newaxis]
bpps_v_fea = np.array(df["bpps_v"].to_list())[:, :, np.newaxis]
bpps_m_fea = np.array(df["bpps_m"].to_list())[:, :, np.newaxis]
return np.concatenate(
[base_fea, bpps_sum_fea, bpps_max_fea, bpps_nb_fea, bpps_v_fea, bpps_m_fea], 2
)
return base_fea
# additional features
def read_bpps_sum(df):
bpps_arr = []
for mol_id in df.id.to_list():
bpps_arr.append(np.load(f"./bpps/{mol_id}.npy").sum(axis=1))
return bpps_arr
def read_bpps_max(df):
bpps_arr = []
for mol_id in df.id.to_list():
bpps_arr.append(np.load(f"./bpps/{mol_id}.npy").max(axis=1))
return bpps_arr
def read_bpps_nb(df):
# normalized non-zero number
# from https://www.kaggle.com/symyksr/openvaccine-deepergcn
bpps_nb_mean = 0.077522 # mean of bpps_nb across all training data
bpps_nb_std = 0.08914 # std of bpps_nb across all training data
bpps_arr = []
for mol_id in df.id.to_list():
bpps = np.load(f"./bpps/{mol_id}.npy")
bpps_nb = (bpps > 0).sum(axis=0) / bpps.shape[0]
bpps_nb = (bpps_nb - bpps_nb_mean) / bpps_nb_std
bpps_arr.append(bpps_nb)
return bpps_arr
def read_bpps_m(df):
e = 0.00000001
bpps_arr = []
for mol_id in df.id.to_list():
bpps = np.load(f"./bpps/{mol_id}.npy")
vec = []
for i in range(bpps.shape[0]):
m = 0
l = 0
for j in range(bpps.shape[0]):
if bpps[i][j] > 0:
m = m + (j * bpps[i][j])
l = l + 1
m = m / (l + e)
vec.append(m)
bpps_arr.append(vec)
return bpps_arr
def read_bpps_v(df):
    b = 0.9 # beta for exponentially weighted average with bias correction
e = 0.00000001
bpps_arr = []
for mol_id in df.id.to_list():
bpps = np.load(f"./bpps/{mol_id}.npy")
vec = []
for i in range(bpps.shape[0]):
v = 0
m = 0
l = 0
for j in range(bpps.shape[0]):
if bpps[i][j] > 0:
v = b * v + (1 - b) * bpps[i][j]
m = m + v
l = l + 1
m = m / (l + e)
vec.append(m)
bpps_arr.append(vec)
return bpps_arr
def augmentation(df, aug_df):
# from https://www.kaggle.com/code/its7171/how-to-generate-augmentation-data
target_df = df.copy()
new_df = aug_df[aug_df["id"].isin(target_df["id"])]
del target_df["structure"]
del target_df["predicted_loop_type"]
new_df = new_df.merge(target_df, on=["id", "sequence"], how="left")
df["cnt"] = df["id"].map(new_df[["id", "cnt"]].set_index("id").to_dict()["cnt"])
df["log_gamma"] = 100
df["score"] = 1.0
df = df.append(new_df[df.columns])
return df
``` |
{
"source": "01walid/py-dz-phone-number",
"score": 2
} |
#### File: py-dz-phone-number/tests/conftest.py
```python
import random
import string
import pytest
from dz_phone_number import LandlinePrefix, MobileOperator
@pytest.fixture(scope="session")
def invalid_phone_numbers():
return [
"ABCD",
"123456",
        # Satisfies the length for mobile
"1234567890",
"+111234567890",
"00111234567890",
        # Satisfies the length for a landline
"123456789",
"+11123456789",
"0011123456789",
# Is valid but not Algerian number
# Valid mobile
"+216512345678",
"00216512345678",
# Valid Landline
"0021638123456",
"+21638123456",
# has spaces
# non-string
213512345678,
# empty string
"",
# Is None
None,
# too many ( or )
"+ (213) (5) 12345678",
"+ (213) (5) 12345678)",
"+ (213) (5) 12345(67)(8)",
# over dashed
"+ (213) 512-34-56--78-",
# starts with a dash
"- (213) 512-34-56-78",
]
@pytest.fixture(scope="session")
def valid_phone_numbers():
return [
"0512345678",
"0612345678",
"0712345678",
"00213512345678",
"+213512345678",
"+213 5 12 34 56 78",
"+213-5-12-34-56-78",
"(+213) 5-12-34-56-78",
"+213 6 12 34 56 78",
"+213-6-12-34-56-78",
"(+213) 6-12-34-56-78",
"+213 7 12 34 56 78",
"+213-7-12-34-56-78",
"(+213) 38-12-34-56",
"0 38-12-34-56",
"00213 38-12-34-56",
"00213-38-12-34-56",
"0-21-12-34-56",
]
@pytest.fixture(scope="session")
def valid_random_number():
def rand_func():
random_indicative = random.choice(["0", "+213", "00213", "(+213)", "(00213)"])
numbers = random.choices(string.digits, k=random.choice([6, 8]))
operator_or_region = (
MobileOperator.all() if len(numbers) == 8 else LandlinePrefix.all()
)
operator_or_region = random.choice(operator_or_region)
random_number = "".join(numbers)
return f"{random_indicative}{operator_or_region}{random_number}"
return rand_func
```
#### File: py-dz-phone-number/tests/test_correctness.py
```python
import pytest
from dz_phone_number import DZPhoneNumber, InvalidDZPhoneNumber
def test_invalid_phone_numbers(invalid_phone_numbers):
for number in invalid_phone_numbers:
with pytest.raises(InvalidDZPhoneNumber):
DZPhoneNumber(number)
def test_valid_phone_numbers_with_some_formatting(valid_phone_numbers):
for number in valid_phone_numbers:
dz_phone_number = DZPhoneNumber(number)
assert dz_phone_number.raw_number == number
```
#### File: py-dz-phone-number/tests/test_is.py
```python
import pytest
from dz_phone_number import DZPhoneNumber, LandlinePrefix
def test_is_mobile():
number = DZPhoneNumber("0512345678")
assert number.is_mobile()
assert not number.is_landline()
def test_is_landline():
number = DZPhoneNumber("038123456")
assert number.is_landline()
assert not number.is_mobile()
def test_is_ooredoo():
number = DZPhoneNumber("0512345678")
assert number.is_ooredoo()
assert not number.is_djezzy()
assert not number.is_mobilis()
def test_is_djezzy():
number = DZPhoneNumber("0712345678")
assert number.is_djezzy()
assert not number.is_ooredoo()
assert not number.is_mobilis()
def test_is_mobilis():
number = DZPhoneNumber("0612345678")
assert number.is_mobilis()
assert not number.is_ooredoo()
assert not number.is_djezzy()
def test_is_wilayas():
for wilaya, dial_number in LandlinePrefix.__members__.items():
pn = DZPhoneNumber(f"0{dial_number.value}123456")
attr = getattr(pn, f"is_{wilaya.lower()}")
assert attr()
``` |
{
"source": "01xu10/pity",
"score": 3
} |
#### File: app/middleware/AsyncHttpClient.py
```python
import json
import time
import aiohttp
class AsyncRequest(object):
def __init__(self, url: str, timeout=15, **kwargs):
self.url = url
self.kwargs = kwargs
self.timeout = aiohttp.ClientTimeout(total=timeout)
def get_cookie(self, session):
cookies = session.cookie_jar.filter_cookies(self.url)
return {k: v.value for k, v in cookies.items()}
async def invoke(self, method: str):
start = time.time()
async with aiohttp.ClientSession(cookie_jar=aiohttp.CookieJar(unsafe=True)) as session:
async with session.request(method, self.url, timeout=self.timeout, **self.kwargs) as resp:
if resp.status != 200:
return await self.collect(False, self.kwargs.get("data"), resp.status)
cost = "%.0fms" % ((time.time() - start) * 1000)
response = await AsyncRequest.get_resp(resp)
cookie = self.get_cookie(session)
return await self.collect(True, self.kwargs.get("data"), resp.status, response,
resp.headers, resp.request_info.headers, elapsed=cost,
cookies=cookie
)
@staticmethod
async def get_resp(resp):
try:
data = await resp.json(encoding='utf-8')
return json.dumps(data, ensure_ascii=False, indent=4)
except:
data = await resp.text()
return data
@staticmethod
def get_request_data(body):
request_body = body
if isinstance(body, bytes):
request_body = request_body.decode()
if isinstance(request_body, str) or request_body is None:
return request_body
return json.dumps(request_body, ensure_ascii=False)
@staticmethod
async def collect(status, request_data, status_code=200, response=None, response_headers=None,
request_headers=None, cookies=None, elapsed=None, msg="success"):
"""
收集http返回数据
:param status: 请求状态
:param request_data: 请求入参
:param status_code: 状态码
:param response: 相应
:param response_headers: 返回header
:param request_headers: 请求header
:param cookies: cookie
:param elapsed: 耗时
:param msg: 报错信息
:return:
"""
request_headers = json.dumps({k: v for k, v in request_headers.items()} if request_headers is not None else {},
ensure_ascii=False)
response_headers = json.dumps(
{k: v for k, v in response_headers.items()} if response_headers is not None else {},
ensure_ascii=False)
cookies = {k: v for k, v in cookies.items()} if cookies is not None else {}
cookies = json.dumps(cookies, ensure_ascii=False)
return {
"status": status, "response": response, "status_code": status_code,
"request_data": AsyncRequest.get_request_data(request_data),
"response_headers": response_headers, "request_headers": request_headers,
"msg": msg, "cost": elapsed, "cookies": cookies,
}
```
#### File: app/routers/__init__.py
```python
from fastapi import Header
from starlette import status
from app.excpetions.RequestException import AuthException, PermissionException
from app.middleware.Jwt import UserToken
from config import Config
FORBIDDEN = "对不起, 你没有足够的权限"
class Permission:
def __init__(self, role: int = Config.GUEST):
self.role = role
def __call__(self, token: str = Header(...)):
if not token:
raise AuthException(status.HTTP_200_OK, "用户信息身份认证失败, 请检查")
try:
user_info = UserToken.parse_token(token)
if user_info.get("role", 0) < self.role:
raise PermissionException(status.HTTP_200_OK, FORBIDDEN)
except PermissionException as e:
raise e
except Exception as e:
raise AuthException(status.HTTP_200_OK, str(e))
return user_info
``` |
{
"source": "01xz/approx-vision",
"score": 2
} |
#### File: pipelines/CLI/pipeline.py
```python
from PIL import Image
import ast
import argparse
import ConfigParser
import os
import subprocess, sys, shutil
import pdb
CONFIG_FILE = "/approx-vision/pipelines/CLI/config/config.ini"
IMAGE_TEMP_IN = "/approx-vision/pipelines/CLI/temp/temp_in.png"
IMAGE_TEMP_OUT = "/approx-vision/pipelines/CLI/temp/output/output.png"
'''
Module for running input image through image processing pipeline.
Options:
path to input image file
path to output image file
path to camera model
wb index
num of control points
pipeline version #
pipeline version stages
path to pipeline source cpp code
True to build pipeline source cpp code, False otherwise
'''
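# A minimal sketch of driving the class directly; every path and value below is an
# illustrative assumption, not taken from the repository or its config files:
#   Pipeline(in_file_path="in.png", out_file_path="out.png",
#            cam_model_path="/path/to/cam_model/", wb_index="1", num_ctrl_pts="10",
#            version=1, stages="stage1 stage2", pipeline_path="/path/to/pipeline.cpp",
#            build=True).run()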
class Pipeline(object):
def __init__(self, in_file_path, out_file_path, cam_model_path, \
wb_index, num_ctrl_pts, version, stages,
pipeline_path, build ):
self.in_file_path = in_file_path
self.out_file_path = out_file_path
self.cam_model_path = cam_model_path
self.wb_index = wb_index
self.num_ctrl_pts = num_ctrl_pts
self.version = version
self.stages = stages
self.pipeline_path = pipeline_path
self.pipeline_folder = os.path.dirname(pipeline_path)
self.pipeline_obj_path = pipeline_path[:-3] + "o"
self.build = build
def run(self):
# print "\nStarted running pipeline version # " + str(self.version)
try:
# build pipeline source files
if self.build:
command = ["make", "--directory", self.pipeline_folder]
subprocess.call(" ".join(command), shell=True)
# create temp image file
# img_filename = os.path.basename(self.in_file_path)
# img = Image.open(self.in_file_path)
# img.save(IMAGE_TEMP_IN)
# change working dir
os.chdir(self.pipeline_folder)
# call pipeline
command = [self.pipeline_obj_path, self.in_file_path, self.out_file_path]
command.extend([self.cam_model_path, self.wb_index, self.num_ctrl_pts])
command.append(self.stages)
print " ".join(command)
subprocess.call(" ".join(command), shell=True)
# move output to output folder
# out_file_path = os.path.join(self.out_dir_path, img_filename)
# temp_out_file_path = IMAGE_TEMP_OUT + "output.png"
# img = Image.open(temp_out_file_path)
# img.save(out_file_path)
# command = ["rm", "-rf", IMAGE_TEMP_IN, temp_out_file_path ]
# subprocess.call(" ".join(command), shell=True)
print "Finished converting " + str(self.in_file_path) + " under V" + str(self.version)
except Exception as e:
print "Failed to run pipeline: "
print e
###############################################################################
'''
Command line tool for converting image data via defined image pipeline
argv:
path to input image file
path to output image directory
path to camera model
wb index
num of control points
version #
'''
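# Hypothetical command-line usage (the flag names come from the argparse setup below;
# the values themselves are illustrative only):
#   python pipeline.py --infile in.png --outfile out.png --version 3 --build False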
if __name__ == "__main__":
# config parsing
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
cam_model_path = config.get("default", "cam_model_path")
wb_index = config.get("default", "wb_index")
num_ctrl_pts = config.get("default", "num_ctrl_pts")
pipeline_path = config.get("default", "pipeline_path")
# args
parser = argparse.ArgumentParser(description="Runs Pipeline")
parser.add_argument("--build",
default="True",
help="'True' to compile pipeline source files, otherwise set to 'False'")
parser.add_argument("--infile",
default=IMAGE_TEMP_IN,
help="Filepath to input image file")
parser.add_argument("--outfile",
default=IMAGE_TEMP_OUT,
help="Filepath to output image file")
parser.add_argument("--version",
default=1,
help="Pipeline version to run")
parser.add_argument("--campath",
default=cam_model_path,
help="Path to camera model")
parser.add_argument("--wb",
default=wb_index,
help="White balance index")
parser.add_argument("--ctrl",
default=num_ctrl_pts,
help="Number of control points")
args = parser.parse_args()
version_stages = config.get("version", "V"+str(args.version))
version_stages = " ".join(version_stages.split("/"))
pipeline = Pipeline(args.infile, args.outfile, args.campath, args.wb, args.ctrl, \
args.version, version_stages, pipeline_path, \
ast.literal_eval(args.build))
pipeline.run()
``` |
{
"source": "020101sha/Soluciones",
"score": 3
} |
#### File: soluciones/simulador_cuotas_cronograma/main.py
```python
from models import Cliente, Cuota, Cronograma
from utils import convertir_fecha, show
# pip install prototools
from prototools import float_input, choice_input, date_input, main_loop
def _registrar_cliente():
cliente = Cliente(
apellidos=input("Ingresar apellidos: "),
nombres=input("Ingresar nombres: "),
dni=input("Ingresar DNI: "),
)
return cliente
def _registrar_cuota():
cuota = Cuota(
monto_prestamo=float_input("Préstamo: ", min=5_000, max=30_000),
dias_pago=choice_input(("15", "25"), "Días de pago: "),
fecha=convertir_fecha(date_input("de desembolso", "-")),
)
return cuota
def procesar():
cliente = _registrar_cliente()
cuota = _registrar_cuota()
cronograma = Cronograma(cliente, cuota)
show(cronograma.data)
if __name__ == "__main__":
main_loop(procesar)
``` |
{
"source": "0201shj/Python-OpenCV",
"score": 3
} |
#### File: 0201shj/Python-OpenCV/1005.py
```python
import cv2
import numpy as np
#1
roi = None
drag_start = None
mouse_status = 0
tracking_start = False
def onMouse(event, x, y, flags, param=None):
global roi
global drag_start
global mouse_status
global tracking_start
if event == cv2.EVENT_LBUTTONDOWN:
drag_start = (x, y)
mouse_status = 1
tracking_start = False
elif event == cv2.EVENT_MOUSEMOVE:
if flags == cv2.EVENT_FLAG_LBUTTON:
xmin = min(x, drag_start[0])
ymin = min(y, drag_start[1])
xmax = max(x, drag_start[0])
ymax = max(y, drag_start[1])
roi = (xmin, ymin, xmax, ymax)
mouse_status = 2 # dragging
elif event == cv2.EVENT_LBUTTONUP:
mouse_status = 3 # complete
#2
cv2.namedWindow('tracking')
cv2.setMouseCallback('tracking', onMouse)
cap = cv2.VideoCapture('./data/checkBoard3x3.avi')
if (not cap.isOpened()):
print('Error opening video')
height, width = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
roi_mask = np.zeros((height, width), dtype=np.uint8)
params = dict(maxCorners=16,qualityLevel=0.001,minDistance=10,blockSize=5)
term_crit = (cv2.TERM_CRITERIA_MAX_ITER+cv2.TERM_CRITERIA_EPS,10,0.01)
params2 = dict(winSize= (5,5), maxLevel = 3, criteria = term_crit)
#3
t = 0
while True:
ret, frame = cap.read()
if not ret: break
t+=1
print('t=',t)
imgC = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
imgC = cv2.GaussianBlur(imgC, (5, 5), 0.5)
#3-1
if mouse_status==2:
x1, y1, x2, y2 = roi
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
#3-2
if mouse_status==3:
print('initialize....')
mouse_status = 0
x1, y1, x2, y2 = roi
roi_mask[:,:] = 0
roi_mask[y1:y2, x1:x2] = 1
p1 = cv2.goodFeaturesToTrack(imgC,mask=roi_mask,**params)
if len(p1)>=4:
p1 = cv2.cornerSubPix(imgC, p1, (5,5),(-1,-1), term_crit)
rect = cv2.minAreaRect(p1)
box_pts = cv2.boxPoints(rect).reshape(-1,1,2)
tracking_start = True
#3-3
if tracking_start:
p2,st,err= cv2.calcOpticalFlowPyrLK(imgP,imgC,p1,None,**params2)
p1r,st,err=cv2.calcOpticalFlowPyrLK(imgC,imgP,p2,None,**params2)
d = abs(p1-p1r).reshape(-1, 2).max(-1)
stat = d < 1.0 # 1.0 is distance threshold
good_p2 = p2[stat==1].copy()
good_p1 = p1[stat==1].copy()
for x, y in good_p2.reshape(-1, 2):
cv2.circle(frame, (x, y), 3, (0,0,255), -1)
if len(good_p2)<4:
continue
H, mask = cv2.findHomography(good_p1, good_p2, cv2.RANSAC, 3.0)
box_pts = cv2.perspectiveTransform(box_pts, H)
cv2.polylines(frame,[np.int32(box_pts)],True,(255,0, 0),2)
p1 = good_p2.reshape(-1,1,2)
#3-4
cv2.imshow('tracking',frame)
imgP = imgC.copy()
key = cv2.waitKey(25)
if key == 27:
break
if cap.isOpened():
cap.release();
cv2.destroyAllWindows()
```
#### File: 0201shj/Python-OpenCV/1010.py
```python
import cv2
import numpy as np
'''
ref:
http://www.morethantechnical.com/2011/06/17/simple-kalman-filter-for-tracking-using-opencv-2-2-w-code/
'''
#1
def onMouse(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
param[0][:,:] = 0 # clear image
param[1][0] = x # mouse point z
param[1][1] = y
#2
frame = np.zeros((512,512,3), np.uint8)
z = np.zeros((2,1), np.float32) # measurement
cv2.namedWindow('<NAME>')
cv2.setMouseCallback('<NAME>',onMouse, [frame, z])
#3
q = 1e-5 # process noise covariance
r = 0.01 # measurement noise covariance, r = 1
KF = cv2.KalmanFilter(4,2,0)
KF.transitionMatrix = np.array([[1,0,1,0],
[0,1,0,1],
[0,0,1,0],
[0,0,0,1]], np.float32) # A
KF.measurementMatrix = np.array([[1,0,0,0],
[0,1,0,0]],np.float32) # H
KF.processNoiseCov = q* np.eye(4, dtype=np.float32) # Q
KF.measurementNoiseCov = r* np.eye(2, dtype=np.float32) # R
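# (Descriptive note, inferred from the matrices above: A encodes a constant-velocity
#  model, x' = x + vx, y' = y + vy; H projects the 4-D state [x, y, vx, vy] onto the
#  2-D mouse-position measurement z.)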
#4 initial value
KF.errorCovPost = np.eye(4, dtype=np.float32) # P0 = I
KF.statePost = np.zeros((4, 1), dtype=np.float32) # x0 = 0
last_z = z.copy()
last_estimate = KF.statePost.copy()
#5
while True:
predict = KF.predict()
estimate =KF.correct(z)
x1, y1 = np.int0(last_z)
x2, y2 = np.int0(z)
cv2.line(frame, (x1, y1),(x2, y2), (0,0,255), 2 )
x1, y1,_, _ = np.int0(last_estimate)
x2, y2, _, _ = np.int0(estimate)
cv2.line(frame, (x1, y1),(x2, y2), (255,0,0), 2 )
cv2.imshow('<NAME>',frame)
last_z = z.copy()
last_estimate = estimate.copy()
key = cv2.waitKey(30)
if key == 27: break
cv2.destroyAllWindows()
```
#### File: Python-OpenCV/Chapter02/0211.py
```python
import cv2
import matplotlib.pyplot as plt
#1
def handle_key_press(event):
if event.key == 'escape':
cap.release()
        plt.close()
def handle_close(evt):
print('Close figure!')
cap.release()
#2 프로그램 시작
cap = cv2.VideoCapture(0) #0번카메라
plt.ion() #대화모드 설정
fig = plt.figure(figsize = (10, 6)) # fig.set_size_inches(10,6)
plt.axis('off')
#ax = fig.gca()
#ax.set_axis_off()
fig.canvas.set_window_title('Video Capture')
fig.canvas.mpl_connect('key_press_event', handle_key_press)
fig.canvas.mpl_connect('close_event', handle_close)
retval, frame = cap.read() #첫 프레임 캡처
im = plt.imshow(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
#3
while True:
retval, frame = cap.read()
if not retval:
break
    # plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    im.set_array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
fig.canvas.draw()
# fig.canvas.draw_idle()
fig.canvas.flush_events() # plt.pause(0.001)
if cap.isOpened():
cap.release()
```
#### File: Python-OpenCV/Chapter02/0212.py
```python
import cv2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#프로그램 시작
cap = cv2.VideoCapture(0)
fig = plt.figure(figsize =(10,6)) #fig.set_size_inches(10,6)
fig.canvas.set_window_title('Video Capture')
plt.axis('off')
def init():
global im
retval, frame = cap.read()
im = plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
## return im,
def updateFrame(k):
retval, frame = cap.read()
if retval:
im.set_array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
ani = animation.FuncAnimation(fig, updateFrame, init_func= init, interval = 50)
plt.show()
if cap.isOpened():
cap.release()
```
#### File: Python-OpenCV/Chapter08/0820.py
```python
import cv2
import numpy as np
#1
def rectSum(sumImage, rect):
x, y, w, h = rect
a = sumImage[y, x]
b = sumImage[y, x+w]
c = sumImage[y+h, x]
d = sumImage[y+h, x+w]
return a + d - b - c
def compute_Haar_feature1(sumImage):
rows, cols = sumImage.shape
rows -= 1
cols -= 1
f1 = []
for y in range(0, rows):
for x in range(0, cols):
for h in range(1, rows-y+1):
for w in range(1, (cols-x)//2+1):
s1 = rectSum(sumImage, (x, y, w, h))
s2 = rectSum(sumImage, (x+w,y, w, h))
f1.append([1, x, y, w, h, s1-s2])
return f1
def compute_Haar_feature2(sumImage):
rows, cols = sumImage.shape
rows -= 1
cols -= 1
f2 = []
for y in range(0, rows):
for x in range(0, cols):
for h in range(1, (rows-y)//2+1):
for w in range(1, cols-x+1):
s1 = rectSum(sumImage, (x, y, w, h))
s2 = rectSum(sumImage, (x,y+h, w, h))
f2.append([2, x, y, w, h, s2-s1])
return f2
def compute_Haar_feature3(sumImage):
rows, cols = sumImage.shape
rows -= 1
cols -= 1
f3 = []
for y in range(0, rows):
for x in range(0, cols):
for h in range(1, rows-y+1):
for w in range(1, (cols-x)//3+1):
s1 = rectSum(sumImage, (x, y, w, h))
s2 = rectSum(sumImage, (x+w, y, w, h))
s3 = rectSum(sumImage, (x+2*w,y, w, h))
f3.append([3, x, y, w, h, s1-s2+s3])
return f3
def compute_Haar_feature4(sumImage):
rows, cols = sumImage.shape
rows -= 1
cols -= 1
f4 = []
for y in range(0, rows):
for x in range(0, cols):
for h in range(1, (rows-y)//3+1):
for w in range(1, cols-x+1):
s1 = rectSum(sumImage, (x, y, w, h))
s2 = rectSum(sumImage, (x,y+h, w, h))
s3 = rectSum(sumImage, (x,y+2*h, w, h))
f4.append([4, x, y, w, h, s1-s2+s3])
return f4
def compute_Haar_feature5(sumImage):
rows, cols = sumImage.shape
rows -= 1
cols -= 1
f5 = []
for y in range(0, rows):
for x in range(0, cols):
for h in range(1, (rows-y)//2+1):
for w in range(1, (cols-x)//2+1):
s1 = rectSum(sumImage, (x, y, w, h))
s2 = rectSum(sumImage, (x+w,y, w, h))
s3 = rectSum(sumImage, (x, y+h, w, h))
s4 = rectSum(sumImage, (x+w,y+h, w, h))
f5.append([5, x, y, w, h, s1-s2-s3+s4])
return f5
#2
gray = cv2.imread('./data/lenaFace24.jpg', cv2.IMREAD_GRAYSCALE) # 24 x 24
gray_sum = cv2.integral(gray)
f1 = compute_Haar_feature1(gray_sum)
n1 = len(f1)
print('len(f1)=',n1)
for i, a in enumerate(f1[:2]):
print('f1[{}]={}'.format(i, a))
#3
f2 = compute_Haar_feature2(gray_sum)
n2 = len(f2)
print('len(f2)=',n2)
for i, a in enumerate(f2[:2]):
print('f2[{}]={}'.format(i, a))
#4
f3 = compute_Haar_feature3(gray_sum)
n3 = len(f3)
print('len(f3)=',n3)
for i, a in enumerate(f3[:2]):
print('f3[{}]={}'.format(i, a))
#5
f4 = compute_Haar_feature4(gray_sum)
n4 = len(f4)
print('len(f4)=',n4)
for i, a in enumerate(f4[:2]):
print('f4[{}]={}'.format(i, a))
#6
f5 = compute_Haar_feature5(gray_sum)
n5 = len(f5)
print('len(f5)=',n5)
for i, a in enumerate(f5[:2]):
print('f5[{}]={}'.format(i, a))
print('total features =', n1+n2+n3+n4+n5)
```
#### File: Python-OpenCV/Chapter09/0915.py
```python
import cv2
import numpy as np
#1
src1 = cv2.imread('./data/book1.jpg')
src2 = cv2.imread('./data/book2.jpg')
img1= cv2.cvtColor(src1,cv2.COLOR_BGR2GRAY)
img2= cv2.cvtColor(src2,cv2.COLOR_BGR2GRAY)
#2
surF = cv2.xfeatures2d.SURF_create()
kp1, des1 = surF.detectAndCompute(img1, None)
kp2, des2 = surF.detectAndCompute(img2, None)
print('len(kp1)=', len(kp1))
print('len(kp2)=', len(kp2))
#3
##bf = cv2.BFMatcher()
##matches = bf.radiusMatch(des1,des2, maxDistance=0.3)
flan = cv2.FlannBasedMatcher_create()
matches = flan.radiusMatch(des1,des2, maxDistance=0.3)
#4
def draw_key2image(kp, img):
x, y = kp.pt
size = kp.size
rect = ((x, y), (size, size), kp.angle)
box = cv2.boxPoints(rect).astype(np.int32)
cv2.polylines(img, [box], True, (0,255,0), 2)
cv2.circle(img, (round(x), round(y)), round(size/2), (255,0,0), 2)
## return img
for i, radius_match in enumerate(matches):
if len(radius_match) != 0:
print('i=', i)
print('len(matches[{}])={}'.format(i,len(matches[i])))#len(radius_match)
src1c = src1.copy()
draw_key2image(kp1[radius_match[0].queryIdx], src1c)
src2c = src2.copy()
for m in radius_match:
draw_key2image(kp2[m.trainIdx], src2c)
dst = cv2.drawMatches(src1c,kp1,src2c,kp2,radius_match, None,flags=2)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.waitKey()
cv2.destroyAllWindows()
``` |
{
"source": "0202zc/my-subscription",
"score": 2
} |
#### File: crawlers/beta/process_util.py
```python
import os
import sys
import logging
from datetime import datetime
import covid19_spider as covid19
import spider_customize as customize
import spider_hot as hot
import weibo_spider as weibo
import zhihu_spider as zhihu
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='log' + str(datetime.now().strftime("%Y%m%d%H%M%S")) + '.txt',
filemode='a')
def my_listener(event):
if event.exception:
print('任务出错了!!!!!!')
else:
print('任务照常运行...')
# 任务
def job_weibo():
print("<job_weibo> - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
weibo.main()
def job_zhihu():
print("<job_zhihu> - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
zhihu.main()
def job_hot():
print("<job_hot> - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
local_time = datetime.now().strftime("%H:%M")
hot.hot(str(local_time)[:2] + ':00:00')
def job_covid19():
print("<job_covid19> - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
covid19.process()
def job_customize():
print("<job_customize> - " + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
local_time = datetime.now().strftime("%H:%M")
customize.process(str(local_time)[:2] + ':00:00')
def my_job(text):
print(text)
if __name__ == '__main__':
# 允许两个job同时执行
job_defaults = {'max_instances': 2}
# BlockingScheduler 会阻塞进程,不能执行start()后面的程序
    scheduler = BlockingScheduler(job_defaults=job_defaults)
# 不会阻塞进程,可以执行start()后面的程序
# scheduler = BackgroundScheduler()
scheduler.add_job(func=job_hot, trigger='cron', day_of_week='0-6', hour='8,12,20', minute='00', id='job_hot',
misfire_grace_time=60)
scheduler.add_job(func=job_customize, trigger='cron', day_of_week='0-6', hour='8,12,20', id='job_customize',
misfire_grace_time=60)
scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
jobs = scheduler.get_jobs()
print(jobs)
scheduler.start()
```
#### File: crawlers/beta/zhihu_spider.py
```python
import demjson
import os
import re
import requests
import sys
import time
import urllib.parse
from bs4 import BeautifulSoup
from mail_assist import send_mail_with_time
from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import datetime
url = "https://www.zhihu.com/topsearch"
file_path = 'C:/Users/Administrator/Desktop/runnable/beta/customize/zhihu_spider.py'
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36'
}
def get_soup(url):
print("Getting soup...")
html = get_HTML_text(url)
soup = BeautifulSoup(html, "html.parser")
return soup
def get_HTML_text(url):
try:
r = requests.get(url, headers=header)
while r.status_code != 200:
time.sleep(600)
r = requests.get(url, headers=header)
print(r.status_code)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except Exception as e:
print(e)
return e
# 单独发送
def process():
data = prettify()
local_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
local_time = datetime.now().strftime("%H:%M:%S")
print(local_datetime)
send_mail_with_time("知乎热搜 " + local_time, data, send_time=(str(local_time)[:2] + ':00:00'),
service_name='知乎热搜榜')
def prettify():
soup = get_soup(url).findAll("script", id="js-initialData")
script_text = str(soup[0])
rule = r'"topsearch":(.*?),"requestHashId"'
result = re.findall(rule, script_text)
temp = result[0].replace("false", '"False"').replace("true", '"True"') + "}"
data_dict = demjson.decode(temp)['data']
content = """
<table border="1" cellspacing="0">
<tr>
<th align="center">排行</th>
<th align="center">标题</th>
</tr>
"""
rank = 1
for data in data_dict:
content += '<tr><td align="center">' + str(rank) + "</td>"
content += '<td><a href="https://www.zhihu.com/search?type=content&q=' + urllib.parse.quote(
data['realQuery']) + '" target="_blank">' + data['queryDisplay'] + "</a><br>" + data[
'queryDescription'] + "</td></tr>"
rank += 1
content += "</table>"
return content
if __name__ == '__main__':
print("Running...")
content = prettify()
``` |
{
"source": "0206pdh/TradingGym",
"score": 3
} |
#### File: TradingGym/environments/read_config.py
```python
import json
class EnvConfig():
"""environment configuration from json file
tgym requires you configure your own parameters in json file.
Args:
config_file path/file.json
"""
def __init__(self, config_file='./data/config/gdbusd-test-1.json'):
self.config = {}
with open(config_file) as j:
self.config = json.load(j)
def env_parameters(self,item=''):
"""environment variables
"""
if item:
return self.config["env"][item]
else:
return self.config["env"]
def symbol(self, asset="GBPUSD", item=''):
"""get trading pair (symbol) information
Args:
asset (str, optional): symbol in config. Defaults to "GBPUSD".
item (str, optional): name of item, if '' return dict, else return item value. Defaults to ''.
Returns:
[type]: [description]
"""
if item:
return self.config["symbol"][asset][item]
else:
return self.config["symbol"][asset]
def trading_hour(self,place="New York"):
"""forex trading hour from different markets
Args:
place (str, optional): [Sydney,Tokyo,London] Defaults to "New York".
Returns:
[dict]: from time, to time
"""
if place:
return self.config["trading_hour"][place]
else:
return self.config["trading_hour"]
if __name__ == '__main__':
cf = EnvConfig()
print(f'{cf.env_parameters()}')
print(cf.env_parameters("observation_list"))
print(f'asset_col: {cf.env_parameters()["asset_col"]}')
print(cf.symbol(asset="GBPUSD")["point"])
print(f'trading hour new york: {cf.trading_hour("New York")}')
``` |
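The accessors above imply a particular JSON layout. The snippet below is only an illustrative sketch of such a file written from Python; the section names (`env`, `symbol`, `trading_hour`) come from the code, while the concrete keys and values (`observation_list`, `asset_col`, `point`, the trading hours) are made-up placeholders, not the project's real configuration.
```python
import json

# Hypothetical configuration matching the lookups EnvConfig performs.
sample_config = {
    "env": {
        "observation_list": ["Open", "High", "Low", "Close"],  # placeholder values
        "asset_col": "symbol",
    },
    "symbol": {
        "GBPUSD": {"point": 0.0001},
    },
    "trading_hour": {
        "New York": {"from": "08:00", "to": "17:00"},
    },
}

with open("sample-config.json", "w") as f:
    json.dump(sample_config, f, indent=2)

# cf = EnvConfig(config_file="sample-config.json")
# print(cf.symbol("GBPUSD", "point"))
```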
{
"source": "02221/Hands-On-Data-Analysis-with-Pandas-2nd-edition",
"score": 4
} |
#### File: Hands-On-Data-Analysis-with-Pandas-2nd-edition/ch_06/color_utils.py
```python
import re
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
def hex_to_rgb_color_list(colors):
"""
Take color or list of hex code colors and convert them
to RGB colors in the range [0,1].
Parameters:
- colors: Color or list of color strings of the format
'#FFF' or '#FFFFFF'
Returns:
The color or list of colors in RGB representation.
"""
if isinstance(colors, str):
colors = [colors]
for i, color in enumerate(
[color.replace('#', '') for color in colors]
):
hex_length = len(color)
if hex_length not in [3, 6]:
raise ValueError(
'Colors must be of the form #FFFFFF or #FFF'
)
regex = '.' * (hex_length // 3)
colors[i] = [
int(val * (6 // hex_length), 16) / 255
for val in re.findall(regex, color)
]
return colors[0] if len(colors) == 1 else colors
def blended_cmap(rgb_color_list):
"""
Create a colormap that blends from one color to the other.
Parameters:
- rgb_color_list: A list of colors represented as [R, G, B]
values in the range [0, 1], like [[0, 0, 0], [1, 1, 1]],
for black and white, respectively.
Returns:
A matplotlib `ListedColormap` object
"""
if not isinstance(rgb_color_list, list):
raise ValueError('Colors must be passed as a list.')
elif len(rgb_color_list) < 2:
raise ValueError('Must specify at least 2 colors.')
elif (
not isinstance(rgb_color_list[0], list)
or not isinstance(rgb_color_list[1], list)
) or (
len(rgb_color_list[0]) != 3 or len(rgb_color_list[1]) != 3
):
raise ValueError(
'Each color should be represented as a list of size 3.'
)
N, entries = 256, 4 # red, green, blue, alpha
rgbas = np.ones((N, entries))
segment_count = len(rgb_color_list) - 1
segment_size = N // segment_count
remainder = N % segment_count # need to add this back later
for i in range(entries - 1): # we don't alter alphas
updates = []
for seg in range(1, segment_count + 1):
# determine how much needs to be added back to account for remainders
offset = 0 if not remainder or seg > 1 else remainder
updates.append(np.linspace(
start=rgb_color_list[seg - 1][i],
stop=rgb_color_list[seg][i],
num=segment_size + offset
))
rgbas[:,i] = np.concatenate(updates)
return ListedColormap(rgbas)
def draw_cmap(cmap, values=np.array([[0, 1]]), **kwargs):
"""
Draw a colorbar for visualizing a colormap.
Parameters:
- cmap: A matplotlib colormap
- values: The values to use for the colormap, defaults to [0, 1]
- kwargs: Keyword arguments to pass to `plt.colorbar()`
Returns:
A matplotlib `Colorbar` object, which you can save with:
`plt.savefig(<file_name>, bbox_inches='tight')`
"""
img = plt.imshow(values, cmap=cmap)
cbar = plt.colorbar(**kwargs)
img.axes.remove()
return cbar
```
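A short usage sketch tying the three helpers together, assuming it runs in the same module; the hex values are arbitrary.
```python
import matplotlib.pyplot as plt

# Convert hex strings to RGB triples in [0, 1].
rgb_colors = hex_to_rgb_color_list(['#000', '#FFFFFF'])

# Blend them into a 256-entry colormap and render it as a standalone colorbar.
cmap = blended_cmap(rgb_colors)
cbar = draw_cmap(cmap, orientation='horizontal')
plt.savefig('blended_cmap.png', bbox_inches='tight')
```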
#### File: visual-aids/visual_aids/sim_viz.py
```python
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
def show_distributions():
"""Generate a plot for each of distributions used in the simulation."""
fig, axes = plt.subplots(2, 3, figsize=(15, 10))
axes = axes.flatten()
fig.delaxes(axes[-2])
# triangular distribution defined by min (a), max (b) and mode
a, b, mode = 1.5, 5, 2.75
peak = 2 / (b - a)  # peak of PDF is at 2/(b-a)
axes[0].plot([a, mode, b], [0, peak, 0])
axes[0].set_title('Triangular PDF')
axes[0].set_xlabel('x')
axes[0].set_ylabel('density')
axes[0].annotate('min', xy=(a, 0), xytext=(a + 1, 0), arrowprops=dict(arrowstyle='->'))
axes[0].annotate('max', xy=(b, 0), xytext=(b - 1.25, 0), arrowprops=dict(arrowstyle='->'))
axes[0].annotate('peak', xy=(mode, peak), xytext=(mode - 0.2, peak - 0.2), arrowprops=dict(arrowstyle='->'))
# uniform distribution defined by min (a) and max (b)
a, b = 0, 1
peak = 1 / (b - a)
axes[1].plot([a, a, b, b], [0, peak, peak, 0])
axes[1].set_title('Uniform PDF')
axes[1].set_ylabel('density')
axes[1].set_xlabel('x')
axes[1].annotate('min', xy=(a, peak), xytext=(a + 0.2, peak - 0.2), arrowprops=dict(arrowstyle='->'))
axes[1].annotate('max', xy=(b, peak), xytext=(b - 0.3, peak - 0.2), arrowprops=dict(arrowstyle='->'))
axes[1].set_ylim(0, 1.5)
# gaussian
mu, sigma = 1.01, 0.01
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
axes[2].plot(x, stats.norm.pdf(x, mu, sigma) / 100)
axes[2].set_title('Gaussian PDF')
axes[2].set_ylabel('density')
axes[2].set_xlabel('x')
axes[2].annotate(r'$\mu$', xy=(mu, 0.4), xytext=(mu - 0.001, 0.3), arrowprops=dict(arrowstyle='->'))
axes[2].annotate(
'', xy=(mu-sigma, 0.25), xytext=(mu + 0.01, 0.25),
arrowprops=dict(arrowstyle='|-|, widthB=0.5, widthA=0.5')
)
axes[2].annotate(r'$2\sigma$', xy=(mu - 0.002, 0.22))
# exponential
x = np.linspace(0, 5, 100)
axes[3].plot(x, stats.expon.pdf(x, scale=1/3))
axes[3].set_title('Exponential PDF')
axes[3].set_ylabel('density')
axes[3].set_xlabel('x')
axes[3].annotate(r'$\lambda$ = 3', xy=(0, 3), xytext=(0.5, 2.8), arrowprops=dict(arrowstyle='->'))
# Poisson PMF (probability mass function) because this is a discrete random variable
x = np.arange(0, 10)
axes[5].plot(x, stats.poisson.pmf(x, mu=3), linestyle='--', marker='o')
axes[5].set_title('Poisson PMF')
axes[5].set_ylabel('mass')
axes[5].set_xlabel('x')
axes[5].annotate(r'$\lambda$ = 3', xy=(3, 0.225), xytext=(1.9, 0.2), arrowprops=dict(arrowstyle='->'))
plt.suptitle('Understanding the distributions used for the simulation', fontsize=15, y=0.95)
return axes
``` |
{
"source": "02ayoub02/sdia-python",
"score": 4
} |
#### File: sdia_python/lab2/ball_window.py
```python
import numpy as np
from math import gamma
class BallWindow:
"""class BallWindow contains balls defined by centers and radius"""
def __init__(self, center, R):
"""initialization
Args:
center (array): the center
R (float): radius of the ball
"""
try:
assert R >= 0
except:
print("Please submit a positive radius")
try:
# This will detect problems with center
assert len(center) > 0
except:
print("Please submit a valid center")
self.center = np.array(center, dtype=np.float32)
self.R = R
def __str__(self):
""" print the ball
Returns:
str: BallWindow: center=..., radius=...
"""
float_formatter = "{:.2f}".format
np.set_printoptions(formatter={"float_kind": float_formatter})
return (
"BallWindow: "
+ "center="
+ str(self.center)
+ ", radius="
+ str("%.2f" % round(self.R, 2))
)
def indicator(self, point):
r"""True if the point in the ball
Args:
point (list): point
Returns:
bool: True if the point in the ball
"""
try:
assert self.dimension() == len(point)
except:
print("dimension error")
return np.sum((point - self.center) ** 2) <= self.R ** 2
# s = 0
# for i in range(self.dimension):
# s += (point[i] - self.center[i]) ** 2
# if s <= self.radius ** 2:
# return True
# return False
def dimension(self):
"""the dimension of the ball
Returns:
int: dimension
"""
return len(self.center)
def volume(self):
"""The volume of the ball
Returns:
float: volume of the ball
"""
return (
(np.pi ** (self.dimension() / 2))
* (self.R ** self.dimension())
/ (gamma(self.dimension() / 2 + 1))
)
class UnitBallWindow(BallWindow):
def __init__(self, center):
super().__init__(center, R=1)
``` |
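A brief interactive sketch of the class above, run from the same module:
```python
import numpy as np

ball = BallWindow(center=[0.0, 0.0], R=2.0)
print(ball)                                  # BallWindow: center=[0.00 0.00], radius=2.00
print(ball.dimension())                      # 2
print(ball.volume())                         # pi * R**2 for a 2D ball: ~12.57
print(ball.indicator(np.array([1.0, 1.0])))  # True, the point lies inside
print(ball.indicator(np.array([3.0, 0.0])))  # False, the point lies outside
```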
{
"source": "02ChenBo/python-websocket-server",
"score": 3
} |
#### File: 02ChenBo/python-websocket-server/server.py
```python
from websocket_server import WebsocketServer
# Called for every client connecting (after handshake)
def new_client(client, server):
print("New client connected and was given id %d" % client['id'])
# server.send_message_to_all("a new client...")
server.send_message(client, "请问有什么可以帮到您?")
# Called for every client disconnecting
def client_left(client, server):
print("Client(%d) disconnected" % client['id'])
# Called when a client sends a message
def message_received(client, server, message):
if len(message) > 200:
message = message[:200] + '..'
print("Client(%d)_address%s said: %s" % (client['id'], client['address'], message))
server.send_message(client, 'Received your message:'+message)
PORT = 9003
server = WebsocketServer(PORT, host="192.168.50.70")
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
server.run_forever()
``` |
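A minimal client-side sketch for exercising this server, assuming the `websocket-client` package is installed and that the host and port match the values the server was started with:
```python
# pip install websocket-client
from websocket import create_connection

ws = create_connection("ws://192.168.50.70:9003")
print(ws.recv())         # greeting sent by new_client()
ws.send("hello server")
print(ws.recv())         # echo reply from message_received()
ws.close()
```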
{
"source": "02infiinity/dash-docs",
"score": 3
} |
#### File: tutorial/examples/dynamic_content.py
```python
import dash
from dash.dependencies import Event, Output
import dash_html_components as html
app = dash.Dash(__name__)
app.config.suppress_callback_exceptions = True
app.layout = html.Div([
html.Button('Click to load children', id='display-children-button'),
html.Div('-', id='display-children')
])
# When you click the button, this content gets loaded
@app.callback(
Output('display-children', 'children'),
events=[Event('display-children-button', 'click')])
def render():
return html.Div([
html.H3('Hello Dash')
])
if __name__ == '__main__':
app.run_server(debug=True)
```
#### File: tutorial/examples/graph_callbacks_crossfiltering.py
```python
import math
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
app = dash.Dash(__name__)
# Get data
df = pd.read_csv(
'https://raw.githubusercontent.com/'
'plotly/datasets/master/'
'gapminderDataFiveYear.csv')
# Get a list of unique years in the dataframe
years = sorted(list(df.year.unique()))
app.layout = html.Div([
html.Div([
html.Div([
dcc.Graph(id='graph-left'),
], className='six columns'),
html.Div([
dcc.Graph(id='graph-right')
], className='six columns')
], className='row'),
dcc.Slider(
id='year-slider',
marks={
i: (str(i) if (i - 2) % 10 == 0 else '')
for i in years
},
value=1952, min=years[0], max=years[-1]
)
])
# Common figure generation function shared by both callbacks
def create_figure(year, selectedData, hoverData, yaxis_column):
if selectedData is None:
selectedData = {'points': []}
if hoverData is None:
hoverData = {'points': []}
filtered_countries = set([
point['customdata']
for point in selectedData['points'] + hoverData['points']
])
filtered_df = df[df.year == year]
traces = []
for i, continent in enumerate(df.continent.unique()):
continent_df = filtered_df[filtered_df.continent == continent]
traces.append({
'x': continent_df.gdpPercap,
'y': continent_df[yaxis_column],
'text': continent_df.country,
'customdata': continent_df.country,
'marker': {
'size': 10,
'opacity': [
1.0
if (j in filtered_countries or
len(filtered_countries) == 0)
else 0.3
for j in list(continent_df.country)
],
'line': {'width': 0.5, 'color': 'lightgrey'}
},
'name': continent,
'mode': 'markers'
})
return {
'data': traces,
'layout': {
'xaxis': {
'title': 'GDP per Capita', 'type': 'log',
'range': [math.log10(10), math.log10(120*1000)],
'autorange': False
},
'yaxis': {
'title': 'Life Expectancy',
'range': [20, 90], 'autorange': False
},
'annotations': [{
'x': 0, 'xref': 'paper', 'xanchor': 'left',
'y': 1, 'yref': 'paper', 'yanchor': 'bottom',
'text': year,
'font': {'size': 16}, 'showarrow': False
}],
'legend': {
'x': 1, 'xanchor': 'right',
'y': 0, 'yanchor': 'bottom',
'bgcolor': 'rgba(255, 255, 255, 0.5)'
},
'margin': {'l': 40, 'r': 0, 't': 40, 'b': 40},
'hovermode': 'closest', 'dragmode': 'lasso'
}
}
@app.callback(
Output('graph-left', 'figure'),
[Input('year-slider', 'value'),
Input('graph-right', 'selectedData'),
Input('graph-right', 'hoverData')])
def filterScatterPlotLeft(sliderValue, selectedData, hoverData):
figure = create_figure(sliderValue, selectedData, hoverData, 'lifeExp')
figure['layout']['yaxis'] = {
'title': 'Life Expectancy',
'range': [10, 90], 'autorange': False
}
return figure
@app.callback(
Output('graph-right', 'figure'),
[Input('year-slider', 'value'),
Input('graph-left', 'selectedData'),
Input('graph-left', 'hoverData')])
def filterScatterPlotRight(sliderValue, selectedData, hoverData):
figure = create_figure(sliderValue, selectedData, hoverData, 'pop')
figure['layout']['yaxis'] = {
'title': 'Population', 'type': 'log',
'range': [math.log10(100), math.log10(10*1000*1000*1000)],
'autorange': False
}
return figure
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "02JanDal/Flask-Consent",
"score": 2
} |
#### File: Flask-Consent/flask_consent/__init__.py
```python
import json
from collections import OrderedDict
from dataclasses import dataclass
from datetime import timedelta, datetime
from importlib.resources import read_text
from typing import Callable, Iterable, List, Set, Union
from flask import current_app, render_template_string, render_template, request, jsonify, Flask, Response
from markupsafe import Markup
from .version import version as _version
__version__ = _version
@dataclass(frozen=True)
class ConsentCategory:
"""A "category" of consent, for example a group of cookies (or even just a single cookie) that belong together."""
name: str
title: str
description: str
default: bool
is_required: bool
class ConsentExtensionState:
def __init__(self, extension, app):
"""Used internally."""
self.extension = extension # type: Consent
self.app = app # type: Flask
@property
def full_template(self):
return self.app.config['CONSENT_FULL_TEMPLATE']
@property
def banner_template(self):
return self.app.config['CONSENT_BANNER_TEMPLATE']
@property
def contact_mail(self):
return self.app.config['CONSENT_CONTACT_MAIL']
@property
def cookie_name(self):
return self.app.config['CONSENT_COOKIE_NAME']
@property
def valid_for(self):
return timedelta(days=int(self.app.config['CONSENT_VALID_FOR_MONTHS']) / 12 * 365)
@property
def primary_servername(self):
val = self.app.config.get('CONSENT_PRIMARY_SERVERNAME', self.app.config.get('SERVER_NAME'))
assert val, 'you need to set CONSENT_PRIMARY_SERVERNAME or SERVER_NAME'
return val
def html(self):
primary_domain = self.primary_servername.split(':')[0]
if request.endpoint == 'flask_consent' or request.consent.is_stale():
return Markup(render_template_string(
read_text(__name__, 'injection.html'),
flask_consent_banner=self.extension._render_template_func(
self.banner_template,
flask_consent_contact_mail=self.contact_mail,
flask_consent_categories=self.extension.categories.values()),
flask_consent_contact_mail=self.contact_mail,
flask_consent_primary_domain=primary_domain,
flask_consent_domains=self.extension.domains + [primary_domain]
))
else:
return ''
class ConsentData:
def __init__(self, state: ConsentExtensionState):
"""This class contains the user facing API during a request. You can access it using request.consent."""
self._state = state
data = json.loads(request.cookies.get(self._state.cookie_name, '{}')) # type: dict
try:
self._last_updated = datetime.fromisoformat(data['last_updated'])
except (ValueError, KeyError):
self._last_updated = datetime.utcnow()
if self._state.cookie_name not in request.cookies or 'enabled' not in data:
self._enabled = {c.name for c in self._state.extension.categories.values() if c.default}
else:
self._enabled = set(data['enabled']) if isinstance(data['enabled'], list) else set()
self._dirty = False
def is_stale(self):
if self._state.cookie_name not in request.cookies:
return True
return (self._last_updated + self._state.valid_for) < datetime.utcnow()
def finalize(self, response):
if self._dirty:
response.set_cookie(self._state.cookie_name,
json.dumps(dict(
enabled=list(self._enabled),
last_updated=self._last_updated.isoformat()
)),
secure=not current_app.debug and not current_app.testing,
samesite='None',
max_age=int(self._state.valid_for.days * 24 * 60 * 60))
@property
def last_updated(self) -> datetime:
return self._last_updated
@property
def enabled(self) -> Set[str]:
return self._enabled
def __getitem__(self, key: Union[ConsentCategory, str]) -> bool:
"""
Lookup if the given consent category is enabled.
:param key: The consent category, either as a ConsentCategory object or the name as a string
:return: True if enabled, False if not
"""
if isinstance(key, ConsentCategory):
key = key.name
return key in self._enabled
def __setitem__(self, key: Union[ConsentCategory, str], value: bool):
"""
Set a consent category to be enabled or disabled
If an actual change was done we will send an updated Set-Cookie with the request.
:param key: The consent category, either as a ConsentCategory object or the name as a string
:param value: True if enabled, False if not
"""
if isinstance(key, ConsentCategory):
key = key.name
if value and key not in self._enabled:
self._enabled.add(key)
self._dirty = True
self._last_updated = datetime.utcnow()
elif not value and key in self._enabled:
self._enabled.remove(key)
self._dirty = True
self._last_updated = datetime.utcnow()
class Consent:
def __init__(self, app: Flask = None):
"""
This Flask extension handles multi-domain cookie consent.
When visiting a page we first check if we have a consent cookie for the current domain. If
not we send an AJAX request to `GET primary.domain/crossdomain/consent/` which returns consent
information for the cookies for the primary domain. If available that information is set on the
current domain through `POST current.domain/crossdomain/consent/`. This consent information
contains both the domains given consent for (all domains currently available) as well as what
consent was given.
If neither the current nor the primary domain contain consent information we ask the user. Upon
the users selection (either in the pop up or later) we send `POST <domain>/crossdomain/consent/`
for all domains.
"""
self._categories = OrderedDict()
self._domain_loader = lambda: []
self._render_template_func = render_template
self.app = app
if self.app:
self.init_app(app)
def domain_loader(self, func: Callable[[], Iterable[str]]):
"""
Register the method that returns the list of valid domain names
"""
self._domain_loader = func
@property
def domains(self) -> List[str]:
"""
Returns the list of valid domain names
"""
result = list(self._domain_loader())
if current_app.debug:
host_domain = request.headers['Host'].split('/')[-1].split(':')[0]
if host_domain == 'localhost':
result.append(request.headers['Host'])
return result
def set_render_template_func(self, f):
"""
Overrides the template rendering function used (normally flask.render_template).
Can be used to support themes or similar.
"""
self._render_template_func = f
def init_app(self, app: Flask):
app.config.setdefault('CONSENT_FULL_TEMPLATE', None)
app.config.setdefault('CONSENT_BANNER_TEMPLATE', None)
app.config.setdefault('CONSENT_CONTACT_MAIL', None)
app.config.setdefault('CONSENT_COOKIE_NAME', '_consent')
app.config.setdefault('CONSENT_VALID_FOR_MONTHS', 12)
app.config.setdefault('CONSENT_PRIMARY_SERVERNAME', app.config.get('SERVER_NAME', None))
app.config.setdefault('CONSENT_PATH', '/consent')
if 'consent' in app.extensions:
raise KeyError('It seems you have already registered this extension on this app')
app.extensions['consent'] = ConsentExtensionState(self, app)
app.add_url_rule(app.config['CONSENT_PATH'], 'flask_consent',
self._handle_consent_route, methods=('GET', 'POST'))
@app.context_processor
def context_processor():
return dict(flask_consent_code=self.state().html)
@app.before_request
def prepare_request():
request.consent = ConsentData(self.state())
@app.after_request
def finalize_request(response):
request.consent.finalize(response)
return response
@classmethod
def state(cls) -> ConsentExtensionState:
return current_app.extensions['consent']
def add_category(self, name: str, title: str, description: str,
default: bool, is_required: bool = False) -> ConsentCategory:
"""
Register a new category of consent
:param name: A name used to identify the category (e.g. preferences, analytics)
:param title: A human readable title for the category (i.e. Preferences, Analytics)
:param description: A human readable description on what these cookies are used for
:param default: The default value (pre-checked or not)
:param is_required: Whether allowing this category is required for the site to function or not
:return:
"""
self._categories[name] = ConsentCategory(name, title, description, default, is_required)
return self._categories[name]
def add_standard_categories(self):
"""
For getting started quickly you can use this function to add 3 common categories of cookies
"""
self.add_category(
'required',
'Required',
'These cookies are required for the site to function, like handling login (remembering who '
'you are logged in as between page visits).',
default=True, is_required=True)
self.add_category(
'preferences',
'Preferences',
'These cookies are used for convenience functionality, like saving local preferences you have made.',
default=True, is_required=False)
self.add_category(
'analytics',
'Analytics',
'These cookies are used to track your page visits across the site and record some basic information '
'about your browser. We use this information in order to see how our users are using the site, '
'allowing us to focus improvements.',
default=True, is_required=False)
@property
def categories(self) -> OrderedDict:
return self._categories
def _handle_consent_route(self):
if request.content_type == 'application/json':
def respond(status_code, **kwargs):
response: Response = jsonify(**kwargs)
response.status_code = status_code
response.headers['Access-Control-Allow-Credentials'] = 'true'
return response
if request.method == 'POST':
new = request.json
if not isinstance(new, list):
return respond(400, msg='payload is not a list')
for cat in new:
if cat not in self._categories:
return respond(400, msg='invalid consent category specified: ' + cat)
for cat in self._categories.keys():
request.consent[cat] = cat in new
return respond(200,
enabled=list(request.consent.enabled),
last_updated=request.consent.last_updated.isoformat())
else:
return self._render_template_func(
self.state().full_template,
flask_consent_categories=self._categories.values(),
flask_consent_contact_mail=self.state().contact_mail
)
``` |
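A rough wiring sketch for the extension; the app, domain list, mail address and template file names below are placeholders you would replace with your own:
```python
from flask import Flask, request

app = Flask(__name__)
app.config['CONSENT_FULL_TEMPLATE'] = 'consent_full.html'      # your own template
app.config['CONSENT_BANNER_TEMPLATE'] = 'consent_banner.html'  # your own template
app.config['CONSENT_CONTACT_MAIL'] = 'privacy@example.com'
app.config['CONSENT_PRIMARY_SERVERNAME'] = 'example.com'

consent = Consent(app)
consent.add_standard_categories()

@consent.domain_loader
def domains():
    return ['example.com', 'example.org']

@app.route('/')
def index():
    # request.consent acts like a mapping from category name to bool
    if request.consent['analytics']:
        return 'analytics enabled'
    return 'analytics disabled'
```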
{
"source": "02JanDal/Flask-CrossDomain-Session",
"score": 2
} |
#### File: Flask-CrossDomain-Session/flask_crossdomain_session/model.py
```python
from datetime import datetime
from secrets import token_hex
from enum import Enum
from typing import Any, Iterable, Optional, Type
from flask import Request
class SessionType(Enum):
cookie = 1
api = 2
class SessionMixin:
type: SessionType
token: str
ip: Optional[str]
user_agent: Optional[str]
user: Optional[Any]
data: dict
instances: Iterable["SessionInstanceMixin"]
def __init__(self, *args, **kwargs):
super(SessionMixin, self).__init__(*args, **kwargs)
def generate_token(self):
self.token = token_hex(32)
@classmethod
def find_by_token(cls, token: str, type_: SessionType = None): # pragma: no cover
raise NotImplementedError()
def save(self): # pragma: no cover
raise NotImplementedError()
def delete(self): # pragma: no cover
raise NotImplementedError()
def is_new(self): # pragma: no cover
raise NotImplementedError()
@classmethod
def commit(cls): # pragma: no cover
raise NotImplementedError()
def make_session_class(db, user_class):
class Session(SessionMixin, db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
type = db.Column(db.Enum(SessionType), nullable=False)
token = db.Column(db.String(128), unique=True, nullable=False)
ip = db.Column(db.String(64), nullable=True)
user_agent = db.Column(db.String(512), nullable=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'), nullable=True)
user = db.relationship(user_class)
data = db.Column(db.JSON, nullable=False)
instances = db.relationship('SessionInstance', back_populates='session',
cascade='all, delete-orphan', passive_deletes=True)
@classmethod
def find_by_token(cls, token: str, type_: SessionType = None):
if type_ is None:
return cls.query.filter_by(token=token).first()
else:
return cls.query.filter_by(token=token, type=type_).first()
def save(self):
db.session.add(self)
def delete(self):
if self.id:
db.session.delete(self)
else:
db.session.expunge(self)
def is_new(self):
return self.id is None
@classmethod
def commit(cls):
db.session.commit()
return Session
class SessionInstanceMixin:
created_at: datetime
domain: str
session: SessionMixin
session_class: Type[SessionMixin]
def __init__(self, *args, **kwargs):
super(SessionInstanceMixin, self).__init__(*args, **kwargs)
@classmethod
def find_by_session_and_domain(cls, session: SessionMixin, domain: str): # pragma: no cover
raise NotImplementedError()
@classmethod
def from_request(cls, app, request: Request, token=None, host=None, type_=None) -> "SessionInstanceMixin":
if token and not type_:
raise ValueError('need to provide type_ if token provided') # pragma: no cover
if not token:
if app.session_cookie_name in request.cookies:
token = request.cookies[app.session_cookie_name]
type_ = SessionType.cookie
elif 'Authorization' in request.headers and request.headers['Authorization'].startswith('Bearer '):
token = request.headers['Authorization'].split(' ')[1]
type_ = SessionType.api
else:
token = None
if not host:
host = request.host
domain = '.'.join(host.split(':')[0].split('.')[-2:])
session = cls.session_class.find_by_token(token, type_) if token else None
if token is None or session is None:
session = cls.session_class(ip=request.remote_addr or '',
user_agent=request.user_agent.string,
type=SessionType.cookie)
session.generate_token()
session.data = dict(_token=session.token)
session.save()
instance = None
else:
instance = cls.find_by_session_and_domain(session, domain)
if not instance:
instance = cls(session=session, created_at=datetime.utcnow(), domain=domain)
instance.save()
return instance
def save(self):
raise NotImplementedError() # pragma: no cover
def make_session_instance_class(db, sess_class):
class SessionInstance(SessionInstanceMixin, db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
session_id = db.Column(db.Integer, db.ForeignKey('session.id', ondelete='CASCADE'), nullable=False)
session = db.relationship('Session', back_populates='instances', lazy='joined')
session_class = sess_class
created_at = db.Column(db.DateTime, nullable=False)
domain = db.Column(db.String(64), nullable=False)
@classmethod
def from_request(cls, app, request: Request, token=None, host=None, type_=None):
with db.session.no_autoflush:
return super(SessionInstance, cls).from_request(app, request, token, host, type_)
@classmethod
def find_by_session_and_domain(cls, session: SessionMixin, domain: str):
return cls.query.filter_by(session=session, domain=domain).first()
def save(self):
db.session.add(self)
return SessionInstance
```
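One possible way to turn the two factories above into concrete models, sketched against Flask-SQLAlchemy; the app setup and the bare-bones `User` model are assumptions for illustration:
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sessions.db'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)

# Build the concrete Session / SessionInstance models from the mixins above.
Session = make_session_class(db, User)
SessionInstance = make_session_instance_class(db, Session)

with app.app_context():
    db.create_all()
```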
#### File: Flask-CrossDomain-Session/flask_crossdomain_session/session_interface.py
```python
from flask import request
from flask.sessions import SessionInterface, SecureCookieSession
from flask_crossdomain_session.model import SessionInstanceMixin, SessionMixin, SessionType
class SessionValueAccessor(SecureCookieSession):
def __init__(self, instance: SessionInstanceMixin, is_new):
self._instance = instance
self._session = instance.session
super(SessionValueAccessor, self).__init__(self._session.data)
self.new = is_new
@property
def instance(self) -> SessionInstanceMixin:
return self._instance
def replace_instance(self, new_instance):
self._instance = new_instance
self._session = self._instance.session
self.clear()
self.update(self._session.data)
self.modified = False
self.accessed = False
class DummySession(SecureCookieSession):
def __getattr__(self, item):
return self
def __setattr__(self, key, value):
pass
class ServerSessionInterface(SessionInterface):
def __init__(self, extension):
self._extension = extension
def open_session(self, app, request_):
if request_.endpoint and (request_.endpoint.endswith('.static') or request_.endpoint == 'static'):
return DummySession()
if request_.method == 'OPTIONS':
return DummySession()
instance = self._extension.session_instance_class.from_request(app, request_)
is_new = instance.session.is_new()
instance.session.commit()
return SessionValueAccessor(instance, is_new)
def save_session(self, app, session, response):
if isinstance(session, DummySession):
return
sess: SessionMixin = session.instance.session
if not self._extension.may_set_cookie:
if sess.is_new():
sess.delete()
return
if session.accessed and sess.type == SessionType.cookie:
response.vary.add('Cookie')
if session.new or (session.modified and dict(session) != sess.data):
# TODO: is this still needed?
# db.session.rollback()
sess.data = dict(session)
sess.commit()
cookie_name = app.session_cookie_name
token_changed = sess.token != request.cookies.get(cookie_name)
if sess.type == SessionType.cookie and (session.new or cookie_name not in request.cookies or token_changed):
response.set_cookie(
cookie_name,
sess.token,
expires=self.get_expiration_time(app, session),
domain=session.instance.domain,
secure=self.get_cookie_secure(app),
httponly=self.get_cookie_httponly(app),
path=self.get_cookie_path(app),
samesite=self.get_cookie_samesite(app)
)
``` |
{
"source": "02strich/aws-embedded-metrics-python",
"score": 2
} |
#### File: aws_embedded_metrics/environment/lambda_environment.py
```python
from aws_embedded_metrics.environment import Environment
from aws_embedded_metrics.logger.metrics_context import MetricsContext
from aws_embedded_metrics.sinks import Sink
from aws_embedded_metrics.sinks.stdout_sink import StdoutSink
import os
def get_env(key: str) -> str:
if key in os.environ:
return os.environ[key]
return ""
sink = StdoutSink()
class LambdaEnvironment(Environment):
async def probe(self) -> bool:
return len(get_env("AWS_LAMBDA_FUNCTION_NAME")) > 0
def get_name(self) -> str:
return self.get_log_group_name()
def get_type(self) -> str:
return "AWS::Lambda::Function"
def get_log_group_name(self) -> str:
return get_env("AWS_LAMBDA_FUNCTION_NAME")
def configure_context(self, context: MetricsContext) -> None:
context.set_property("executionEnvironment", get_env("AWS_EXECUTION_ENV"))
context.set_property("memorySize", get_env("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
context.set_property("functionVersion", get_env("AWS_LAMBDA_FUNCTION_VERSION"))
context.set_property("logStreamId", get_env("AWS_LAMBDA_LOG_STREAM_NAME"))
trace_id = get_env("_X_AMZN_TRACE_ID")
if len(trace_id) > 0 and "Sampled=1" in trace_id:
context.set_property("traceId", trace_id)
def get_sink(self) -> Sink:
"""Create the appropriate sink for this environment."""
return sink
```
#### File: aws_embedded_metrics/logger/metrics_context.py
```python
from aws_embedded_metrics import constants, utils
from aws_embedded_metrics.config import get_config
from aws_embedded_metrics.logger.metric import Metric
from typing import List, Dict, Any
class MetricsContext(object):
"""
Stores metrics and their associated properties and dimensions.
"""
def __init__(
self,
namespace: str = None,
properties: Dict[str, Any] = None,
dimensions: List[Dict[str, str]] = None,
default_dimensions: Dict[str, str] = None,
):
self.namespace: str = namespace or get_config().namespace or constants.DEFAULT_NAMESPACE
self.properties: Dict[str, Any] = properties or {}
self.dimensions: List[Dict[str, str]] = dimensions or []
self.default_dimensions: Dict[str, str] = default_dimensions or {}
self.metrics: Dict[str, Metric] = {}
self.should_use_default_dimensions = True
self.meta: Dict[str, Any] = {"Timestamp": utils.now()}
def put_metric(self, key: str, value: float, unit: str = None) -> None:
"""
Adds a metric measurement to the context.
Multiple calls using the same key will be stored as an
array of scalar values.
```
context.put_metric("Latency", 100, "Milliseconds")
```
"""
metric = self.metrics.get(key)
if metric:
# TODO: we should log a warning if the unit has been changed
metric.add_value(value)
else:
self.metrics[key] = Metric(value, unit)
def put_dimensions(self, dimensions: Dict[str, str]) -> None:
"""
Adds dimensions to the context.
```
context.put_dimensions({ "k1": "v1", "k2": "v2" })
```
"""
if dimensions is None:
# TODO add ability to define failure strategy
return
self.dimensions.append(dimensions)
def set_dimensions(self, dimensionSets: List[Dict[str, str]]) -> None:
"""
Overwrite all dimensions.
```
context.set_dimensions([
{ "k1": "v1" },
{ "k1": "v1", "k2": "v2" }])
```
"""
self.should_use_default_dimensions = False
self.dimensions = dimensionSets
def set_default_dimensions(self, default_dimensions: Dict) -> None:
"""
Sets default dimensions for all other dimensions that get added
to the context.
If no custom dimensions are specified, the metrics will be emitted
with the defaults.
If custom dimensions are specified, they will be prepended with
the default dimensions.
"""
self.default_dimensions = default_dimensions
def set_property(self, key: str, value: Any) -> None:
self.properties[key] = value
def get_dimensions(self) -> List[Dict]:
"""
Returns the current dimensions on the context
"""
# user has directly called set_dimensions
if not self.should_use_default_dimensions:
return self.dimensions
if not self.__has_default_dimensions():
return self.dimensions
if len(self.dimensions) == 0:
return [self.default_dimensions]
# we have to merge dimensions on the read path
# because defaults won't actually get set until the flush
# method is called. This allows us to not block the user
# code while we're detecting the environment
return list(
map(lambda custom: {**self.default_dimensions, **custom}, self.dimensions)
)
def __has_default_dimensions(self) -> bool:
return self.default_dimensions is not None and len(self.default_dimensions) > 0
def create_copy_with_context(self) -> "MetricsContext":
"""
Creates a deep copy of the context excluding metrics.
"""
new_properties: Dict = {}
new_properties.update(self.properties)
# dimensions added with put_dimension will not be copied.
# the reason for this is so that you can flush the same scope multiple
# times without stacking new dimensions. Example:
#
# @metric_scope
# def my_func(metrics):
# metrics.put_dimensions(...)
#
# my_func()
# my_func()
new_dimensions: List[Dict] = []
new_default_dimensions: Dict = {}
new_default_dimensions.update(self.default_dimensions)
return MetricsContext(
self.namespace, new_properties, new_dimensions, new_default_dimensions
)
@staticmethod
def empty() -> "MetricsContext":
return MetricsContext()
```
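A small standalone sketch of how MetricsContext behaves, assuming the package is importable; the metric names, dimensions and property values are arbitrary:
```python
from aws_embedded_metrics.logger.metrics_context import MetricsContext

context = MetricsContext.empty()
context.set_default_dimensions({"ServiceName": "example-service"})
context.put_dimensions({"Operation": "GetItem"})
context.put_metric("Latency", 42, "Milliseconds")
context.put_metric("Latency", 17, "Milliseconds")  # second value stored under the same key
context.set_property("requestId", "abc-123")

# Defaults are merged into each custom dimension set on read.
print(context.get_dimensions())
# [{'ServiceName': 'example-service', 'Operation': 'GetItem'}]
```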
#### File: tests/config/test_config.py
```python
from aws_embedded_metrics import config
from faker import Faker
from importlib import reload
fake = Faker()
def get_config():
# reload the configuration module since it is loaded on
# startup and cached
reload(config)
return config.get_config()
def test_can_get_config_from_environment(monkeypatch):
# arrange
debug_enabled = True
service_name = fake.word()
service_type = fake.word()
log_group = fake.word()
log_stream = fake.word()
agent_endpoint = fake.word()
ec2_metadata_endpoint = fake.word()
namespace = fake.word()
disable_metric_extraction = True
environment_override = fake.word()
monkeypatch.setenv("AWS_EMF_ENABLE_DEBUG_LOGGING", str(debug_enabled))
monkeypatch.setenv("AWS_EMF_SERVICE_NAME", service_name)
monkeypatch.setenv("AWS_EMF_SERVICE_TYPE", service_type)
monkeypatch.setenv("AWS_EMF_LOG_GROUP_NAME", log_group)
monkeypatch.setenv("AWS_EMF_LOG_STREAM_NAME", log_stream)
monkeypatch.setenv("AWS_EMF_AGENT_ENDPOINT", agent_endpoint)
monkeypatch.setenv("AWS_EMF_EC2_METADATA_ENDPOINT", ec2_metadata_endpoint)
monkeypatch.setenv("AWS_EMF_NAMESPACE", namespace)
monkeypatch.setenv("AWS_EMF_DISABLE_METRIC_EXTRACTION", str(disable_metric_extraction))
monkeypatch.setenv("AWS_EMF_ENVIRONMENT", environment_override)
# act
result = get_config()
# assert
assert result.debug_logging_enabled == debug_enabled
assert result.service_name == service_name
assert result.service_type == service_type
assert result.log_group_name == log_group
assert result.log_stream_name == log_stream
assert result.agent_endpoint == agent_endpoint
assert result.ec2_metadata_endpoint == ec2_metadata_endpoint
assert result.namespace == namespace
assert result.disable_metric_extraction == disable_metric_extraction
assert result.environment == environment_override
def test_can_override_config(monkeypatch):
# arrange
monkeypatch.setenv("AWS_EMF_ENABLE_DEBUG_LOGGING", str(True))
monkeypatch.setenv("AWS_EMF_SERVICE_NAME", fake.word())
monkeypatch.setenv("AWS_EMF_SERVICE_TYPE", fake.word())
monkeypatch.setenv("AWS_EMF_LOG_GROUP_NAME", fake.word())
monkeypatch.setenv("AWS_EMF_LOG_STREAM_NAME", fake.word())
monkeypatch.setenv("AWS_EMF_AGENT_ENDPOINT", fake.word())
monkeypatch.setenv("AWS_EMF_EC2_METADATA_ENDPOINT", fake.word())
monkeypatch.setenv("AWS_EMF_NAMESPACE", fake.word())
monkeypatch.setenv("AWS_EMF_DISABLE_METRIC_EXTRACTION", str(True))
monkeypatch.setenv("AWS_EMF_ENVIRONMENT", fake.word())
config = get_config()
debug_enabled = False
service_name = fake.word()
service_type = fake.word()
log_group = fake.word()
log_stream = fake.word()
agent_endpoint = fake.word()
ec2_metadata_endpoint = fake.word()
namespace = fake.word()
disable_metric_extraction = False
environment = fake.word()
# act
config.debug_logging_enabled = debug_enabled
config.service_name = service_name
config.service_type = service_type
config.log_group_name = log_group
config.log_stream_name = log_stream
config.agent_endpoint = agent_endpoint
config.ec2_metadata_endpoint = ec2_metadata_endpoint
config.namespace = namespace
config.disable_metric_extraction = disable_metric_extraction
config.environment = environment
# assert
assert config.debug_logging_enabled == debug_enabled
assert config.service_name == service_name
assert config.service_type == service_type
assert config.log_group_name == log_group
assert config.log_stream_name == log_stream
assert config.agent_endpoint == agent_endpoint
assert config.ec2_metadata_endpoint == ec2_metadata_endpoint
assert config.namespace == namespace
assert config.disable_metric_extraction == disable_metric_extraction
assert config.environment == environment
```
#### File: tests/environment/test_lambda_environment.py
```python
import os
from aws_embedded_metrics.environment.lambda_environment import LambdaEnvironment
from aws_embedded_metrics.sinks.stdout_sink import StdoutSink
import pytest
from faker import Faker
fake = Faker()
@pytest.mark.asyncio
async def test_probe_returns_true_if_fcn_name_in_env():
# arrange
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = fake.word()
env = LambdaEnvironment()
# act
result = await env.probe()
# assert
assert result is True
def test_get_name_returns_function_name():
# arrange
expected_name = fake.word()
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = expected_name
env = LambdaEnvironment()
# act
result = env.get_name()
# assert
assert result == expected_name
def test_get_type_returns_cfn_lambda_name():
# arrange
env = LambdaEnvironment()
# act
result = env.get_type()
# assert
assert result == "AWS::Lambda::Function"
def test_get_log_group_name_returns_function_name():
# arrange
expected_name = fake.word()
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = expected_name
env = LambdaEnvironment()
# act
result = env.get_log_group_name()
# assert
assert result == expected_name
def test_create_sink_creates_LambdaSink():
# arrange
env = LambdaEnvironment()
# act
result = env.get_sink()
# assert
assert isinstance(result, StdoutSink)
``` |
{
"source": "02TheBear/Discord_bot",
"score": 3
} |
#### File: commands/func/func_randomint.py
```python
import discord
from discord.ext import commands
import random
import asyncio
class func_randomint(commands.Cog):
def __init__(self, client):
self.client = client
# Random number command
@commands.command()
async def random(self, ctx, number):
try:
arg = random.randint(1, int(number))
except ValueError:
await ctx.send("Invalid number")
else:
await ctx.send(str(arg))
def setup(client):
client.add_cog(func_randomint(client))
``` |
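A rough sketch of loading this cog into a bot, written against the older discord.py 1.x extension API (synchronous `load_extension`, no `intents` argument); the dotted module path and token are placeholders:
```python
from discord.ext import commands

bot = commands.Bot(command_prefix='!')

# load_extension expects the dotted module path of the cog file above
bot.load_extension('commands.func.func_randomint')

bot.run('YOUR_BOT_TOKEN')
```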
{
"source": "02TPr1804/keepasshttplib",
"score": 3
} |
#### File: keepasshttplib/keepasshttplib/encrypter.py
```python
import base64
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from pkcs7 import PKCS7Encoder
class Encrypter():
"""Encrypting and decrypting strings using AES"""
def __init__(self, key):
self.key = key
self.encoder = PKCS7Encoder()
def get_verifier(self, iv=None):
"""getting the verifier"""
if iv == None:
iv = get_random_bytes(16)
aes = AES.new(self.key, AES.MODE_CBC, iv)
base64_private_key = base64.b64encode(self.key).decode()
base64_iv = base64.b64encode(iv).decode()
padded_iv = self.encoder.encode(base64_iv)
verifier = base64.b64encode(aes.encrypt(padded_iv.encode())).decode()
return (base64_private_key, base64_iv, verifier)
def encrypt(self, plain, iv=None):
"""encryption"""
if iv == None:
iv = get_random_bytes(16)
aes = AES.new(self.key, AES.MODE_CBC, iv)
padded_plain = self.encoder.encode(plain)
return base64.b64encode(aes.encrypt(padded_plain.encode())).decode()
def decrypt(self, encrypted, iv=None):
"""decryption"""
if iv == None:
iv = get_random_bytes(16)
aes = AES.new(self.key, AES.MODE_CBC, iv)
decrypted = aes.decrypt(base64.b64decode(encrypted))
return self.encoder.decode(decrypted.decode())
def generate_key():
"""key generation"""
return get_random_bytes(32)
```
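A quick round-trip sketch. Because encrypt() and decrypt() each draw a fresh random IV when none is supplied, a successful round trip needs the same IV passed to both calls, as below:
```python
from Crypto.Random import get_random_bytes

key = generate_key()
enc = Encrypter(key)

iv = get_random_bytes(16)
ciphertext = enc.encrypt('my secret entry', iv)
plaintext = enc.decrypt(ciphertext, iv)  # the same IV is required to recover the text
print(plaintext)                         # my secret entry
```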
#### File: keepasshttplib/keepasshttplib/httpclient.py
```python
import requests
import json
URL = 'http://localhost:19455'
def associate(key, nonce, verifier):
"""Associate a client with KeepassHttp."""
payload = {
'RequestType':'associate',
'Key':key,
'Nonce':nonce,
'Verifier':verifier
}
r = requests.post(URL, data=json.dumps(payload))
return r.json()['Id']
def test_associate(nonce, verifier, id):
"""Test if client is Associated with KeepassHttp."""
payload = {
'Nonce':nonce,
'Verifier':verifier,
'RequestType':'test-associate',
'TriggerUnlock':'false',
'Id':id
}
r = requests.post(URL, data=json.dumps(payload))
return r.json()['Success']
def get_logins(id, nonce, verifier, url):
"""getting logins through url"""
payload = {
'RequestType':'get-logins',
'SortSelection':'true',
'TriggerUnlock':'false',
'Id':id,
'Nonce':nonce,
'Verifier':verifier,
'Url':url,
'SubmitUrl':url
}
r = requests.post(URL, data=json.dumps(payload))
return (r.json()['Entries'], r.json()['Nonce'])
``` |
{
"source": "03015417pzj/PyRankine",
"score": 3
} |
#### File: PyRankine/step3/pump.py
```python
import node
class Pump():
"""
Pump class: Represents a pump in the Rankine cycle
"""
def __init__(self, name, inletNode, exitNode):
"""
Initializes the pump with nodes
"""
self.inletNode = inletNode
self.exitNode = exitNode
self.name = name
def state(self, nodes):
nodes[self.exitNode].s = nodes[self.inletNode].s
nodes[self.exitNode].h = nodes[self.inletNode].h + nodes[self.inletNode].v * \
(nodes[self.exitNode].p - nodes[self.inletNode].p) * 1000
nodes[self.exitNode].hs()
def simulate(self, nodes):
"""
Simulates the pump
"""
self.workRequired = nodes[self.exitNode].h - nodes[self.inletNode].h
def mdotenergy(self, mdot):
self.WRequired = mdot * self.workRequired
```
#### File: PyRankine/step3/rankine.py
```python
import csv
import node
import turbine
import pump
import condenser
import boiler
def read_nodesfile(filename):
""" nodes in the csv file"""
countNodes = len(open(filename, 'r').readlines()) - 1
nodes = [None for i in range(countNodes)]
csvfile = open(filename, 'r')
reader = csv.DictReader(csvfile)
for line in reader:
i = int(line['NID'])
nodes[i] = node.Node(line['NAME'], i)
try:
nodes[i].p = float(line['p'])
except:
nodes[i].p = None
try:
nodes[i].t = float(line['t'])
except:
nodes[i].t = None
try:
nodes[i].x = float(line['x'])
except:
nodes[i].x = None
if line['p'] != '' and line['t'] != '':
nodes[i].pt()
elif line['p'] != '' and line['x'] != '':
nodes[i].px()
elif line['t'] != '' and line['x'] != '':
nodes[i].tx()
return nodes, countNodes
def read_devicefile(filename):
devFile = open(filename, 'r')
discardHeader = devFile.readline()
Comps = {}
i = 0
begId = 2
for line in devFile:
dev = line.split(',')
if dev[1] == "TURBINE":
Comps[dev[0]] = turbine.Turbine(
dev[0], int(dev[begId]), int(dev[begId + 1]))
elif dev[1] == "BOILER":
Comps[dev[0]] = boiler.Boiler(
dev[0], int(dev[begId]), int(dev[begId + 1]))
elif dev[1] == "CONDENSER":
Comps[dev[0]] = condenser.Condenser(
dev[0], int(dev[begId]), int(dev[begId + 1]))
elif dev[1] == "PUMP":
Comps[dev[0]] = pump.Pump(dev[0], int(
dev[begId]), int(dev[begId + 1]))
i = i + 1
DevNum = i
return Comps, DevNum
class RankineCycle(object):
def __init__(self, name, nodefilename, devfilename):
self.name = name
self.nodes = []
self.devs = {}
self.nodes, self.NodeNum = read_nodesfile(nodefilename)
self.devs, self.DevNum = read_devicefile(devfilename)
def state(self):
for key in self.devs:
self.devs[key].state(self.nodes)
def simulate(self):
for key in self.devs:
self.devs[key].simulate(self.nodes)
self.bwr = self.devs['Pump'].workRequired / \
self.devs['Turbine'].workExtracted
self.efficiency = (self.devs['Turbine'].workExtracted - self.devs[
'Pump'].workRequired) / (self.devs['Boiler'].heatAdded)
def spower_simulate(self, Wcycledot):
self.Wcycledot = Wcycledot
self.mdot = Wcycledot * 1000.0 * 3600.0 / \
(self.devs['Turbine'].workExtracted -
self.devs['Pump'].workRequired)
for key in self.devs:
self.devs[key].mdotenergy(self.mdot)
def cw_simulate(self):
""" Circulating water system:Condenser Cooling Water"""
self.nodew = []
self.nodew.append(node.Node('CW-Inlet', 0))
self.nodew.append(node.Node('CW-Outlet', 1))
self.nodew[0].t = 15
self.nodew[0].x = 0
self.nodew[1].t = 35
self.nodew[1].x = 0
self.nodew[0].tx()
self.nodew[1].tx()
self.devs['Condenser'].cw_nodes(0, 1)
self.devs['Condenser'].cw_simulate(self.nodew)
def export(self):
print(" \n -------- %s ----------------------------------" % self.name)
print("The net power output: ", self.Wcycledot, "MW")
print("Efficiency: ", '%.2f' % (self.efficiency * 100), "%")
print("The back work ratio: ", '%.2f' % (self.bwr * 100), "%")
print("The mass flow rate: ", '%.2f' % self.mdot, "kg/h")
print('The rate of heat transfer as the fluid passes the boiler: ',
'%.2f' % self.devs['Boiler'].Qindot, 'MW')
print(" \n ------- Circulating Water System --------------")
print("Cooling water enters the condenser T:", self.nodew[0].t, u'°C')
print("Cooling water exits the condenser T:", self.nodew[1].t, u'°C')
print('The rate of heat transfer from the condensing steam: ',
'%.2f' % self.devs['Condenser'].Qoutdot, 'MW')
print('The mass flow rate of the condenser cooling water: ', '%.2f' %
self.devs['Condenser'].mcwdot, 'kg/h')
print(" \n -------- NODES -----------------------------------")
print("\nNodeID\tName\tP\tT\tH\tS\tV\tX")
for inode in self.nodes:
print(inode)
if __name__ == '__main__':
nds_filename = 'rankine81-nds.csv'
dev_filename = 'rankine81-dev.csv'
c81 = RankineCycle("Rankine81", nds_filename, dev_filename)
c81.state()
c81.simulate()
# Specified Net Output Power
Wcycledot = 100
c81.spower_simulate(Wcycledot)
c81.cw_simulate()
c81.export()
``` |
{
"source": "030-yvonne/py4kids",
"score": 4
} |
#### File: 030-yvonne/py4kids/guess_num.py
```python
import random
SAMPLE_SPACE = tuple([str(i) for i in range(1, 10)])
DIGIT_LENGTH = 3
def get_user_input(msg):
input_data = ''
##################################################
#TODO: get user's input
assert False, "Unimplemented!"
##################################################
return input_data
def prompt_message(msg):
##################################################
#TODO: show message to user
assert False, "Unimplemented!"
##################################################
def generate_data(sample_space = SAMPLE_SPACE, digit_length = DIGIT_LENGTH):
data = ""
indexes = list(range(0, len(sample_space)))
##################################################
#TODO: random sampling (without replace) from sample space
assert False, "Unimplemented!"
##################################################
return data
def check_length(input_data, digit_length = DIGIT_LENGTH):
result = False
##################################################
#TODO: check the length
assert False, "Unimplemented!"
##################################################
return result
def check_unique(intput_data):
result = False
##################################################
#TODO: check all elements are unique
assert False, "Unimplemented!"
##################################################
return result
def check_validity(input_data, sample_space = SAMPLE_SPACE):
##################################################
#TODO: check all elements are in sample space
assert False, "Unimplemented!"
##################################################
return True
def counts_matches(input_data, true_data):
assert len(input_data) == len(true_data), "The length is not correct"
counts_A = 0
counts_B = 0
##################################################
#TODO: count each element of input_data
#A: the counts of matches at the same index
#B: the counts of matches at different index
assert False, "Unimplemented!"
##################################################
return (counts_A, counts_B)
def ask_valid_guess(digit_length = 3):
while 1:
msg = "!?"
input_data = get_user_input("Please input your guess:")
if check_length(input_data) != True:
##################################################
#TODO: modify message
pass
##################################################
elif check_validity(input_data) != True:
##################################################
#TODO: modify message
pass
##################################################
elif check_unique(input_data) != True:
##################################################
#TODO: modify message
pass
##################################################
else:
break
prompt_message(msg)
return input_data
def play_game():
##################################################
#TODO: Implement this mode
assert False, "Unimplemented!"
##################################################
def play_game_computer_guess():
##################################################
#TODO: Implement this mode
assert False, "Unimplemented!"
##################################################
def play_game_human_guess():
is_match = False
history = []
truth = generate_data()
while 1:
input_data = ask_valid_guess(DIGIT_LENGTH)
A, B = counts_matches(input_data, truth)
history.append((A, B, input_data))
msg = "A:{0} B:{1} guess:{2} #trails:{3}".format(A, B, input_data, len(history))
prompt_message(msg)
if A == DIGIT_LENGTH:
msg = "Correct! #trials:{0}".format(len(history))
prompt_message(msg)
break
return history
def play_game_two_players():
##################################################
#TODO: Implement this mode
assert False, "Unimplemented!"
##################################################
if __name__ == "__main__":
#create a new game
mode = get_user_input("Please enter the mode: 0)Normal, 1)You Guess, 2)Computer Guess, 3)Two players:")
if mode == '0':
play_game()
elif mode == '1':
play_game_human_guess()
elif mode == '2':
play_game_computer_guess()
elif mode == '3':
play_game_two_players()
else:
prompt_message("You should enter: {0,1,2,3}")
``` |
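The skeleton above intentionally leaves its TODOs unimplemented for the exercise. Purely as an illustration, one possible way to fill in `counts_matches` (exact-position matches counted as A, wrong-position matches as B) could look like this:
```python
def counts_matches(input_data, true_data):
    assert len(input_data) == len(true_data), "The length is not correct"
    counts_A = 0
    counts_B = 0
    for i, ch in enumerate(input_data):
        if ch == true_data[i]:
            counts_A += 1  # right digit, right position
        elif ch in true_data:
            counts_B += 1  # right digit, wrong position
    return (counts_A, counts_B)
```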
{
"source": "037g/cissp_test_sim",
"score": 3
} |
#### File: 037g/cissp_test_sim/questengine.py
```python
import json
import random
from pathlib import Path
class QuestionEngine:
def __init__(self, jsonfile: str = 'bank/cissp_questions.json',
questionisrandom: bool = True,
answerisalpha: bool = True,
answerisrandom: bool = True):
"""Use JSON files to make exams"""
self._jsonFile = jsonfile
self._questionIsRandom = questionisrandom
self._answerIsAlpha = answerisalpha
self._answerIsRandom = answerisrandom
self._jsonData = self.__loadJsonFile()
self.correct = 0
self.incorrect = 0
self.totalQuestions = len(self._jsonData['questions'])
self.questionSet = self.__compileQuestions()
def __loadJsonFile(self) -> dict:
"""Load the json question file"""
jsonDataFile = Path(self._jsonFile)
with open(jsonDataFile) as f:
self._jsonData = json.load(f)
f.close()
return self._jsonData
def __compileQuestions(self) -> dict:
"""Create dictionary of questions and question number"""
if self._questionIsRandom:
questions = random.sample(range(0, self.totalQuestions),
self.totalQuestions)
else:
questions = list(range(0, self.totalQuestions))
questionSet = {}
currentAnswers = {}
for itr, question in enumerate(questions):
answers = self._jsonData['questions'][question]['answers']
questionSection = self._jsonData['questions'][question]
answerKeys = '123456789'
if self._answerIsAlpha:
answerKeys = 'abcdefghi'
answerValues = list(answers.keys())
if self._answerIsRandom:
random.shuffle(answerValues)
currentAnswers = {}
for answer in range(len(answerKeys)):
if answer >= len(answerValues):
break
else:
currentAnswers.update({answerKeys[answer]: {
answerValues[answer]: answers[answerValues[answer]]}})
questionSet[itr] = ({'question': questionSection['question'],
'answers': currentAnswers,
'solution': questionSection['solution'],
'explanation': questionSection['explanation']})
return questionSet
def getQuestion(self, questionnumber: int) -> str:
"""Return question from compiled questions"""
return self.questionSet[questionnumber]['question']
def getAnswers(self, questionnumber: int) -> dict:
"""Return dictionary with answers for given question"""
return self.questionSet[questionnumber]['answers']
def getExplanation(self, questionnumber: int) -> str:
"""Return solution for given question"""
return self.questionSet[questionnumber]['explanation']
def getSolutionText(self, questionnumber: int) -> str:
"""Return solution for given question"""
solution = self.questionSet[questionnumber]['solution']
answers = self.questionSet[questionnumber]['answers']
solutiontext = ""
for key, value in answers.items():
for k, v in value.items():
if solution == k:
solutiontext = v
return solutiontext
def compareSolution(self, questionnumber: int, answerguess: int) -> bool:
"""Compare value to solution"""
return answerguess == self.questionSet[questionnumber]['solution']
``` |
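A minimal driver sketch for the class above; it assumes a question bank exists at the default `bank/cissp_questions.json` path with the structure the class reads (a top-level `questions` list whose entries carry `question`, `answers`, `solution` and `explanation` keys):
```python
engine = QuestionEngine()

for n in range(engine.totalQuestions):
    print(engine.getQuestion(n))
    for key, answer in engine.getAnswers(n).items():
        # each answer maps a display key ('a', 'b', ...) to {original_key: text}
        print(f"  {key}) {list(answer.values())[0]}")
    print("Answer:", engine.getSolutionText(n))
    print("Why:", engine.getExplanation(n))
```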
{
"source": "0382/have-fun",
"score": 3
} |
#### File: have-fun/videoplay/imshow.py
```python
import numpy as np
import cv2
import os
import sys
font_ratio = 2.5
def get_img(filename, lines, columns):
img = cv2.imread(filename)
x, y, _ = img.shape
if y*lines * font_ratio >= x*columns:
ty = columns
tx = int(np.floor(x*columns / y / font_ratio))
else:
tx = lines
ty = int(np.floor(font_ratio * y*lines / x))
return cv2.resize(img, (ty, tx))
def print_img(img):
for row in img:
for col in row:
b,g,r = col
print(f"\033[0;48;2;{r};{g};{b}m ", end='')
print('\033[0m\n',end='')
def imshow(file:str):
columns, lines = os.get_terminal_size()
img = get_img(file, lines, columns)
print_img(img)
if __name__ == "__main__":
imshow(sys.argv[1])
``` |
{
"source": "0399obot/Sm5",
"score": 3
} |
#### File: 03993/src/sms.py
```python
import requests,os,sys,time
from bs4 import BeautifulSoup as BS
class docter:
def __init__(self):
self.ses=requests.Session()
def alodoc(self,num):
self.ses.headers.update({'referer':'https://www.alodokter.com/login-alodokter'})
req1=self.ses.get('https://www.alodokter.com/login-alodokter')
bs1=BS(req1.text,'html.parser')
token=bs1.find('meta',{'name':'csrf-token'})['content']
# print(token)
head={
'user-agent':'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36',
'content-type':'application/json',
'referer':'https://www.alodokter.com/login-alodokter',
'accept':'application/json',
'origin':'https://www.alodokter.com',
'x-csrf-token':token
}
req2=self.ses.post('https://www.alodokter.com/login-with-phone-number',headers=head,json={"user":{"phone":num}})
# print(req2.json())
if req2.json()['status'] == 'success':
print("[•] Berhasil")
else:
print("[-] Gagal")
def klikdok(self,num):
req1=self.ses.get('https://m.klikdokter.com/users/create')
bs=BS(req1.text,'html.parser')
token=bs.find('input',{'name':'_token'})['value']
# print(token)
head={
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Origin': 'https://m.klikdokter.com',
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Referer': 'https://m.klikdokter.com/users/create?back-to=',
}
ata={
'_token':token,
'full_name':'BambangSubianto',
'email':'<EMAIL>',
'phone':num,
'back-to':'',
'submit':'Daftar',
}
req2=self.ses.post('https://m.klikdokter.com/users/check',headers=head,data=ata)
# print(req2.url)
if "sessions/auth?user=" in req2.url:
print("[•] Berhasil")
else:
print("[-] Gagal")
def prosehat(self,num):
head={
'accept': 'application/json, text/javascript, */*; q=0.01',
'origin': 'https://www.prosehat.com',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'referer': 'https://www.prosehat.com/akun',
}
ata={'phone_or_email':num,'action':'ajaxverificationsend'}
req=requests.post('https://www.prosehat.com/wp-admin/admin-ajax.php',data=ata,headers=head)
# print(req.text)
if "token" in req.text:
print("[•] Berhasil")
for x in range(60):
print(end=f"\r>> Sleep {60-(x+1)}s << ",flush=True)
time.sleep(1)
print()
else:
print(f"[-] Gagal {req.text}")
for x in range(60):
print(end=f"\r>> Sleep {60-(x+1)}s << ",flush=True)
time.sleep(1)
print()
while True:
try:
os.system('clear')
print("""
••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
S P A M SMS 0399obot
IG : https://instagram.com/setiaji.ios
WEB: http://actslowly.6te.net
••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
[1] 4X / JAM
[2] 1X / MENIT ( UNLIMITED )
""")
pil=int(input("> Nomor : "))
print("="*25)
num=input("[?] Nomor Target: ")
lop=int(input("[?] Waktu : "))
print()
main=docter()
if pil == 1:
for i in range(lop):
main.alodoc(num)
elif pil == 2:
for i in range(lop):
main.prosehat(num)
else:
print("?: Ulangi dengan Teliti!?")
lgi=input("\n[?] Terus Bangkit (Y/n) ")
if lgi.lower() == 'n':
sys.exit('Makasih :*')
except Exception as Err:
sys.exit(Err)
``` |
{
"source": "03b8/snakesist",
"score": 2
} |
#### File: snakesist/tests/test_exist_client.py
```python
import pytest # type: ignore
import requests
from delb import Document, FailedDocumentLoading
from snakesist import ExistClient
def test_exist_client_delete_node(rest_base_url, test_client):
Document(
'<example id="t4">i stay<deletee> and i am to be deleted</deletee></example>',
existdb_client=test_client,
).existdb_store(filename="foo.xml")
xq = "let $node := //deletee return util:absolute-resource-id($node)"
abs_res_id = requests.get(f"{rest_base_url}&_query={xq}").content.decode()
xq = "let $node := //deletee return util:node-id($node)"
node_id = requests.get(f"{rest_base_url}&_query={xq}").content.decode()
test_client.delete_node(abs_res_id, node_id)
response = requests.get(f"{rest_base_url}&_query=//example[@id='t4']")
node = response.content.decode()
assert node == '<example id="t4">i stay</example>'
def test_exist_client_delete_document(rest_base_url, test_client):
Document(
'<example id="t5">i am to be deleted</example>', existdb_client=test_client
).existdb_store(collection="/bar", filename="foo.xml")
test_client.delete_document("/bar/foo.xml")
with pytest.raises(FailedDocumentLoading):
Document("/bar/foo.xml", existdb_client=test_client)
def test_exist_client_xpath(test_client):
paragraph_1 = "<p>retrieve me first!</p>"
paragraph_2 = "<p>retrieve me too!</p>"
Document(
f'<example id="t7">{paragraph_1}</example>', existdb_client=test_client
).existdb_store(filename="document_1.xml")
Document(paragraph_2, existdb_client=test_client).existdb_store(
filename="document_2.xml"
)
retrieved_nodes = test_client.xpath("//p")
retrieved_nodes_str = [str(node) for node in retrieved_nodes]
assert paragraph_1 in retrieved_nodes_str
assert paragraph_2 in retrieved_nodes_str
@pytest.mark.usefixtures("db")
@pytest.mark.parametrize(
"url, properties",
(
("existdb://localhost/exist", ("https", "", "", "localhost", 443, "exist")),
(
"existdb+https://localhost/exist",
("https", "", "", "localhost", 443, "exist"),
),
("existdb+http://localhost/exist", ("http", "", "", "localhost", 80, "exist")),
(
"existdb+http://localhost:8080/exist",
("http", "", "", "localhost", 8080, "exist"),
),
(
"existdb://admin:@localhost/exist",
("https", "admin", "", "localhost", 443, "exist"),
),
),
)
def test_url_parsing(url, properties):
client = ExistClient.from_url(url)
assert client.transport == properties[0]
assert client.user == properties[1]
assert client.password == properties[2]
assert client.host == properties[3]
assert client.port == properties[4]
assert client.prefix == properties[5]
def test_query_with_lengthy_contents(test_client):
document = Document("existdb://localhost/exist/db/apps/test-data/dada_manifest.xml")
long_paragraph = document.root.full_text * 5 # 30625 characters total length
Document(
f'<example id="t8"><p>{long_paragraph}</p></example>', existdb_client=test_client
).existdb_store(filename="the_long_dada.xml")
retrieved_nodes = test_client.xpath(f'//p[contains(., "{long_paragraph}")]')
assert len(retrieved_nodes) == 1
``` |
{
"source": "03b8/TEfy",
"score": 3
} |
#### File: TEfy/tefy/tefy.py
```python
import requests
from lxml import etree
class OxGaWrap(object):
"""
Very basic wrapper for a small subset of OxGarage conversions, one-way from doc/docx/odt to TEI XML.
"""
def __init__(self, path, lang='en'):
"""
:param path: path to file to be converted
:param lang: value of the oxgarage.lang conversion property
"""
self.response = None
self._req_baseurl = 'https://oxgarage.tei-c.org/ege-webservice/Conversions/'
self._convcodes = {
'in': {
'odt': 'odt%3Aapplication%3Avnd.oasis.opendocument.text/',
'doc': 'doc%3Aapplication%3Amsword/odt%3Aapplication%3Avnd.oasis.opendocument.text/',
'docx': 'docx%3Aapplication%3Avnd.openxmlformats-officedocument.wordprocessingml.document/'
},
'xmlteip5': 'TEI%3Atext%3Axml/', }
self._params = {'properties': '<conversions><conversion index="0">'
f'<property id="oxgarage.lang">{lang}</property>'
'</conversion></conversions>'}
self.path = path
self.format = path.split('.')[-1]
if self.format not in self._convcodes['in']:
self.format = None
codekeys = ', '.join(self._convcodes['in'])
raise ValueError(f'Unknown input format. Expected one of the following: {codekeys}.')
def _request_conversion(self):
"""
Requests the conversion of the file to TEI P5 XML.
:return: requests.Response
"""
url = self._req_baseurl + self._convcodes['in'][self.format] + self._convcodes['xmlteip5']
with open(self.path, 'rb') as doc_file:
files = {'upload_file': doc_file}
response = requests.post(url, files=files, params=self._params)
if response.status_code == 200:
return response
response.raise_for_status()
@property
def tei_xml(self):
"""
Get TEI XML document as etree.Element
"""
self.response = self._request_conversion()
return etree.fromstring(self.response.content)
def convert_to_tei(self):
raise DeprecationWarning('This method has been deprecated and will be removed.')
def get_et_output(self):
        raise DeprecationWarning('This method has been deprecated and will be removed. '
                                 'Please use the "tei_xml" property instead.')
return self.tei_xml
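# Minimal usage sketch (file path is hypothetical):
#   wrapper = OxGaWrap('essay.docx')
#   tei_root = wrapper.tei_xml  # lxml Element holding the converted TEI P5 document
#   print(etree.tostring(tei_root, pretty_print=True).decode())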
``` |
{
"source": "03pie/SMPCUP2017",
"score": 3
} |
#### File: 03pie/SMPCUP2017/keras_classifier.py
```python
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
# return the best three results
def top_n(matrix_prob, label_map):
ans = []
for line in matrix_prob:
rank = [label_map[item[0]] for item in sorted(enumerate(line), key=lambda v:v[1], reverse=True)]
ans.append(rank[:3])
return ans
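# e.g. top_n([[0.1, 0.7, 0.2]], ['a', 'b', 'c']) returns [['b', 'c', 'a']]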
# basic neural network model
def basic_model():
model = Sequential()
model.add(Dense(output_dim=500, input_dim=100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(output_dim=42, input_dim=500, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
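# Architecture: 100-dim input -> 500 ReLU units (with 20% dropout) -> 42-way softmax,
# trained with categorical cross-entropy and the Adam optimizer.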
if __name__ == '__main__':
X = pd.read_csv('./data/triple_train_x_mean.txt', header=None, encoding='utf-8')
Y = pd.read_csv('./data/triple_train_y.txt', header=None, encoding='utf-8')
X_test = pd.read_csv('./data/triple_test_x_mean.txt', header=None, encoding='utf-8')
matrix_y = np_utils.to_categorical(Y,42)
# KerasClassifier analysis
classifier = KerasClassifier(build_fn=basic_model, nb_epoch=10, batch_size=500)
classifier.fit(X, Y)
pred_prob = classifier.predict_proba(X_test)
with open('./model/task2_label_space.txt', encoding='utf-8') as flabel:
label_map = flabel.read().split()
pd.DataFrame(top_n(pred_prob, label_map)).to_csv('./data/task2_ans_int_index.txt', index=None, header=None, encoding='utf-8')
```
#### File: 03pie/SMPCUP2017/segmentation.py
```python
import jieba
import re
# init jieba
jieba.load_userdict("./model/dict.txt")
with open("./model/chinese_stopwords.txt", encoding='utf-8') as fstop:
stop_words = fstop.read().split()
jieba.enable_parallel(4)
# segmentation
def segment(blogs):
seg_blogs = [filter([word for word in jieba.cut(article) if word not in stop_words]) for article in blogs]
return seg_blogs
# filter for words
def filter(tags):
# remove pure numbers
    tags = [word for word in tags if re.match(r'^\d+(\.\d+)?$', word) is None]
# remove substring
for i in tags:
for j in tags:
if i != j and i in j:
tags.remove(i)
break
return tags
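# e.g. filter(['deep', 'deep learning', '3.14']) keeps only ['deep learning']:
# pure numbers are dropped and words contained in longer words are removed.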
if __name__ == "__main__":
with open("./data/blog_article_original.txt", encoding='utf-8') as blog_in:
with open("./data/blog_segment.txt", "w", encoding='utf-8') as blog_out:
blog_out.writelines([' '.join(line) for line in segment(blog_in.readlines())])
```
#### File: 03pie/SMPCUP2017/tf_idf.py
```python
import jieba.analyse
import jieba
import pandas as pd
import re
from segmentation import filter
# tf_idf analysis
def tf_idf(texts):
jieba.load_userdict("./model/dict.txt")
jieba.analyse.set_idf_path("./model/idf.txt")
jieba.analyse.set_stop_words("./model/chinese_stopwords.txt")
jieba.enable_parallel(8)
corpus = [filter(jieba.analyse.extract_tags(s, topK = 15)) for s in texts]
return corpus
if __name__ == '__main__':
# Enter blog original text
blogs = pd.read_csv('./data/blog_article_original.txt', header=None, sep='\001', names=['id', 'title', 'text'])
# Increase the weight of the title and remove the ellipsis
texts = [re.sub('\.\.+', '.', str(row[1]['title']*6 + row[1]['text']).lower()) for row in blogs.iterrows()]
# Calculate the topic of each article
tfidf_corpus = pd.DataFrame(tf_idf(texts))
# Output the result
result = pd.DataFrame({'contentid':blogs['id'], 'keyword1':tfidf_corpus[0], 'keyword2':tfidf_corpus[1], 'keyword3':tfidf_corpus[2]})
result.to_csv('./data/ans_task1.txt', index=None, encoding='utf-8')
``` |
{
"source": "0411tony/airflow",
"score": 2
} |
#### File: databricks/hooks/test_databricks.py
```python
import itertools
import json
import unittest
from unittest import mock
import pytest
from requests import exceptions as requests_exceptions
from airflow import __version__
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.databricks.hooks.databricks import (
AZURE_DEFAULT_AD_ENDPOINT,
AZURE_MANAGEMENT_ENDPOINT,
AZURE_TOKEN_SERVICE_URL,
DEFAULT_DATABRICKS_SCOPE,
SUBMIT_RUN_ENDPOINT,
DatabricksHook,
RunState,
)
from airflow.utils.session import provide_session
TASK_ID = 'databricks-operator'
DEFAULT_CONN_ID = 'databricks_default'
NOTEBOOK_TASK = {'notebook_path': '/test'}
SPARK_PYTHON_TASK = {'python_file': 'test.py', 'parameters': ['--param', '123']}
NEW_CLUSTER = {'spark_version': '2.0.x-scala2.10', 'node_type_id': 'r3.xlarge', 'num_workers': 1}
CLUSTER_ID = 'cluster_id'
RUN_ID = 1
JOB_ID = 42
HOST = 'xx.cloud.databricks.com'
HOST_WITH_SCHEME = 'https://xx.cloud.databricks.com'
LOGIN = 'login'
PASSWORD = 'password'
TOKEN = 'token'
USER_AGENT_HEADER = {'user-agent': f'airflow-{__version__}'}
RUN_PAGE_URL = 'https://XX.cloud.databricks.com/#jobs/1/runs/1'
LIFE_CYCLE_STATE = 'PENDING'
STATE_MESSAGE = 'Waiting for cluster'
GET_RUN_RESPONSE = {
'job_id': JOB_ID,
'run_page_url': RUN_PAGE_URL,
'state': {'life_cycle_state': LIFE_CYCLE_STATE, 'state_message': STATE_MESSAGE},
}
NOTEBOOK_PARAMS = {"dry-run": "true", "oldest-time-to-consider": "1457570074236"}
JAR_PARAMS = ["param1", "param2"]
RESULT_STATE = None # type: None
LIBRARIES = [
{"jar": "dbfs:/mnt/libraries/library.jar"},
{"maven": {"coordinates": "org.jsoup:jsoup:1.7.2", "exclusions": ["slf4j:slf4j"]}},
]
def run_now_endpoint(host):
"""
Utility function to generate the run now endpoint given the host.
"""
return f'https://{host}/api/2.1/jobs/run-now'
def submit_run_endpoint(host):
"""
Utility function to generate the submit run endpoint given the host.
"""
return f'https://{host}/api/2.1/jobs/runs/submit'
def get_run_endpoint(host):
"""
Utility function to generate the get run endpoint given the host.
"""
return f'https://{host}/api/2.1/jobs/runs/get'
def cancel_run_endpoint(host):
"""
    Utility function to generate the cancel run endpoint given the host.
"""
return f'https://{host}/api/2.1/jobs/runs/cancel'
def start_cluster_endpoint(host):
"""
    Utility function to generate the start cluster endpoint given the host.
"""
return f'https://{host}/api/2.0/clusters/start'
def restart_cluster_endpoint(host):
"""
    Utility function to generate the restart cluster endpoint given the host.
"""
return f'https://{host}/api/2.0/clusters/restart'
def terminate_cluster_endpoint(host):
"""
    Utility function to generate the terminate cluster endpoint given the host.
"""
return f'https://{host}/api/2.0/clusters/delete'
def install_endpoint(host):
"""
Utility function to generate the install endpoint given the host.
"""
return f'https://{host}/api/2.0/libraries/install'
def uninstall_endpoint(host):
"""
Utility function to generate the uninstall endpoint given the host.
"""
return f'https://{host}/api/2.0/libraries/uninstall'
def create_valid_response_mock(content):
response = mock.MagicMock()
response.json.return_value = content
return response
def create_successful_response_mock(content):
response = mock.MagicMock()
response.json.return_value = content
response.status_code = 200
return response
def create_post_side_effect(exception, status_code=500):
if exception != requests_exceptions.HTTPError:
return exception()
else:
response = mock.MagicMock()
response.status_code = status_code
response.raise_for_status.side_effect = exception(response=response)
return response
def setup_mock_requests(mock_requests, exception, status_code=500, error_count=None, response_content=None):
side_effect = create_post_side_effect(exception, status_code)
if error_count is None:
# POST requests will fail indefinitely
mock_requests.post.side_effect = itertools.repeat(side_effect)
else:
# POST requests will fail 'error_count' times, and then they will succeed (once)
mock_requests.post.side_effect = [side_effect] * error_count + [
create_valid_response_mock(response_content)
]
class TestDatabricksHook(unittest.TestCase):
"""
Tests for DatabricksHook.
"""
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.host = HOST
conn.login = LOGIN
conn.password = PASSWORD
conn.extra = None
session.commit()
self.hook = DatabricksHook(retry_delay=0)
def test_parse_host_with_proper_host(self):
host = self.hook._parse_host(HOST)
assert host == HOST
def test_parse_host_with_scheme(self):
host = self.hook._parse_host(HOST_WITH_SCHEME)
assert host == HOST
def test_init_bad_retry_limit(self):
with pytest.raises(ValueError):
DatabricksHook(retry_limit=0)
def test_do_api_call_retries_with_retryable_error(self):
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch('airflow.providers.databricks.hooks.databricks.requests') as mock_requests:
with mock.patch.object(self.hook.log, 'error') as mock_errors:
setup_mock_requests(mock_requests, exception)
with pytest.raises(AirflowException):
self.hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == self.hook.retry_limit
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_do_api_call_does_not_retry_with_non_retryable_error(self, mock_requests):
setup_mock_requests(mock_requests, requests_exceptions.HTTPError, status_code=400)
with mock.patch.object(self.hook.log, 'error') as mock_errors:
with pytest.raises(AirflowException):
self.hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
mock_errors.assert_not_called()
def test_do_api_call_succeeds_after_retrying(self):
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch('airflow.providers.databricks.hooks.databricks.requests') as mock_requests:
with mock.patch.object(self.hook.log, 'error') as mock_errors:
setup_mock_requests(
mock_requests, exception, error_count=2, response_content={'run_id': '1'}
)
response = self.hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == 2
assert response == {'run_id': '1'}
@mock.patch('airflow.providers.databricks.hooks.databricks.sleep')
def test_do_api_call_waits_between_retries(self, mock_sleep):
retry_delay = 5
self.hook = DatabricksHook(retry_delay=retry_delay)
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch('airflow.providers.databricks.hooks.databricks.requests') as mock_requests:
with mock.patch.object(self.hook.log, 'error'):
mock_sleep.reset_mock()
setup_mock_requests(mock_requests, exception)
with pytest.raises(AirflowException):
self.hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert len(mock_sleep.mock_calls) == self.hook.retry_limit - 1
calls = [mock.call(retry_delay), mock.call(retry_delay)]
mock_sleep.assert_has_calls(calls)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_do_api_call_patch(self, mock_requests):
mock_requests.patch.return_value.json.return_value = {'cluster_name': 'new_name'}
data = {'cluster_name': 'new_name'}
patched_cluster_name = self.hook._do_api_call(('PATCH', 'api/2.1/jobs/runs/submit'), data)
assert patched_cluster_name['cluster_name'] == 'new_name'
mock_requests.patch.assert_called_once_with(
submit_run_endpoint(HOST),
json={'cluster_name': 'new_name'},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {'run_id': '1'}
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == '1'
mock_requests.post.assert_called_once_with(
submit_run_endpoint(HOST),
json={
'notebook_task': NOTEBOOK_TASK,
'new_cluster': NEW_CLUSTER,
},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_spark_python_submit_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {'run_id': '1'}
data = {'spark_python_task': SPARK_PYTHON_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == '1'
mock_requests.post.assert_called_once_with(
submit_run_endpoint(HOST),
json={
'spark_python_task': SPARK_PYTHON_TASK,
'new_cluster': NEW_CLUSTER,
},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_run_now(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {'run_id': '1'}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID}
run_id = self.hook.run_now(data)
assert run_id == '1'
mock_requests.post.assert_called_once_with(
run_now_endpoint(HOST),
json={'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_get_run_page_url(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
run_page_url = self.hook.get_run_page_url(RUN_ID)
assert run_page_url == RUN_PAGE_URL
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={'run_id': RUN_ID},
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_get_job_id(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
job_id = self.hook.get_job_id(RUN_ID)
assert job_id == JOB_ID
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={'run_id': RUN_ID},
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_get_run_state(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
run_state = self.hook.get_run_state(RUN_ID)
assert run_state == RunState(LIFE_CYCLE_STATE, RESULT_STATE, STATE_MESSAGE)
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={'run_id': RUN_ID},
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_cancel_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = GET_RUN_RESPONSE
self.hook.cancel_run(RUN_ID)
mock_requests.post.assert_called_once_with(
cancel_run_endpoint(HOST),
json={'run_id': RUN_ID},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_start_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.start_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
start_cluster_endpoint(HOST),
json={'cluster_id': CLUSTER_ID},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_restart_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.restart_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
restart_cluster_endpoint(HOST),
json={'cluster_id': CLUSTER_ID},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_terminate_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.terminate_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
terminate_cluster_endpoint(HOST),
json={'cluster_id': CLUSTER_ID},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_install_libs_on_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'cluster_id': CLUSTER_ID, 'libraries': LIBRARIES}
self.hook.install(data)
mock_requests.post.assert_called_once_with(
install_endpoint(HOST),
json={'cluster_id': CLUSTER_ID, 'libraries': LIBRARIES},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_uninstall_libs_on_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'cluster_id': CLUSTER_ID, 'libraries': LIBRARIES}
self.hook.uninstall(data)
mock_requests.post.assert_called_once_with(
uninstall_endpoint(HOST),
json={'cluster_id': CLUSTER_ID, 'libraries': LIBRARIES},
params=None,
auth=(LOGIN, PASSWORD),
headers=USER_AGENT_HEADER,
timeout=self.hook.timeout_seconds,
)
class TestDatabricksHookToken(unittest.TestCase):
"""
Tests for DatabricksHook when auth is done with token.
"""
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.extra = json.dumps({'token': TOKEN, 'host': HOST})
session.commit()
self.hook = DatabricksHook()
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {'run_id': '1'}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == '1'
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs['auth'].token == TOKEN
class TestDatabricksHookTokenInPassword(unittest.TestCase):
"""
Tests for DatabricksHook.
"""
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.host = HOST
conn.login = None
conn.password = <PASSWORD>
conn.extra = None
session.commit()
self.hook = DatabricksHook(retry_delay=0)
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {'run_id': '1'}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == '1'
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs['auth'].token == TOKEN
class TestDatabricksHookTokenWhenNoHostIsProvidedInExtra(TestDatabricksHookToken):
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.extra = json.dumps({'token': TOKEN})
session.commit()
self.hook = DatabricksHook()
class TestRunState(unittest.TestCase):
def test_is_terminal_true(self):
terminal_states = ['TERMINATED', 'SKIPPED', 'INTERNAL_ERROR']
for state in terminal_states:
run_state = RunState(state, '', '')
assert run_state.is_terminal
def test_is_terminal_false(self):
non_terminal_states = ['PENDING', 'RUNNING', 'TERMINATING']
for state in non_terminal_states:
run_state = RunState(state, '', '')
assert not run_state.is_terminal
def test_is_terminal_with_nonexistent_life_cycle_state(self):
run_state = RunState('blah', '', '')
with pytest.raises(AirflowException):
run_state.is_terminal
def test_is_successful(self):
run_state = RunState('TERMINATED', 'SUCCESS', '')
assert run_state.is_successful
def create_aad_token_for_resource(resource: str) -> dict:
return {
"token_type": "Bearer",
"expires_in": "599",
"ext_expires_in": "599",
"expires_on": "1575500666",
"not_before": "1575499766",
"resource": resource,
"access_token": TOKEN,
}
class TestDatabricksHookAadToken(unittest.TestCase):
"""
Tests for DatabricksHook when auth is done with AAD token for SP as user inside workspace.
"""
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.login = '9ff815a6-4404-4ab8-85cb-cd0e6f879c1d'
conn.password = '<PASSWORD>'
conn.extra = json.dumps(
{
'host': HOST,
'azure_tenant_id': '3ff810a6-5504-4ab8-85cb-cd0e6f879c1d',
}
)
session.commit()
self.hook = DatabricksHook()
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.side_effect = [
create_successful_response_mock(create_aad_token_for_resource(DEFAULT_DATABRICKS_SCOPE)),
create_successful_response_mock({'run_id': '1'}),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == '1'
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs['auth'].token == TOKEN
class TestDatabricksHookAadTokenOtherClouds(unittest.TestCase):
"""
Tests for DatabricksHook when auth is done with AAD token for SP as user inside workspace and
using non-global Azure cloud (China, GovCloud, Germany)
"""
@provide_session
def setUp(self, session=None):
self.tenant_id = '3ff810a6-5504-4ab8-85cb-cd0e6f879c1d'
self.ad_endpoint = 'https://login.microsoftonline.de'
self.client_id = '9ff815a6-4404-4ab8-85cb-cd0e6f879c1d'
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
conn.login = self.client_id
conn.password = '<PASSWORD>'
conn.extra = json.dumps(
{
'host': HOST,
'azure_tenant_id': self.tenant_id,
'azure_ad_endpoint': self.ad_endpoint,
}
)
session.commit()
self.hook = DatabricksHook()
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.side_effect = [
create_successful_response_mock(create_aad_token_for_resource(DEFAULT_DATABRICKS_SCOPE)),
create_successful_response_mock({'run_id': '1'}),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
ad_call_args = mock_requests.method_calls[0]
assert ad_call_args[1][0] == AZURE_TOKEN_SERVICE_URL.format(self.ad_endpoint, self.tenant_id)
assert ad_call_args[2]['data']['client_id'] == self.client_id
assert ad_call_args[2]['data']['resource'] == DEFAULT_DATABRICKS_SCOPE
assert run_id == '1'
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs['auth'].token == TOKEN
class TestDatabricksHookAadTokenSpOutside(unittest.TestCase):
"""
Tests for DatabricksHook when auth is done with AAD token for SP outside of workspace.
"""
@provide_session
def setUp(self, session=None):
conn = session.query(Connection).filter(Connection.conn_id == DEFAULT_CONN_ID).first()
self.tenant_id = '3ff810a6-5504-4ab8-85cb-cd0e6f879c1d'
self.client_id = '9ff815a6-4404-4ab8-85cb-cd0e6f879c1d'
conn.login = self.client_id
conn.password = '<PASSWORD>'
conn.host = HOST
conn.extra = json.dumps(
{
'azure_resource_id': '/Some/resource',
'azure_tenant_id': '3ff810a6-5504-4ab8-85cb-cd0e6f879c1d',
}
)
session.commit()
self.hook = DatabricksHook()
@mock.patch('airflow.providers.databricks.hooks.databricks.requests')
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.side_effect = [
create_successful_response_mock(create_aad_token_for_resource(AZURE_MANAGEMENT_ENDPOINT)),
create_successful_response_mock(create_aad_token_for_resource(DEFAULT_DATABRICKS_SCOPE)),
create_successful_response_mock({'run_id': '1'}),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {'notebook_task': NOTEBOOK_TASK, 'new_cluster': NEW_CLUSTER}
run_id = self.hook.submit_run(data)
ad_call_args = mock_requests.method_calls[0]
assert ad_call_args[1][0] == AZURE_TOKEN_SERVICE_URL.format(AZURE_DEFAULT_AD_ENDPOINT, self.tenant_id)
assert ad_call_args[2]['data']['client_id'] == self.client_id
assert ad_call_args[2]['data']['resource'] == AZURE_MANAGEMENT_ENDPOINT
ad_call_args = mock_requests.method_calls[1]
assert ad_call_args[1][0] == AZURE_TOKEN_SERVICE_URL.format(AZURE_DEFAULT_AD_ENDPOINT, self.tenant_id)
assert ad_call_args[2]['data']['client_id'] == self.client_id
assert ad_call_args[2]['data']['resource'] == DEFAULT_DATABRICKS_SCOPE
assert run_id == '1'
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs['auth'].token == TOKEN
assert kwargs['headers']['X-Databricks-Azure-Workspace-Resource-Id'] == '/Some/resource'
assert kwargs['headers']['X-Databricks-Azure-SP-Management-Token'] == TOKEN
``` |
{
"source": "0415070/AA-RNN",
"score": 2
} |
#### File: AA-RNN/src/AA-Decompose.py
```python
import numpy as np
import pandas as pd
from pandas.core.nanops import nanmean as pd_nanmean
from statsmodels.tsa.seasonal import DecomposeResult
from statsmodels.tsa.filters._utils import _maybe_get_pandas_wrapper_freq
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib import rcParams
df = pd.read_csv('./dataset/tax-sales-hurricane.csv')
df_dec = df[df.region == 'Orange']
df_dec.set_index('Date', inplace=True)
df_dec = df_dec[['observed']]
df_dec
def anomaly_detection(points, thresh=3.5):
if len(points.shape) == 1:
points = points[:, None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
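# Modified z-score outlier test (Iglewicz & Hoaglin): a point is flagged when
# 0.6745 * |x - median| / MAD exceeds `thresh` (3.5 is the commonly used cutoff).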
def AA_decompose(df, period=12, lo_frac=0.6, lo_delta=0.01, thresh=3.5):
lowess = sm.nonparametric.lowess
_pandas_wrapper, _ = _maybe_get_pandas_wrapper_freq(df)
observed = np.asanyarray(df).squeeze()
trend = lowess(observed, [x for x in range(len(observed))],
frac=lo_frac,
delta=lo_delta * len(observed),
return_sorted=False)
detrended = observed / trend
period = min(period, len(observed))
period_median = np.array([pd_nanmean(detrended[i::period])
for i in range(period)])
seasonal = np.tile(period_median, len(observed) //
period + 1)[:len(observed)]
resid_inter = detrended / seasonal
resid_inter[0] = 1
resid = resid_inter.copy()
anomalies = resid_inter.copy()
b = anomaly_detection(resid, thresh=thresh)
for j in range(len(b)):
if b[j] == True:
resid[j] = 1
if b[j] == False:
anomalies[j] = 1
results = list(map(_pandas_wrapper, [seasonal, trend, resid, observed]))
fig, axes = plt.subplots(5, 1, sharex=True)
fig.tight_layout()
axes[0].plot(observed)
axes[0].set_ylabel('Observed')
axes[1].plot(trend)
axes[1].set_ylabel('Trend')
axes[2].plot(seasonal)
axes[2].set_ylabel('Seasonal')
axes[4].plot(anomalies, color='r')
axes[4].set_ylabel('Anomalies')
axes[4].set_xlabel('Time')
axes[3].plot(resid)
axes[3].set_ylabel('Residual')
axes[3].set_xlabel('Time')
return trend, seasonal, anomalies, resid
plt.rc('font', family='serif')
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
params = {
'axes.labelsize': 8,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'figure.figsize': [4.5, 4.5]
}
rcParams.update(params)
trend, seasonal, anomalies, resid = AA_decompose(df_dec, period=12, thresh=2)
```
#### File: AA-RNN/src/baseline-deepar-eeforecasting.py
```python
import util
from torch import nn
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
from progressbar import *
from datetime import date
import argparse
from time import time
from torch.optim import Adam
import random
import os
from joblib import load, dump
import pickle
from collections import defaultdict
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import sklearn
from matplotlib import rc
from pylab import rcParams
from sklearn.preprocessing import MinMaxScaler
from torch import nn, optim
import torch.nn.functional as F
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
df = pd.read_csv('/data/electricity.csv', parse_dates=['Date'])
df = df[df.region == 'MT_200']
df.reset_index(drop=True, inplace=True)
feature = ['observed', 'weekday', 'month', 'year']
feature = ['observed']
target = ['observed']
df_og = df
scaler = MinMaxScaler(feature_range=(0, 1))
scaler = scaler.fit(df_og[feature])
df[feature] = scaler.transform(df_og[feature])
df
def create_seq(df, feature, target, seq_window, hor_window):
Xs = []
ys = []
for j in range(len(df)-seq_window-1):
X = df[feature][j:seq_window+j]
y = df[target][seq_window+j:seq_window+j+hor_window]
Xs.append(X)
ys.append(y)
return np.array(Xs), np.array(ys)
seq_window = 8
hor_window = 1
X, y = create_seq(df, feature, target, seq_window, hor_window)
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
print('X shape:', X.shape)
print('y shape:', y.shape)
def MAE(ytrue, ypred):
ytrue = np.array(ytrue).ravel()
ypred = np.array(ypred).ravel()
return np.mean(np.abs((ytrue - ypred)))
def MSE(ytrue, ypred):
ytrue = np.array(ytrue).ravel()
ypred = np.array(ypred).ravel()
return np.mean(np.square((ytrue - ypred)))
def RMSE(ypred, ytrue):
rsme = np.sqrt(np.mean(np.square(ypred - ytrue)))
return rsme
def get_data_path():
folder = os.path.dirname(__file__)
return os.path.join(folder, "data")
def RSE(ypred, ytrue):
rse = np.sqrt(np.square(ypred - ytrue).sum()) / \
np.sqrt(np.square(ytrue - ytrue.mean()).sum())
return rse
def quantile_loss(ytrue, ypred, qs):
L = np.zeros_like(ytrue)
for i, q in enumerate(qs):
yq = ypred[:, :, i]
diff = yq - ytrue
        L += np.maximum(q * diff, (q - 1) * diff)  # element-wise pinball loss for quantile q
return L.mean()
def SMAPE(ytrue, ypred):
ytrue = np.array(ytrue).ravel()
ypred = np.array(ypred).ravel() + 1e-4
mean_y = (ytrue + ypred) / 2.
return np.mean(np.abs((ytrue - ypred)
/ mean_y))
def MAPE(ytrue, ypred):
ytrue = np.array(ytrue).ravel() + 1e-4
ypred = np.array(ypred).ravel()
return np.mean(np.abs((ytrue - ypred)
/ ytrue))
def train_test_split(X, y, train_ratio=0.7):
num_ts, num_periods, num_features = X.shape
train_periods = int(num_periods * train_ratio)
random.seed(2)
Xtr = X[:, :train_periods, :]
ytr = y[:, :train_periods]
Xte = X[:, train_periods:, :]
yte = y[:, train_periods:]
return Xtr, ytr, Xte, yte
class StandardScalerManual:
def fit_transform(self, y):
self.mean = np.mean(y)
self.std = np.std(y) + 1e-4
return (y - self.mean) / self.std
def inverse_transform(self, y):
return y * self.std + self.mean
def transform(self, y):
return (y - self.mean) / self.std
class MaxScaler:
def fit_transform(self, y):
self.max = np.max(y)
return y / self.max
def inverse_transform(self, y):
return y * self.max
def transform(self, y):
return y / self.max
class MeanScaler:
def fit_transform(self, y):
self.mean = np.mean(y)
return y / self.mean
def inverse_transform(self, y):
return y * self.mean
def transform(self, y):
return y / self.mean
class LogScaler:
def fit_transform(self, y):
return np.log1p(y)
def inverse_transform(self, y):
return np.expm1(y)
def transform(self, y):
return np.log1p(y)
def gaussian_likelihood_loss(z, mu, sigma):
negative_likelihood = torch.log(
sigma + 1) + (z - mu) ** 2 / (2 * sigma ** 2) + 6
return negative_likelihood.mean()
def negative_binomial_loss(ytrue, mu, alpha):
batch_size, seq_len = ytrue.size()
likelihood = torch.lgamma(ytrue + 1. / alpha) - torch.lgamma(ytrue + 1) - torch.lgamma(1. / alpha) \
- 1. / alpha * torch.log(1 + alpha * mu) \
+ ytrue * torch.log(alpha * mu / (1 + alpha * mu))
return - likelihood.mean()
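# This is the negative log-likelihood of a negative-binomial (NB2) distribution
# with mean mu and dispersion alpha, averaged over the batch.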
def batch_generator(X, y, num_obs_to_train, seq_len, batch_size):
num_ts, num_periods, _ = X.shape
if num_ts < batch_size:
batch_size = num_ts
t = random.choice(range(num_obs_to_train, num_periods-seq_len))
batch = random.sample(range(num_ts), batch_size)
X_train_batch = X[batch, t-num_obs_to_train:t, :]
y_train_batch = y[batch, t-num_obs_to_train:t]
Xf = X[batch, t:t+seq_len]
yf = y[batch, t:t+seq_len]
return X_train_batch, y_train_batch, Xf, yf
class AutoEncoder(nn.Module):
def __init__(self, input_size, encoder_hidden_units):
super(AutoEncoder, self).__init__()
        self.layers = nn.ModuleList()  # ModuleList so the stacked LSTM layers register their parameters
self.dropout = nn.Dropout()
last_ehu = None
for idx, ehu in enumerate(encoder_hidden_units):
if idx == 0:
layer = nn.LSTM(input_size, ehu, 1,
bias=True, batch_first=True)
else:
layer = nn.LSTM(last_ehu, ehu, 1, bias=True, batch_first=True)
last_ehu = ehu
self.layers.append(layer)
def forward(self, x):
batch_size, seq_len, input_size = x.size()
for layer in self.layers:
hs = []
for s in range(seq_len):
_, (h, c) = layer(x)
h = h.permute(1, 0, 2)
h = F.relu(h)
h = self.dropout(h)
hs.append(h)
x = torch.cat(hs, dim=1)
return x
class Forecaster(nn.Module):
def __init__(self, input_size, hidden_size, n_layers):
super(Forecaster, self).__init__()
self.lstm = nn.LSTM(input_size, hidden_size,
n_layers, bias=True, batch_first=True)
self.fc = nn.Linear(hidden_size, 1)
self.dropout = nn.Dropout()
def forward(self, x, mu):
'''
Args:
x (tensor):
mu (tensor): model uncertainty
'''
batch_size, seq_len, hidden_size = x.size()
out = []
for s in range(seq_len):
xt = x[:, s, :].unsqueeze(1)
xt = torch.cat([xt, mu], dim=1)
_, (h, c) = self.lstm(xt)
ht = h[-1, :, :].unsqueeze(0)
h = ht.permute(1, 0, 2)
h = F.relu(h)
h = self.dropout(h)
out.append(h)
out = torch.cat(out, dim=1)
out = self.fc(out)
return out
class ExtremeModel(nn.Module):
def __init__(
self,
input_size,
encoder_hidden_units=[512, 128, 64],
hidden_size_forecaster=512,
n_layers_forecaster=3
):
super(ExtremeModel, self).__init__()
self.embed = nn.Linear(input_size, encoder_hidden_units[-1])
self.auto_encoder = AutoEncoder(
encoder_hidden_units[-1], encoder_hidden_units)
self.forecaster = Forecaster(encoder_hidden_units[-1],
hidden_size_forecaster, n_layers_forecaster)
def forward(self, xpast, xnew):
if isinstance(xpast, type(np.empty(1))):
xpast = torch.from_numpy(xpast).float()
if isinstance(xnew, type(np.empty(1))):
xnew = torch.from_numpy(xnew).float()
xpast = self.embed(xpast)
xnew = self.embed(xnew)
# auto-encoder
ae_out = self.auto_encoder(xpast)
ae_out = torch.mean(ae_out, dim=1).unsqueeze(1)
# concatenate x
# x = torch.cat([xnew, ae_out], dim=1)
x = self.forecaster(xnew, ae_out)
return x
def batch_generator(X, y, num_obs_to_train, seq_len, batch_size):
num_ts, num_periods, _ = X.shape
if num_ts < batch_size:
batch_size = num_ts
t = random.choice(range(num_obs_to_train, num_periods-seq_len))
batch = random.sample(range(num_ts), batch_size)
X_train_batch = X[batch, t-num_obs_to_train:t, :]
y_train_batch = y[batch, t-num_obs_to_train:t]
Xf = X[batch, t:t+seq_len]
yf = y[batch, t:t+seq_len]
return X_train_batch, y_train_batch, Xf, yf
def RMSELoss(yhat, y):
return torch.sqrt(torch.mean((yhat-y)**2))
def MAELoss(yhat, y):
    # mean absolute error between prediction and target
    loss = torch.nn.L1Loss()
    return loss(yhat, y)
"""### Train"""
def train(
X,
y,
seq_len,
num_obs_to_train,
lr,
num_epoches,
step_per_epoch,
batch_size
):
num_ts, num_periods, num_features = X.shape
Xtr, ytr, Xte, yte = train_test_split(X, y)
yscaler = None
# if args.standard_scaler:
yscaler = StandardScalerManual()
# elif args.log_scaler:
# yscaler = LogScaler()
# elif args.mean_scaler:
# yscaler = util.MeanScaler()
if yscaler is not None:
ytr = yscaler.fit_transform(ytr)
progress = ProgressBar()
seq_len = seq_len
num_obs_to_train = num_obs_to_train
model = ExtremeModel(num_features)
optimizer = Adam(model.parameters(), lr=lr)
losses = []
MAE_losses = []
mape_list = []
mse_list = []
rmse_list = []
mae_list = []
cnt = 0
for epoch in progress(range(num_epoches)):
# print("Epoch {} starts...".format(epoch))
for step in range(step_per_epoch):
Xtrain, ytrain, Xf, yf = batch_generator(Xtr, ytr, num_obs_to_train,
seq_len, batch_size)
Xtrain_tensor = torch.from_numpy(Xtrain).float()
ytrain_tensor = torch.from_numpy(ytrain).float()
Xf = torch.from_numpy(Xf).float()
yf = torch.from_numpy(yf).float()
ypred = model(Xtrain_tensor, Xf)
# loss = F.mse_loss(ypred, yf)
loss = RMSELoss(ypred, yf)
loss_mae = F.l1_loss(ypred, yf)
            MAE_losses.append(float(loss_mae))  # np.float is deprecated; plain float works on a 0-d tensor
losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cnt += 1
# select skus with most top K
X_test = Xte[:, -seq_len-num_obs_to_train:-
seq_len, :].reshape((num_ts, -1, num_features))
Xf_test = Xte[:, -seq_len:, :].reshape((num_ts, -1, num_features))
y_test = yte[:, -seq_len-num_obs_to_train:-seq_len].reshape((num_ts, -1))
yf_test = yte[:, -seq_len:].reshape((num_ts, -1))
if yscaler is not None:
y_test = yscaler.transform(y_test)
ypred = model(X_test, Xf_test)
ypred = ypred.data.numpy()
if yscaler is not None:
ypred = yscaler.inverse_transform(ypred)
mape = MAPE(yf_test, ypred)
mae = MAE(yf_test, ypred)
mse = MSE(yf_test, ypred)
rmse = RMSE(yf_test, ypred)
# print("MAE: {}".format(mae))
# print("RMSE: {}".format(rmse))
# print("MSE: {}".format(mse))
# print("MAPE: {}".format(mape))
mape_list.append(mape)
mse_list.append(mse)
mae_list.append(mae)
rmse_list.append(rmse)
plt.figure(1)
plt.plot([k + seq_len + num_obs_to_train - seq_len
for k in range(seq_len)], ypred[-1], "r-")
plt.title('EE-Forecasting')
yplot = yte[-1, -seq_len-num_obs_to_train:]
plt.plot(range(len(yplot)), yplot, "k-")
plt.legend(["forecast", "true"], loc="upper left")
plt.xlabel("Periods")
plt.ylabel("Y")
plt.show()
return yf_test, ypred, losses, MAE_losses, mape_list, mse_list, mae_list, rmse_list
df
X = np.c_[np.asarray(hours), np.asarray(dows)]
num_features = X.shape[1]
num_periods = len(df)
X = np.asarray(X).reshape((-1, num_periods, num_features))
y = np.asarray(df["observed"]).reshape((-1, num_periods))
y_test, y_pred, losses, MAE_losses, mape_list, mse_list, mae_list, rmse_list = train(X, y, seq_len=8,
num_obs_to_train=4,
lr=1e-3,
num_epoches=50,
step_per_epoch=2,
batch_size=32
)
plt.plot(range(len(losses)), losses, "k-")
plt.xlabel("Period")
plt.ylabel("RMSE")
plt.title('RMSE: '+str(np.average(losses))+'MAE:'+str(np.average(MAE_losses)))
plt.show()
plt.savefig('training_EE.png')
"""## DeepAR"""
# import util
class Gaussian(nn.Module):
def __init__(self, hidden_size, output_size):
super(Gaussian, self).__init__()
self.mu_layer = nn.Linear(hidden_size, output_size)
self.sigma_layer = nn.Linear(hidden_size, output_size)
def forward(self, h):
_, hidden_size = h.size()
sigma_t = torch.log(1 + torch.exp(self.sigma_layer(h))) + 1e-6
sigma_t = sigma_t.squeeze(0)
mu_t = self.mu_layer(h).squeeze(0)
return mu_t, sigma_t
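        # log(1 + exp(.)) is the softplus transform, which keeps sigma strictly positive.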
class NegativeBinomial(nn.Module):
def __init__(self, input_size, output_size):
'''
Negative Binomial Supports Positive Count Data
Args:
input_size (int): hidden h_{i,t} column size
output_size (int): embedding size
'''
super(NegativeBinomial, self).__init__()
self.mu_layer = nn.Linear(input_size, output_size)
self.sigma_layer = nn.Linear(input_size, output_size)
def forward(self, h):
_, hidden_size = h.size()
alpha_t = torch.log(1 + torch.exp(self.sigma_layer(h))) + 1e-6
mu_t = torch.log(1 + torch.exp(self.mu_layer(h)))
return mu_t, alpha_t
def gaussian_sample(mu, sigma):
gaussian = torch.distributions.normal.Normal(mu, sigma)
ypred = gaussian.sample(mu.size())
return ypred
def negative_binomial_sample(mu, alpha):
var = mu + mu * mu * alpha
ypred = mu + torch.randn(mu.size()) * torch.sqrt(var)
return ypred
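# Gaussian approximation to a negative-binomial draw: mean mu, variance mu + alpha * mu**2.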
class DeepAR(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, lr=1e-3, likelihood="g"):
super(DeepAR, self).__init__()
# network
self.input_embed = nn.Linear(1, embedding_size)
self.encoder = nn.LSTM(embedding_size+input_size, hidden_size,
num_layers, bias=True, batch_first=True)
if likelihood == "g":
self.likelihood_layer = Gaussian(hidden_size, 1)
elif likelihood == "nb":
self.likelihood_layer = NegativeBinomial(hidden_size, 1)
self.likelihood = likelihood
def forward(self, X, y, Xf):
if isinstance(X, type(np.empty(2))):
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
Xf = torch.from_numpy(Xf).float()
num_ts, seq_len, _ = X.size()
_, output_horizon, num_features = Xf.size()
ynext = None
ypred = []
mus = []
sigmas = []
h, c = None, None
for s in range(seq_len + output_horizon):
if s < seq_len:
ynext = y[:, s].view(-1, 1)
yembed = self.input_embed(ynext).view(num_ts, -1)
x = X[:, s, :].view(num_ts, -1)
else:
yembed = self.input_embed(ynext).view(num_ts, -1)
x = Xf[:, s-seq_len, :].view(num_ts, -1)
# num_ts, num_features + embedding
x = torch.cat([x, yembed], dim=1)
inp = x.unsqueeze(1)
if h is None and c is None:
# h size (num_layers, num_ts, hidden_size)
out, (h, c) = self.encoder(inp)
else:
out, (h, c) = self.encoder(inp, (h, c))
hs = h[-1, :, :]
hs = F.relu(hs)
mu, sigma = self.likelihood_layer(hs)
mus.append(mu.view(-1, 1))
sigmas.append(sigma.view(-1, 1))
if self.likelihood == "g":
ynext = gaussian_sample(mu, sigma)
elif self.likelihood == "nb":
alpha_t = sigma
mu_t = mu
ynext = negative_binomial_sample(mu_t, alpha_t)
# if without true value, use prediction
if s >= seq_len - 1 and s < output_horizon + seq_len - 1:
ypred.append(ynext)
ypred = torch.cat(ypred, dim=1).view(num_ts, -1)
mu = torch.cat(mus, dim=1).view(num_ts, -1)
sigma = torch.cat(sigmas, dim=1).view(num_ts, -1)
return ypred, mu, sigma
def batch_generator(X, y, num_obs_to_train, seq_len, batch_size):
num_ts, num_periods, _ = X.shape
if num_ts < batch_size:
batch_size = num_ts
t = random.choice(range(num_obs_to_train, num_periods-seq_len))
batch = random.sample(range(num_ts), batch_size)
X_train_batch = X[batch, t-num_obs_to_train:t, :]
y_train_batch = y[batch, t-num_obs_to_train:t]
Xf = X[batch, t:t+seq_len]
yf = y[batch, t:t+seq_len]
return X_train_batch, y_train_batch, Xf, yf
def RMSELoss(yhat, y):
return torch.sqrt(torch.mean((yhat-y)**2))
def train(
X,
y,
seq_len,
num_obs_to_train,
lr,
num_epoches,
step_per_epoch,
batch_size,
likelihood,
embedding_size,
n_layers,
sample_size,
hidden_size
):
num_ts, num_periods, num_features = X.shape
model = DeepAR(num_features, embedding_size,
hidden_size, n_layers, lr, likelihood)
optimizer = Adam(model.parameters(), lr=lr)
random.seed(2)
# select sku with most top n quantities
Xtr, ytr, Xte, yte = util.train_test_split(X, y)
losses = []
cnt = 0
yscaler = None
# if args.standard_scaler:
yscaler = util.StandardScaler()
# elif args.log_scaler:
# yscaler = util.LogScaler()
# elif args.mean_scaler:
# yscaler = util.MeanScaler()
if yscaler is not None:
ytr = yscaler.fit_transform(ytr)
rmse_losses = []
mae_losses = []
# training
seq_len = seq_len
num_obs_to_train = num_obs_to_train
progress = ProgressBar()
for epoch in progress(range(num_epoches)):
# print("Epoch {} starts...".format(epoch))
for step in range(step_per_epoch):
Xtrain, ytrain, Xf, yf = batch_generator(
Xtr, ytr, num_obs_to_train, seq_len, batch_size)
Xtrain_tensor = torch.from_numpy(Xtrain).float()
ytrain_tensor = torch.from_numpy(ytrain).float()
Xf = torch.from_numpy(Xf).float()
yf = torch.from_numpy(yf).float()
ypred, mu, sigma = model(Xtrain_tensor, ytrain_tensor, Xf)
# ypred_rho = ypred
# e = ypred_rho - yf
# loss = torch.max(rho * e, (rho - 1) * e).mean()
# gaussian loss
            loss_rmse_inter = RMSELoss(ypred, yf)
            mae_losses_inter = mean_absolute_error(yf.detach().numpy(), ypred.detach().numpy())
            mae_losses.append(mae_losses_inter)
            rmse_losses.append(loss_rmse_inter.item())
ytrain_tensor = torch.cat([ytrain_tensor, yf], dim=1)
if likelihood == "g":
loss = util.gaussian_likelihood_loss(ytrain_tensor, mu, sigma)
elif likelihood == "nb":
loss = util.negative_binomial_loss(ytrain_tensor, mu, sigma)
losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cnt += 1
# test
mape_list = []
# select skus with most top K
X_test = Xte[:, -seq_len-num_obs_to_train:-
seq_len, :].reshape((num_ts, -1, num_features))
Xf_test = Xte[:, -seq_len:, :].reshape((num_ts, -1, num_features))
y_test = yte[:, -seq_len-num_obs_to_train:-seq_len].reshape((num_ts, -1))
yf_test = yte[:, -seq_len:].reshape((num_ts, -1))
if yscaler is not None:
y_test = yscaler.transform(y_test)
result = []
n_samples = sample_size
for _ in tqdm(range(n_samples)):
y_pred, _, _ = model(X_test, y_test, Xf_test)
y_pred = y_pred.data.numpy()
if yscaler is not None:
y_pred = yscaler.inverse_transform(y_pred)
result.append(y_pred.reshape((-1, 1)))
result = np.concatenate(result, axis=1)
p50 = np.quantile(result, 0.5, axis=1)
p90 = np.quantile(result, 0.9, axis=1)
p10 = np.quantile(result, 0.1, axis=1)
mape = util.MAPE(yf_test, p50)
print("P50 MAPE: {}".format(mape))
mape_list.append(mape)
# if args.show_plot:
plt.figure(1, figsize=(20, 5))
plt.plot([k + seq_len + num_obs_to_train - seq_len
for k in range(seq_len)], p50, "r-")
plt.fill_between(x=[k + seq_len + num_obs_to_train - seq_len for k in range(seq_len)],
y1=p10, y2=p90, alpha=0.5)
plt.title('Prediction uncertainty')
yplot = yte[-1, -seq_len-num_obs_to_train:]
plt.plot(range(len(yplot)), yplot, "k-")
plt.legend(["P50 forecast", "true", "P10-P90 quantile"], loc="upper left")
ymin, ymax = plt.ylim()
plt.vlines(seq_len + num_obs_to_train - seq_len, ymin, ymax,
color="blue", linestyles="dashed", linewidth=2)
plt.ylim(ymin, ymax)
plt.xlabel("Periods")
plt.ylabel("Y")
plt.show()
return yf_test, ypred, losses, rmse_losses, mae_losses, mape_list, mse_list, mae_list, rmse_list
df["year"] = df["Date"].apply(lambda x: x.year)
df["day_of_week"] = df["Date"].apply(lambda x: x.dayofweek)
df["hour"] = df["Date"].apply(lambda x: x.hour)
features = ["hour", "day_of_week"]
hours = df["hour"]
dows = df["day_of_week"]
X = np.c_[np.asarray(hours), np.asarray(dows)]
num_features = X.shape[1]
num_periods = len(df)
X = np.asarray(X).reshape((-1, num_periods, num_features))
y = np.asarray(df["observed"]).reshape((-1, num_periods))
y_test, y_pred, losses, rmse_losses, mae_losses, mape_list, mse_list, mae_list, rmse_list = train(X, y, seq_len=7,
num_obs_to_train=1,
lr=1e-3,
num_epoches=1000,
step_per_epoch=2,
batch_size=32,
sample_size=100,
n_layers=3,
hidden_size=64,
embedding_size=64,
likelihood="g"
)
plt.plot(range(len(rmse_losses)), rmse_losses, "k-")
plt.xlabel("Period")
plt.ylabel("RMSE")
plt.title('RMSE: '+str(np.average(rmse_losses)) +
'MAE:' + str(np.average(mae_losses)))
plt.show()
plt.savefig('training_DeepAR.png')
rmse_losses
plt.title('RMSE average: '+str(np.average(rmse_losses)) +
'MAE average: ' + str(np.average(mae_losses)))
plt.plot(range(len(rmse_losses)), rmse_losses, "k-")
plt.xlabel("Period")
plt.ylabel("RMSE")
plt.title('RMSE average: '+str(np.average(rmse_losses)) +
'MAE average: ' + str(np.average(mae_losses)))
plt.show()
plt.savefig('training_EE.png')
``` |
{
"source": "0415070/Protoformer",
"score": 2
} |
#### File: Protoformer/src/model.py
```python
!pip install -qq shap==0.35.0
# !pip install -qq shap
import shap
# !pip install -qq torch==1.7.1
!pip install -qq transformers
!pip install -qq sentence-transformers
# !pip -qq install transformers==3.3.1
!pip install -qq torch==1.8.1
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
# !pip install -qq --upgrade wandb
# !pip install -qq torchviz
# !pip install -qq bertviz
# import sys
# !test -d bertviz_repo && echo "FYI: bertviz_repo directory already exists, to pull latest version uncomment this line: !rm -r bertviz_repo"
# # !rm -r bertviz_repo # Uncomment if you need a clean pull from repo
# !test -d bertviz_repo || git clone https://github.com/jessevig/bertviz bertviz_repo
# if not 'bertviz_repo' in sys.path:
# sys.path += ['bertviz_repo']
# !pip install -qq regex
# !pip install -qq transformers
# !pip install -qq boto3
# !wandb login 79c99cb8196ccfc85f75dd926f9e872da3ba85a8
# import wandb
# wandb.init(project="school",notes='Dec-26_BERT')
# %cd /content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/
!pwd
# https://machinelearningmastery.com/feature-importance-and-feature-selection-with-xgboost-in-python/
# Commented out IPython magic to ensure Python compatibility.
RANDOM_SEED =47
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import warnings
warnings.filterwarnings('ignore')
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,classification_report
from collections import defaultdict
from textwrap import wrap
import seaborn as sns
from joblib import load, dump
import pickle
from tqdm import tqdm
# import transformers
import datetime
PATH = '/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints'
# %load_ext tensorboard
log_dir = PATH + "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
from sklearn.metrics import accuracy_score
torch.manual_seed(RANDOM_SEED)
device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
# from bertviz import head_view
import shap
device
# Commented out IPython magic to ensure Python compatibility.
!pip install -qq watermark
# %reload_ext watermark
# %watermark -v -p numpy,tensorflow,torch,pandas,sklearn,seaborn,transformers
# Commented out IPython magic to ensure Python compatibility.
plt.rcParams['figure.figsize'] =(8,8)
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (8, 8),
'axes.labelsize': '8',
'axes.titlesize': '8',
'xtick.labelsize':'4',
'ytick.labelsize':'4',
'font.family': 'Times new roman'}
pylab.rcParams.update(params)
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#93D30C", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
RANDOM_SEED =47
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
# !git clone https://github.com/LiqunW/Long-document-dataset
# !pip install -qq pyunpack
# !pip install -qq patool
# PATH_arxiv = '/content/Long-document-dataset'
# from pyunpack import Archive
# Archive('cs.AI.rar').extractall(PATH_arxiv)
df_imdb = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/IMDB_Dataset.csv')
df_imdb
# # df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/dec_5_hand.csv')
# # df
# # df_clean = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_clean.csv',
# # lineterminator='\n')
# # df_origin = df_clean
# # df_origin= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets_standord.csv')
# # df_cmu = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets_CMU.csv',parse_dates=['created_at'])
# df_origin= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_clean.csv',lineterminator='\n')
# # df_stanford = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets.csv')
# df_origin.columns = ['created_at','school_name','user_name','text','school_handle','tweet_id']
# # col='user_name'
sns.countplot(df_embeddings.predicted_raw_difference)
df_embeddings
# sns.countplot(df_embeddings[df_embeddings.wrong==0].predicted_raw_difference)
# plt.show()
sns.countplot(df_embeddings[df_embeddings.wrong==1][:100]['predict_c_0'])
plt.show()
df_embeddings[df_embeddings.wrong==1].predicted_raw_difference
params = {'legend.fontsize': 'x-large',
'figure.figsize': (18, 18),
'axes.labelsize': '18',
'axes.titlesize': '18',
'xtick.labelsize':'18',
'ytick.labelsize':'18',
'font.family': 'Times new roman'}
pylab.rcParams.update(params)
sns.countplot(df_imdb.sentiment)
plt.ylabel('Samples')
plt.xlabel('IMDB Movie Sentiments')
plt.show()
df= df_imdb
df_profile = df_imdb
# df_profile
"""# Profile Classificaiton"""
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df_profile['labels']= le.fit_transform(df_profile['sentiment'])
df_profile = df_profile.sample(len(df_profile),random_state=47)
df_profile.reset_index(drop=True,inplace=True)
mapping = dict(zip(le.classes_, range(len(le.classes_))))
# df_profile = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/profile_Feb_4.csv')
df_profile= df_imdb
# df_profile = df_profile.sample(5000,random_state=47)
df_profile.reset_index(drop=True,inplace=True)
news_groups = le.classes_.tolist()
import sklearn
X = df_profile.review
# X = df_profile[['text']]
y = df_profile.labels
# z = df_profile.user_name
X_train,X_test,y_train,y_test= train_test_split(X,y,stratify=y,test_size=0.2,
)
print('number of training samples:', len(X_train))
print('number of test samples:', len(X_test))
train_df = pd.DataFrame({'doc':X_train,
'labels':y_train})
test_df = pd.DataFrame({'doc':X_test,
'labels':y_test})
train_df.reset_index(drop=True,inplace=True)
test_df.reset_index(drop=True,inplace=True)
# test_df_og = test_df
# test_df = test_df[test_df.false_predicted == 0]
test_df.reset_index(drop=True,inplace=True)
# sns.countplot(test_df['labels'])
# plt.title('Test Profiles for UCF')
# plt.xlabel('Schools')
# plt.ylabel('Number of profiles')
# plt.show()
# !pip -qq install transformers==3.3.1
# !pip -qq install transformers==4.0.0
!pip -qq install pkbar
import os
import re
import collections
import timeit
import torch
import pandas as pd
import pkbar
import numpy
# import numpy.testing.decorators
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
from sklearn.metrics import f1_score,classification_report
import transformers
# Uses GPU if available
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
# device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
device
# hyperparameters
MAX_LEN = 128
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 8
EPOCHS = 10
LEARNING_RATE = 1e-06
WEIGHT_DECAY = 1e-05
num_of_batches_per_epoch = len(X_train)//TRAIN_BATCH_SIZE
# Distil-bert model parameters
from transformers import DistilBertConfig,DistilBertTokenizer,DistilBertModel
from transformers import BertConfig,BertTokenizer,BertModel
from transformers import BigBirdConfig,BigBirdTokenizer,BigBirdModel
from transformers import LongformerConfig,LongformerTokenizer,LongformerModel
# from transformers import Big
num_classes = len(df_profile.labels.unique())
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-large')
# BigBirdTokenizer
# tokenizer = DistilBertTokenizer.from_pretrained()
class BertDataFormat(Dataset):
def __init__(self, dataframe, tokenizer, max_len):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
cur_doc = str(self.data.doc[index])
cur_doc = " ".join(cur_doc.split())
inputs = self.tokenizer.encode_plus(
cur_doc,
None,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'targets': torch.tensor(self.data.labels[index], dtype=torch.long)
}
def __len__(self):
return self.len
training_set = BertDataFormat(train_df, tokenizer, MAX_LEN)
testing_set = BertDataFormat(test_df, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
test_params = {'batch_size': VALID_BATCH_SIZE,
'shuffle': False,
'num_workers': 0
}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
testing_set = BertDataFormat(test_df, tokenizer, MAX_LEN)
testing_loader = DataLoader(testing_set, **test_params)
# Creating the customized model, by adding a drop out and a dense layer on top of distil bert to get the final output for the model.
history = defaultdict(list)
class DistillBERTClass(torch.nn.Module):
    def __init__(self,num_classes):
        super(DistillBERTClass, self).__init__()
        self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.pre_classifier = torch.nn.Linear(768, 768)  # intermediate projection before the classification head
        self.dropout = torch.nn.Dropout(0.6)
        self.classifier = torch.nn.Linear(768, num_classes)
    def forward(self, input_ids, attention_mask):
        output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = output_1[0]
        bert_last = hidden_state[:, 0]  # [CLS]-position representation
        bert_last = self.dropout(self.pre_classifier(bert_last))
        output = self.classifier(bert_last)
        return output
class BERTClass(torch.nn.Module):
    def __init__(self,num_classes):
        super(BERTClass, self).__init__()
        self.l1 = BertModel.from_pretrained("bert-base-uncased",output_hidden_states=True)
        self.pre_classifier = torch.nn.Linear(768, 768)  # intermediate projection before the classification head
        self.dropout = torch.nn.Dropout(0.6)
        self.classifier = torch.nn.Linear(768, num_classes)
    def forward(self, input_ids, attention_mask):
        output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = output_1[0]
        bert_last = hidden_state[:, 0]  # [CLS]-position representation
        bert_last = self.dropout(self.pre_classifier(bert_last))
        output = self.classifier(bert_last)
        return output
class BibBirdClass(torch.nn.Module):
    def __init__(self,num_classes):
        super(BibBirdClass, self).__init__()
        self.l1 = BigBirdModel.from_pretrained("google/bigbird-roberta-large",output_hidden_states=True)
        self.pre_classifier = torch.nn.Linear(1024, 1024)  # bigbird-roberta-large uses a 1024-dim hidden state
        self.dropout = torch.nn.Dropout(0.6)
        self.classifier = torch.nn.Linear(1024, num_classes)
    def forward(self, input_ids, attention_mask):
        output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = output_1[0]
        bert_last = hidden_state[:, 0]  # [CLS]-position representation
        bert_last = self.dropout(self.pre_classifier(bert_last))
        output = self.classifier(bert_last)
        return output
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# Copy model to device.
# baseline_model = DistillBERTClass(num_classes)
# baseline_model = BERTClass(num_classes)
# baseline_model.to(device)
baseline_model = BibBirdClass(num_classes)
baseline_model.to(device)
# Create the loss function and optimizer
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params = baseline_model.parameters(), lr=LEARNING_RATE,weight_decay=WEIGHT_DECAY)
baseline_model.parameters
# Calcuate accuracy of the model
def acc_cal(big_idx, targets):
n_correct = (big_idx==targets).sum().item()
return n_correct
# train model
def train(epoch,model):
tr_loss = 0
n_correct = 0
nb_tr_steps = 0
nb_tr_examples = 0
model.train()
# progress bar
train_per_epoch = num_of_batches_per_epoch
kbar = pkbar.Kbar(target=train_per_epoch, epoch=epoch,
num_epochs=EPOCHS, width=8,
always_stateful=False)
for idx,data in enumerate(training_loader, 0):
# copy tensors to gpu
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
# get output and calculate loss.
outputs = model(ids, mask)
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
optimizer.zero_grad()
loss.backward()
# # When using GPU
optimizer.step()
kbar.update(idx, values=[("train_loss", tr_loss/(idx+1))])
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
# Comment them out for faster training
test_acc,test_loss,predicted_labels,true_labels, predicted_raw= valid(model, testing_loader)
print(" - ")
print("test accuracy:",round(test_acc,2))
print("test loss:",round(test_loss,2))
history['train_acc'].append(epoch_accu)
history['train_loss'].append(epoch_loss)
history['test_acc_while_training'].append(test_acc)
history['test_loss_while_training'].append(test_loss)
# print(f"Training Loss Epoch: {epoch_loss}")
# print(f"Training Accuracy Epoch: {epoch_accu}")
return
# function to predict output.
def valid(model, testing_loader):
predicted_raw = []
predicted_labels = []
true_labels = []
nb_tr_steps = 0
tr_loss =0
nb_tr_examples=0
model.eval()
n_correct = 0; n_wrong = 0; total = 0
with torch.no_grad():
for _, data in enumerate(testing_loader, 0):
# copy tensors to gpu.
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
outputs = model(ids, mask).squeeze()
# calculate loss
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
predicted_raw += outputs
predicted_labels += big_idx
true_labels += targets
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
history['val_acc'].append(epoch_accu)
history['val_loss'].append(epoch_loss)
return epoch_accu,epoch_loss,predicted_labels,true_labels,predicted_raw
# with torch.no_grad():
# for _, data in enumerate(testing_loader, 0):
# ids = data['ids'].to(device, dtype = torch.long)
# mask = data['mask'].to(device, dtype = torch.long)
# targets = data['targets'].to(device, dtype = torch.long)
# outputs =baseline_model(ids, mask).squeeze()
# print(outputs)
# big_val, big_idx = torch.max(outputs.data, dim=1)
# function to predict output.
def test_model(model, testing_loader):
predicted_labels = []
true_labels = []
nb_tr_steps = 0
tr_loss =0
nb_tr_examples=0
model.eval()
n_correct = 0; n_wrong = 0; total = 0
with torch.no_grad():
for _, data in enumerate(testing_loader, 0):
# copy tensors to gpu.
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
outputs = model(ids, mask).squeeze()
# calculate loss
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
predicted_labels += big_idx
true_labels += targets
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
return epoch_accu,epoch_loss,predicted_labels,true_labels
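# Hedged sketch of how the helpers above are typically driven (baseline_model,
# EPOCHS and testing_loader are defined elsewhere in this script; the loop itself
# is illustrative, not part of the original notebook):
#
#     for epoch in range(EPOCHS):
#         train(epoch, baseline_model)
#     test_acc, test_loss, predicted_labels, true_labels = test_model(baseline_model, testing_loader)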
# import wandb
# wandb.login()
# args = dict(
# epochs=5,
# classes=10,
# batch_size=32,
# learning_rate=0.005,
# dataset="Twitter_Clean",
# architecture="Baseline")
# wandb.init(config=args)
# wandb.watch(baseline_model)
hp_batch_train = hp.HParam('train_batch', hp.Discrete([16,32,64]))
hp_batch_valid = hp.HParam('valid_batch', hp.Discrete([16,32]))
hp_learning_rate = hp.HParam('learning_rate',hp.RealInterval(1e-06,1e-03))
hp_max_len = hp.HParam('length', hp.Discrete([128,256,512]))
METRIC_ACCURACY ='accuracy'
with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
hp.hparams_config(
hparams=[hp_batch_train, hp_batch_valid, hp_learning_rate,hp_max_len],
metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
)
def run(run_dir, hparams):
with tf.summary.create_file_writer(run_dir).as_default():
hp.hparams(hparams) # record the values used in this trial
        accuracy = model(hparams)  # placeholder from the TF hparams tutorial; expects a train-and-evaluate helper that returns accuracy (not defined in this script)
tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
df_profile
from torch.utils.tensorboard import SummaryWriter
# from torchvision import datasets, transforms
writer = SummaryWriter()
for n_iter in range(100):
writer.add_scalar('Loss/train', np.random.random(), n_iter)
writer.add_scalar('Loss/test', np.random.random(), n_iter)
writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
``` |
{
"source": "0417taehyun/ringle-tutor-analysis",
"score": 3
} |
#### File: src/util/worker.py
```python
def update_interests(tutors: list[dict], interests: dict[str, int]) -> None:
for tutor in tutors:
for interest in tutor["interests"]:
if interest in interests:
interests[interest] += 1
else:
interests[interest] = 1
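# Hedged usage sketch (the tutor dicts below are illustrative, not repo data):
if __name__ == "__main__":
    tutors = [
        {"interests": ["travel", "music"]},
        {"interests": ["music"]},
    ]
    counts: dict[str, int] = {}
    update_interests(tutors, counts)
    print(counts)  # expected: {'travel': 1, 'music': 2}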
``` |
{
"source": "0417taehyun/simple-ATM-controller",
"score": 4
} |
#### File: src/controller/atm.py
```python
from src.fake import fake_bank_api, fake_deposit_slot
class ATMController:
def __init__(self, id, latitude, longitude):
"""
id: the unique id of each ATM
latitude: for the location of each ATM
longitude: for the location of each ATM
is_validated: to check the PIN number is validated
card_info: to communicate with bank API for transactions
"""
self.id = id
self.latitude = latitude
self.longitude = longitude
self.is_validated = False
self.card_info = None
def key_pad(self, number):
"""
It receives PIN number that user enters for validation and return it.
"""
return number
def balance(self):
"""
To see balances with Bank API
Users are required to insert card and validate the PIN number first
"""
if not (self.card_info and self.is_validated):
return {"message": "Authenticate your card first"}
balance = fake_bank_api.see_balance(card_info=self.card_info)
return {"message": f"The amounts of balance are {balance}"}
def deposit(self):
"""
To deposit with Bank API, deposit slot
Users are required to insert card and validate the PIN number first
        Deposit slot would pick out counterfeit cash
Using flag to check Bank API success
"""
if not (self.card_info and self.is_validated):
return {"message": "Authenticate your card first"}
validated_cash = fake_deposit_slot.collect_cash()
if not validated_cash:
return {"message": "Nothing in deposit slot"}
flag = fake_bank_api.deposit(
card_info=self.card_info, amounts=validated_cash
)
if flag:
balance = fake_bank_api.see_balance(card_info=self.card_info)
return {
"message": f"The amounts of balance after deposit are {balance}" # noqa
}
def withdraw(self, amounts):
"""
To withdraw with Bank API, deposit slot
Users are required to insert card and validate the PIN number first
Deposit slot would return cash that user requested
Using flag to check Bank API success
If balances are less than user's requests, it would return False
"""
if not (self.card_info and self.is_validated):
return {"message": "Authenticate your card first"}
if not fake_deposit_slot.return_cash(amounts):
return {"message": "Cash bin empty"}
flag = fake_bank_api.withdraw(
card_info=self.card_info, amounts=amounts
)
if flag:
balance = fake_bank_api.see_balance(card_info=self.card_info)
return {
"message": f"The amounts of balance after withdrawal are {balance}" # noqa
}
else:
return {"message": f"Your balances are less than {amounts}"}
```
#### File: src/test/test_atm.py
```python
import unittest
from src.controller import ATMController
from src.fake import account, fake_bank_api, fake_card_reader
class TestATMController(unittest.TestCase):
def setUp(self):
self.atm = ATMController(
id="test1234",
latitude="37.547076399306",
longitude="127.04020267241",
)
self.atm.card_info = fake_card_reader.validate_card()
PIN_number = self.atm.key_pad(number="0000")
flag = fake_bank_api.validate_PIN(PIN=PIN_number)
self.atm.is_validated = flag
account.balance = 100
def test_balance_success(self):
print("Balance Success Test")
message = self.atm.balance()
self.assertEqual(
first=message,
second={
"message": f"The amounts of balance are {account.balance}"
},
)
def test_deposit_success(self):
print("Deposit Success Test")
DEPOSITED_CASH = 100
balance = account.balance + DEPOSITED_CASH
message = self.atm.deposit()
self.assertEqual(
first=message,
second={
"message": f"The amounts of balance after deposit are {balance}" # noqa
},
)
def test_withdraw_success(self):
print("Withdraw Test")
AMOUNTS = 50
balance = account.balance - AMOUNTS
message = self.atm.withdraw(amounts=AMOUNTS)
self.assertEqual(
first=message,
second={
"message": f"The amounts of balance after withdrawal are {balance}" # noqa
},
)
def test_withdraw_not_enough_balance(self):
print("Withdraw Not Enough Balance Test")
AMOUNTS = 120
message = self.atm.withdraw(amounts=AMOUNTS)
self.assertEqual(
first=message,
second={"message": f"Your balances are less than {AMOUNTS}"},
)
``` |
{
"source": "0417taehyun/studeep-backend",
"score": 2
} |
#### File: src/crud/statuses.py
```python
from fastapi.encoders import jsonable_encoder
from sqlalchemy import and_
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models import Statuses
from app.schemas import StatusCreate, StatusUpdate
class CRUDStatus(CRUDBase[Statuses, StatusCreate, StatusUpdate]):
def create(self, db: Session, statuses: dict):
try:
            db.bulk_insert_mappings(self.model, statuses)
db.commit()
except Exception as error:
print(error)
raise Exception
finally:
db.close()
def update_or_create(
self,
db: Session,
type: str,
cnt: int,
time: int,
my_study_id: int,
report_id: int
):
try:
instance = db.query(self.model).filter(and_(
self.model.type == type,
self.model.my_study_id == my_study_id,
self.model.report_id == report_id
)).first()
if instance:
instance.count += cnt
instance.time += time
else:
instance = self.model(
type = type,
count = cnt,
time = time,
my_study_id = my_study_id,
report_id = report_id
)
db.add(instance)
db.commit()
db.refresh(instance)
return jsonable_encoder(instance)
except:
raise Exception
finally:
db.close()
statuses = CRUDStatus(Statuses)
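# Hedged usage sketch (session handling and argument values are illustrative):
#     from app.database.session import SessionLocal
#     db = SessionLocal()
#     statuses.update_or_create(db=db, type="phone", cnt=1, time=30,
#                               my_study_id=1, report_id=1)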
```
#### File: src/service/auth.py
```python
import logging
import traceback
import requests
from datetime import datetime, timedelta
from typing import Optional
from fastapi import status, Header
from fastapi.responses import JSONResponse
from jose import jwt, JWTError, ExpiredSignatureError
from jose.exceptions import JWTClaimsError
from app.core import user_settings
from app.errors import get_detail
def parsing_token_decorator(func):
def wrapper(token: str, **kwargs):
try:
return func(token.split(" ")[1], **kwargs)
except IndexError:
raise JWTError()
return wrapper
# DI
def auth_token(authorization: Optional[str] = Header(None)):
try:
check_access_token_valid(authorization)
except JWTError:
message = traceback.format_exc()
detail = get_detail(param='token', field='authorize', message=message, err='invalid Google token')
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content={'detail': detail})
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, user_settings.SECRET_KEY, algorithm=user_settings.ALGORITHM)
return encoded_jwt
@parsing_token_decorator
def auth_google_token(token: str):
result = requests.get("https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=" + token)
if result.status_code == 200:
return result.json()["email"]
else:
raise JWTError
@parsing_token_decorator
def check_access_token_valid(token: str, on_board=False):
try:
decode_token = jwt.decode(token, user_settings.SECRET_KEY, algorithms=[user_settings.ALGORITHM])
if on_board:
return decode_token["on_board"]
return decode_token["sub"]
except ExpiredSignatureError as err:
logging.info("Token has expired")
# todo: Refresh Token Check
raise JWTError()
except JWTClaimsError:
logging.info("token has any claims")
raise JWTError()
except JWTError:
logging.info("Invalid Signature token")
raise JWTError()
def check_refresh_token(param):
    # todo: hook up after the Redis connection is available
pass
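# Hedged usage sketch (payload values are illustrative):
#     token = create_access_token(data={"sub": "user@example.com", "on_board": False},
#                                 expires_delta=timedelta(minutes=30))
#     email = check_access_token_valid("Bearer " + token)  # the decorator strips the "Bearer " prefix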
```
#### File: src/test/conftest.py
```python
from typing import Dict
from fastapi.testclient import TestClient
from app.database.session import SessionLocal
from app.database.base import Base
from app.api.deps import get_db
from app.main import app
Base.metadata
def override_get_db():
try:
db = SessionLocal()
yield db
finally:
db.close()
def client():
with TestClient(app) as client:
yield client
app.dependency_overrides[get_db] = override_get_db
def test_user() -> Dict[str, str]:
return {
"id": 1,
"provider": "Google",
"email": "<EMAIL>",
"nickname": "test"
}
client = TestClient(app)
``` |
{
"source": "04301998/Lupe_project-",
"score": 2
} |
#### File: 04301998/Lupe_project-/ACC.py
```python
import pynbody
import pylab
import numpy as np
import matplotlib.pylab as plt
import readcol
import itertools as it
from itertools import tee
import pandas as pd
import warnings
import decimal
import statistics
# Loading files
Hfiles = readcol.readcol('/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/supersample/highres/cptmarvel.test.orbit')
Ffiles = readcol.readcol('/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/supersample/fatso/cptmarvel.fatso.orbit')
# DATA
# Convertions
m_sol= 2.31e15
l_kpc = 25000
m_g = 1.989e33
l_cm = 3.086e21
timee = 38.78
d_timee = 1.22386438e18
t_square = 1.49784401e36
# Highes Accretion
Denergy =( Ffiles[:,13]* m_sol*( l_kpc**2) *m_g *(l_cm**2))/t_square
Dtime = Ffiles[:,14]*d_timee
dEdt = Denergy/Dtime
Time =((Ffiles[:,1])-0.9999999)*timee
print(Time)
print(timee)
# Functions
def pair(iterable):
"c -> (c0,c1), (c1,c2), (c2, c3), ..." # This function creates ordered pairs
a, b = tee(iterable)
next(b, None)
return zip(a, b)
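# Hedged example of pair(): consecutive bin edges used by combining() below,
# e.g. list(pair([0.0, 0.25, 0.5])) == [(0.0, 0.25), (0.25, 0.5)]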
def float_range(start, stop, step):
while start < stop: # Float Range function
yield float(start)
start += decimal.Decimal(step)
intervals = list(pair(float_range(0, 2, 0.25)))  # materialize: the pairs are iterated more than once and len() is taken below
centers = [(tmin+tmax)/2. for tmin, tmax in intervals]
def combining(Time,dEdt,intervals):
# Calculate median valuea given intervals
warnings.simplefilter("ignore")
out = []
for tmin, tmax in intervals:
mask = (Time >= tmin) & (Time < tmax)
out.append(np.mean(dEdt[mask]))
return np.array(out)
b = len(intervals)
print(centers)
print(combining(Time, dEdt, intervals))
plt.title(" $\Delta$E/$\Delta$t vs Time")
plt.plot(centers, combining(Time, dEdt, intervals),'ro', label=('FATSO Simulation')) # FATSO
#plt.scatter(Time, dEdt)
plt.legend(loc = 'upper right')
plt.xlabel("Time(Gyrs)")
plt.ylabel("$\Delta$E/$\Delta$t(Erg/s)")
plt.yscale('log')
plt.ylim(10e35,10e38)
#plt.xlim(0,3)
plt.show()
plt.savefig("A_FATSO.png")
```
#### File: 04301998/Lupe_project-/highres.py
```python
import pynbody
import pylab
import numpy as np
import matplotlib.pylab as plt
import readcol
'''
r = (G * BH_Mass) / (stars_vel**2)
G = 6.674e-11
'''
#Now I need a code that will load the snapshots(s will stand for )
Path = "/media/jillian//cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/supersample/highres/"
files = readcol.readcol('/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/supersample/highres/files.list')
all_files = files[:,0]
#Tell where BH is Function
def findBH(s):
BH = s.stars[pynbody.filt.LowPass('tform', 0.0)]
return BH
#std stands for standard deviation (velocity dispersion)
def DispersionVelocity(s):
velocity = s.stars['vel']
    x = np.std(velocity[:, 0])  # dispersion of each velocity component taken over all stars
    y = np.std(velocity[:, 1])
    z = np.std(velocity[:, 2])
#print(x_direct)
#print(y_direct)
#print(z_direct)
dispersion_velocity = np.sqrt( (x)**2 + (y)**2 + (z)**2)
print("Dispersion velocity: ",dispersion_velocity)
return dispersion_velocity
#Need my units to match up so the calculations go correctly
#Couldn't find a way to convert G, so i converted everything else and then converte back to KPC
def RadInfluence(s):
G = 6.674e-11
#G is in m**3 * kg**-1 * s**-2
BH = findBH(s)
BH_Mass = BH['mass'].in_units('kg')
#Kg mtches kg in G
stars_vel = DispersionVelocity(s) * 1e3
r = (G * BH_Mass) / (stars_vel**2)
return r * 3.24e-20*3
f = open("highres.txt","w+")
f.write("Mass ,BH velocity, BHx direction , BHy direction , BHz direction, Position BH,Radius Influence, Velocity stars sphere,Velocity stars with respect BH '\n' ")
#Finally converted back to KPC (the conversion is * 3.24e-20)
for i in all_files:
s = pynbody.load(Path + i)
s.physical_units()
#Don't forget to center the galaxy with this
pynbody.analysis.angmom.faceon(s)
BH = findBH(s)
BH_pos = BH['pos']
BHx = BH_pos[:,0]
BHy = BH_pos[:,1]
BHz = BH_pos[:,2]
BH_position = np.array([BHx[0], BHy[0], BHz[0]])
pos_magnitude = np.sqrt((BHx)**2 + (BHy)**2 + (BHz)**2)
Mass_Msol = BH['mass']
MassBH = Mass_Msol[0]
pos_magni = pos_magnitude[0]
#print(BH_pos)
#dispersion = DispersionVelocity(s)
#print(dispersion)
#The radius here is an array, we need the center to be an integer
radius = RadInfluence(s)
radius_influence = radius[0]
print(radius)
#BH_pos is a three int array so it will be the center
sphere = pynbody.filt.Sphere(radius_influence, cen = BH_position)
#print(sphere)
stars = s.stars[np.where(s.stars["tform"]>0)]
in_sphere = stars[sphere]
total_stars = len(in_sphere)
print("Total stars: ",total_stars)
#This find their velocity
velocity = in_sphere['vel']
#Now we need to find the velocity of these stars in x,y,z
x = np.array([vel[0] for vel in velocity])
y = np.array([vel[1] for vel in velocity])
z = np.array([vel[2] for vel in velocity])
#Now we can find the average of these by dividing by the total
vel_answer = np.sqrt((x)**2 + (y)**2 + (z)**2)
#Now divide by total number of stars
vel_stars_sphere= vel_answer.sum() / total_stars
print("Velocity of the stars in the sphere: ", vel_stars_sphere)
# Velocity of the stars with respect of the BH
stars_BH_X = x - BH['vel'][:,0]
stars_BH_Y = y - BH['vel'][:,1]
stars_BH_Z = z - BH['vel'][:,2]
stars_xyz = np.sqrt((stars_BH_X)**2 + (stars_BH_Y)**2 +(stars_BH_Z)**2)
stars_magnitude = np.sum(stars_xyz)/total_stars
print("Velocity of the stars with respect the BH: ", stars_magnitude)
# VELOCITY OF THE GALAXY
galaxy = s['vel']
Galaxy_vx = galaxy[:,0]
Galaxy_vy = galaxy[:,1]
Galaxy_vz = galaxy[:,2]
mass = s['mass']
# AVERAGE VELOCITY
Av_Vx = sum(Galaxy_vx*mass)/(sum(mass))
#print(Av_Vx)
Av_Vy = sum(Galaxy_vy*mass)/(sum(mass))
#print(Av_Vy)
Av_Vz = sum(Galaxy_vz*mass)/(sum(mass))
#print(Av_Vz)
# BLACK HOLE VELOCITY
BH_x = Av_Vx - BH['vel'][:,0]
#print(BH_x)
BH_y= Av_Vy - BH['vel'][:,1]
#print(BH_y)
BH_z= Av_Vz - BH['vel'][:,2]
#print(BH_z)
BH_MAGNITUDE= np.sqrt((BH_x)**2 + (BH_y)**2 + (BH_z)**2)
BH_VEL = BH_MAGNITUDE[0]
print("Velocity of the black hole: ",BH_MAGNITUDE)
data = str(MassBH)+" "+str(BH_VEL)+" "+str(BHx)+" "+str(BHy)+" "+str(BHz)+" "+str(pos_magni)+" "+str(radius_influence)+" "+str(vel_stars_sphere)+" "+str(stars_magnitude)+"\n"
f.write(data)
print(data)
f.close()
``` |
{
"source": "0486/ioflo-python-multiversion",
"score": 3
} |
#### File: aio/http/serving.py
```python
from __future__ import absolute_import, division, print_function
import sys
import os
import socket
import errno
import io
from collections import deque
import codecs
import json
import ssl
import copy
import random
import datetime
if sys.version > '3':
from urllib.parse import urlsplit, quote, quote_plus, unquote, unquote_plus
else:
from urlparse import urlsplit
from urllib import quote, quote_plus, unquote, unquote_plus
try:
import simplejson as json
except ImportError:
import json
from email.parser import HeaderParser
# Import ioflo libs
from ...aid.sixing import *
from ...aid.odicting import odict, lodict, modict
from ...aid import aiding
from ...aid.consoling import getConsole
from ...base import excepting, storing
from ..tcp import Server, ServerTls
from . import httping
console = getConsole()
CRLF = b"\r\n"
LF = b"\n"
CR = b"\r"
# Class Definitions
class Requestant(httping.Parsent):
"""
Nonblocking HTTP Server Requestant class
Parses request msg
"""
def __init__(self, incomer=None, **kwa):
"""
Initialize Instance
Parameters:
incomer = Incomer connection instance
"""
super(Requestant, self).__init__(**kwa)
self.incomer = incomer
self.url = u'' # full path in request line either relative or absolute
self.scheme = u'' # scheme used in request line path
self.hostname = u'' # hostname used in request line path
self.port = u'' # port used in request line path
self.path = u'' # partial path in request line without scheme host port query fragment
self.query = u'' # query string from full path
self.fragment = u'' # fragment from full path
def checkPersisted(self):
"""
Checks headers to determine if connection should be kept open until
client closes it
Sets the .persisted flag
"""
connection = self.headers.get("connection") # check connection header
if self.version == (1, 1): # rules for http v1.1
self.persisted = True # connections default to persisted
connection = self.headers.get("connection")
if connection and "close" in connection.lower():
self.persisted = False # unless connection set to close.
# non-chunked but persistent connections should have non None for
# content-length Otherwise assume not persisted
elif (not self.chunked and self.length is None):
self.persisted = False
elif self.version == (1, 0): # rules for http v1.0
self.persisted = False # connections default to non-persisted
# HTTP/1.0 Connection: keep-alive indicates persistent connection.
if connection and "keep-alive" in connection.lower():
self.persisted = True
def parseHead(self):
"""
Generator to parse headers in heading of .msg
Yields None if more to parse
Yields True if done parsing
"""
if self.headed:
return # already parsed the head
self.headers = lodict()
# create generator
lineParser = httping.parseLine(raw=self.msg, eols=(CRLF, LF), kind="status line")
while True: # parse until we get full start line
if self.closed: # connection closed prematurely
raise httping.PrematureClosure("Connection closed unexpectedly "
"while parsing request start line")
line = next(lineParser)
if line is None:
(yield None)
continue
lineParser.close() # close generator
break
method, url, version = httping.parseRequestLine(line)
self.method = method
self.url = url.strip()
if not version.startswith(u"HTTP/1."):
raise httping.UnknownProtocol(version)
if version.startswith(u"HTTP/1.0"):
self.version = (1, 0)
else:
self.version = (1, 1) # use HTTP/1.1 code for HTTP/1.x where x>=1
pathSplits = urlsplit(self.url)
self.path = unquote(pathSplits.path) # unquote non query path portion here
self.scheme = pathSplits.scheme
self.hostname = pathSplits.hostname
self.port = pathSplits.port
self.query = pathSplits.query # WSGI spec leaves it quoted do not unquote
#self.query = httping.unquoteQuery(pathSplits.query) # unquote only the values
self.fragment = pathSplits.fragment
leaderParser = httping.parseLeader(raw=self.msg,
eols=(CRLF, LF),
kind="leader header line")
while True:
if self.closed: # connection closed prematurely
raise httping.PrematureClosure("Connection closed unexpectedly "
"while parsing request header")
headers = next(leaderParser)
if headers is not None:
leaderParser.close()
break
(yield None)
self.headers.update(headers)
# are we using the chunked-style of transfer encoding?
transferEncoding = self.headers.get("transfer-encoding")
if transferEncoding and transferEncoding.lower() == "chunked":
self.chunked = True
else:
self.chunked = False
# NOTE: RFC 2616, S4.4, #3 says ignore if transfer-encoding is "chunked"
contentLength = self.headers.get("content-length")
if not self.chunked:
if contentLength:
try:
self.length = int(contentLength)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else: # if no body then neither content-length or chunked required
self.length = 0 # assume no body so length 0
else: # ignore content-length if chunked
self.length = None
contentType = self.headers.get("content-type")
if contentType:
if u';' in contentType: # should also parse out charset for decoding
contentType, sep, encoding = contentType.rpartition(u';')
if encoding:
self.encoding = encoding
if 'application/json' in contentType.lower():
self.jsoned = True
else:
self.jsoned = False
# Should connection be kept open until client closes
self.checkPersisted() # sets .persisted
self.headed = True
yield True
return
def parseBody(self):
"""
Parse body
"""
if self.bodied:
return # already parsed the body
if self.length and self.length < 0:
raise ValueError("Invalid content length of {0}".format(self.length))
del self.body[:] # self.body.clear() clear body python2 bytearrays don't clear
if self.chunked: # chunked takes precedence over length
self.parms = odict()
while True: # parse all chunks here
if self.closed: # connection closed prematurely
raise httping.PrematureClosure("Connection closed unexpectedly"
" while parsing request body chunk")
chunkParser = httping.parseChunk(raw=self.msg)
while True: # parse another chunk
result = next(chunkParser)
if result is not None:
chunkParser.close()
break
(yield None)
size, parms, trails, chunk = result
if parms: # chunk extension parms
self.parms.update(parms)
if size: # size non zero so append chunk but keep iterating
self.body.extend(chunk)
if self.closed: # no more data so finish
chunkParser.close()
break
else: # last chunk when empty chunk so done
if trails:
self.trails = trails
chunkParser.close()
break
elif self.length != None: # known content length
while len(self.msg) < self.length:
if self.closed: # connection closed prematurely
raise httping.PrematureClosure("Connection closed unexpectedly"
" while parsing request body")
(yield None)
self.body = self.msg[:self.length]
del self.msg[:self.length]
else: # unknown content length invalid
raise httping.HTTPException("Invalid body, content-length not provided!")
# only gets to here once content length has become finite
# closed or not chunked or chunking has ended
self.length = len(self.body)
self.bodied = True
(yield True)
return
class Responder(object):
"""
Nonblocking HTTP WSGI Responder class
"""
HttpVersionString = httping.HTTP_11_VERSION_STRING # http version string
Delay = 1.0
def __init__(self,
incomer,
app,
environ,
chunkable=False,
delay=None):
"""
Initialize Instance
Parameters:
incomer = incomer connection instance
app = wsgi app callable
environ = wsgi environment dict
chunkable = True if may send body in chunks
"""
status = "200 OK" # integer or string with reason, WSGI is string with reason
self.incomer = incomer
self.app = app
self.environ = environ
self.chunkable = True if chunkable else False
self.started = False # True once start called (start_response)
self.headed = False # True once headers sent
self.chunked = False # True if should send in chunks
self.ended = False # True if response body completely sent
self.closed = False # True if connection closed by far side
self.iterator = None # iterator on application body
self.status = status
self.headers = lodict() # headers
self.length = None # if content-length provided must not exceed
self.size = 0 # number of body bytes sent so far
self.evented = False # True if response is event-stream
def close(self):
"""
Close any resources
"""
self.closed = True
def reset(self, environ, chunkable=None):
"""
Reset attributes for another request-response
"""
self.environ = environ
if self.chunkable is not None:
self.chunkable = chunkable
self.started = False
self.headed = False
self.chunked = False
self.ended = False
self.iterator = None
self.status = "200 OK"
self.headers = lodict()
self.length = None
self.size = 0
def build(self):
"""
Return built head bytes from .status and .headers
"""
lines = []
_status = getattr(self.iterator, '_status', None) # if AttributiveGenerator
status = _status if _status is not None else self.status # override
if isinstance(status, (int, long)):
status = "{0} {1}".format(status, httping.STATUS_DESCRIPTIONS[status])
startLine = "{0} {1}".format(self.HttpVersionString, status)
try:
startLine = startLine.encode('ascii')
except UnicodeEncodeError:
startLine = startLine.encode('idna')
lines.append(startLine)
# Override if AttributiveGenerator
self.headers.update(getattr(self.iterator, '_headers', lodict()))
if u'server' not in self.headers: # create Server header
self.headers[u'server'] = "Ioflo WSGI Server"
if u'date' not in self.headers: # create Date header
self.headers[u'date'] = httping.httpDate1123(datetime.datetime.utcnow())
if self.chunkable and 'transfer-encoding' not in self.headers:
self.chunked = True
self.headers[u'transfer-encoding'] = u'chunked'
for name, value in self.headers.items():
lines.append(httping.packHeader(name, value))
lines.extend((b"", b""))
head = CRLF.join(lines) # b'/r/n'
return head
def write(self, msg):
"""
WSGI write callback This writes out the headers the first time its called
otherwise writes the msg bytes
"""
if not self.started:
raise AssertionError("WSGI write() before start_response()")
if not self.headed: # head not written yet
head = self.build()
self.incomer.tx(head)
self.headed = True
if self.chunked:
msg = httping.packChunk(msg)
if self.length is not None: # limit total size to length
size = self.size + len(msg)
if size > self.length:
msg = msg[:self.length - size]
self.size += len(msg)
if msg:
self.incomer.tx(msg)
def start(self, status, response_headers, exc_info=None):
"""
WSGI application start_response callable
Parameters:
status is string of status code and status reason '200 OK'
response_headers is list of tuples of strings of the form (field, value)
one tuple for each header example:
[
('Content-type', 'text/plain'),
('X-Some-Header', 'value')
]
exc_info is optional exception info if exception occurred while
processing request in wsgi application
If exc_info is supplied, and no HTTP headers have been output yet,
start_response should replace the currently-stored
HTTP response headers with the newly-supplied ones,
thus allowing the application to "change its mind" about
the output when an error has occurred.
However, if exc_info is provided, and the HTTP headers
have already been sent, start_response must raise an error,
and should re-raise using the exc_info tuple. That is:
raise exc_info[1].with_traceback(exc_info[2]) (python3)
raise exc_info[0], exc_info[1], exc_info[2] (python2)
Use six.reraise to work for both
Nonstandard modifiction to allow for iterable/generator of body to change
headers and status before first write to support async processing of
responses whose iterator/generator yields empty before first non-empty
yield. In .service yielding empty does not cause write so status line
and headers are not sent until first non-empty write.
The mode is that the app.headers and app.status are consulted to see
if changed from when .start = wsgi start_response was first called.
"""
if exc_info:
try:
if self.headed:
# Re-raise original exception if headers sent
reraise(*exc_info) # sixing.reraise
finally:
exc_info = None # avoid dangling circular ref
elif self.started: # may not call start_response again without exc_info
raise AssertionError("Already started!")
self.status = status
self.headers = lodict(response_headers)
if u'content-length' in self.headers:
self.length = int(self.headers['content-length'])
self.chunkable = False # cannot use chunking with finite content-length
else:
self.length = None
if u'content-type' in self.headers:
if self.headers['content-type'].startswith('text/event-stream'):
self.evented = True
self.started = True
return self.write
def service(self):
"""
Service application
"""
if not self.closed and not self.ended:
if self.iterator is None: # initiate application
self.iterator = iter(self.app(self.environ, self.start))
try:
msg = next(self.iterator)
except StopIteration as ex:
if hasattr(ex, "value") and ex.value:
self.write(ex.value) # new style generators in python3.3+
self.write(b'') # in case chunked send empty chunk to terminate
self.ended = True
except httping.HTTPError as ex:
if not self.headed:
headers = lodict()
headers.update(ex.headers.items())
if 'content-type' not in headers:
headers['content-type'] = 'text/plain'
msg = ex.render()
headers['content-length'] = str(len(msg))
# WSGI status is string of status code and reason
status = "{} {}".format(ex.status, ex.reason)
self.start(status, headers.items(), sys.exc_info())
self.write(msg)
self.ended = True
else:
console.terse("HTTPError streaming body after headers sent.\n"
"{}\n".format(ex))
except Exception as ex: # handle http exceptions not caught by app
console.terse("Unexcepted Server Error.\n"
"{}\n".format(ex))
else:
if msg: # only write if not empty allows async processing
self.write(msg)
if self.length is not None and self.size >= self.length:
self.ended = True
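# Hedged illustration (not part of ioflo): a minimal WSGI application callable of the
# kind this Responder drives, showing the start_response contract described above.
def exampleWsgiApp(environ, start_response):
    """
    Minimal WSGI app: returns a plain-text greeting for any request.
    """
    body = b"Hello from a WSGI app"
    headers = [('Content-Type', 'text/plain'),
               ('Content-Length', str(len(body)))]
    start_response("200 OK", headers)
    return [body]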
class Valet(object):
"""
Valet WSGI Server Class
"""
Timeout = 5.0 # default server connection timeout
def __init__(self,
store=None,
app=None,
reqs=None,
reps=None,
servant=None,
name='',
bufsize=8096,
wlog=None,
ha=None,
host=u'',
port=None,
eha=None,
scheme=u'',
timeout=None,
**kwa):
"""
Initialization method for instance.
Parameters:
store is Datastore for timers
app is wsgi application callable
reqs is odict of Requestant instances keyed by ca
reps is odict of running Wsgi Responder instances keyed by ca
servant is instance of Server or ServerTls or None
name is user friendly name for servant
bufsize is buffer size for servant
wlog is WireLog instance if any for servant
ha is host address duple (host, port) for local servant listen socket
host is host address for local servant listen socket,
'' means any interface on host
port is socket port for local servant listen socket
eha is external destination address for servant
for incoming connections used in TLS
scheme is http scheme u'http' or u'https' or empty
for servant and WSGI environment
kwa needed to pass additional parameters to servant
timeout is timeout in seconds for dropping idle connections
Attributes:
.store is Datastore for timers
.app is wsgi application callable
.reqs is odict of Requestant instances keyed by ca
.reps is odict of running Wsgi Responder instances keyed by ca
.servant is instance of Server or ServerTls or None
.timeout is timeout in seconds for dropping idle connections
.scheme is http scheme http or https for servant and environment
.secured is Boolean true if TLS
"""
self.app = app
self.reqs = reqs if reqs is not None else odict() # allows external view
self.reqs.clear() # items should only be assigned by valet
self.reps = reps if reps is not None else odict() # allows external view
self.reps.clear() # items should only be assigned by valet
self.store = store or storing.Store(stamp=0.0)
if not name:
name = "Ioflo_WSGI_server"
self.timeout = timeout if timeout is not None else self.Timeout
ha = ha or (host, port) # ha = host address takes precendence over host, port
if servant:
if isinstance(servant, ServerTls):
if scheme and scheme != u'https':
raise ValueError("Provided scheme '{0}' incompatible with servant".format(scheme))
secured = True
scheme = u'https'
defaultPort = 443
elif isinstance(servant, Server):
if scheme and scheme != u'http':
raise ValueError("Provided scheme '{0}' incompatible with servant".format(scheme))
secured = False
scheme = 'http'
defaultPort = 80
else:
raise ValueError("Invalid servant type {0}".format(type(servant)))
else:
scheme = u'https' if scheme == u'https' else u'http'
if scheme == u'https':
secured = True # use tls socket connection
defaultPort = 443
else:
secured = False # non tls socket connection
defaultPort = 80
self.scheme = scheme
host, port = ha
port = port or defaultPort # if port not specified
ha = (host, port)
if servant:
if servant.ha != ha:
ValueError("Provided ha '{0}:{1}' incompatible with servant".format(ha[0], ha[1]))
# at some point may want to support changing the ha of provided servant
if name:
servant.name = name
else: # what about timeouts for servant connections
if secured:
servant = ServerTls(store=self.store,
name=name,
ha=ha,
eha=eha,
bufsize=bufsize,
wlog=wlog,
timeout=self.timeout,
**kwa)
else:
servant = Server(store=self.store,
name=name,
ha=ha,
eha=eha,
bufsize=bufsize,
wlog=wlog,
timeout=self.timeout,
**kwa)
self.secured = secured
self.servant = servant
def open(self):
"""
Return result of .servant.reopen()
"""
return self.servant.reopen()
def close(self):
"""
Call .servant.closeAll()
"""
self.servant.closeAll()
def idle(self):
"""
Returns True if no connections have requests in process
Useful for debugging
"""
idle = True
for requestant in self.reqs.values():
if not requestant.ended:
idle = False
break
if idle:
for responder in self.reps.values():
if not responder.ended:
idle = False
break
return idle
def buildEnviron(self, requestant):
"""
Returns wisgi environment dictionary for supplied requestant
"""
environ = odict() # maybe should be modict for cookies or other repeated headers
# WSGI variables
environ['wsgi.version'] = (1, 0)
environ['wsgi.url_scheme'] = self.scheme
environ['wsgi.input'] = io.BytesIO(requestant.body)
environ['wsgi.errors'] = sys.stderr
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = False
environ['wsgi.run_once'] = False
environ["wsgi.server_name"] = self.servant.name
environ["wsgi.server_version"] = (1, 0)
# Required CGI variables
environ['REQUEST_METHOD'] = requestant.method # GET
environ['SERVER_NAME'] = self.servant.eha[0] # localhost
environ['SERVER_PORT'] = str(self.servant.eha[1]) # 8888
environ['SERVER_PROTOCOL'] = "HTTP/{0}.{1}".format(*requestant.version) # used by request http/1.1
environ['SCRIPT_NAME'] = u''
environ['PATH_INFO'] = requestant.path # /hello?name=john
# Optional CGI variables
environ['QUERY_STRING'] = requestant.query # name=john
environ['REMOTE_ADDR'] = requestant.incomer.ca
environ['CONTENT_TYPE'] = requestant.headers.get('content-type', '')
if requestant.length is not None:
environ['CONTENT_LENGTH'] = str(requestant.length)
# recieved http headers mapped to all caps with HTTP_ prepended
for key, value in requestant.headers.items():
key = "HTTP_" + key.replace("-", "_").upper()
environ[key] = value
return environ
def closeConnection(self, ca):
"""
Close and remove connection given by ca
"""
self.servant.removeIx(ca)
if ca in self.reqs:
self.reqs[ca].close() # this signals request parser
del self.reqs[ca]
if ca in self.reps:
self.reps[ca].close() # this signals response handler
del self.reps[ca]
def serviceConnects(self):
"""
Service new incoming connections
Create requestants
Timeout stale connections
"""
self.servant.serviceConnects()
for ca, ix in self.servant.ixes.items():
if ix.cutoff:
self.closeConnection(ca)
continue
if ca not in self.reqs: # point requestant.msg to incomer.rxbs
self.reqs[ca] = Requestant(msg=ix.rxbs, incomer=ix)
if ix.timeout > 0.0 and ix.timer.expired:
self.closeConnection(ca)
def serviceReqs(self):
"""
Service pending requestants
"""
for ca, requestant in self.reqs.items():
if requestant.parser:
try:
requestant.parse()
except httping.HTTPException as ex: # this may be superfluous
#requestant.errored = True
#requestant.error = str(ex)
#requestant.ended = True
sys.stderr.write(str(ex))
self.closeConnection(ca)
continue
if requestant.ended:
if requestant.errored: # parse may swallow error but set .errored and .error
sys.stderr.write(requestant.error)
self.closeConnection(ca)
continue
console.concise("Parsed Request:\n{0} {1} {2}\n"
"{3}\n{4}\n".format(requestant.method,
requestant.path,
requestant.version,
requestant.headers,
requestant.body))
# create or restart wsgi app responder here
environ = self.buildEnviron(requestant)
if ca not in self.reps:
chunkable = True if requestant.version >= (1, 1) else False
responder = Responder(incomer=requestant.incomer,
app=self.app,
environ=environ,
chunkable=chunkable)
self.reps[ca] = responder
else: # reuse
responder = self.reps[ca]
responder.reset(environ=environ)
def serviceReps(self):
"""
Service pending responders
"""
for ca, responder in self.reps.items():
if responder.closed:
self.closeConnection(ca)
continue
if not responder.ended:
responder.service()
if responder.ended:
requestant = self.reqs[ca]
if requestant.persisted:
if requestant.parser is None: # reuse
requestant.makeParser() # resets requestant parser
else: # not persistent so close and remove requestant and responder
ix = self.servant.ixes[ca]
if not ix.txes: # wait for outgoing txes to be empty
self.closeConnection(ca)
def serviceAll(self):
"""
Service request response
"""
self.serviceConnects()
self.servant.serviceReceivesAllIx()
self.serviceReqs()
self.serviceReps()
self.servant.serviceTxesAllIx()
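# Hedged usage sketch (names and timings are illustrative; exampleWsgiApp is the
# illustrative callable defined above):
#     valet = Valet(port=8080, app=exampleWsgiApp)
#     valet.open()
#     while True:
#         valet.serviceAll()
#         valet.store.advanceStamp(0.0625)  # advance timers so idle connections can expire
#         time.sleep(0.0625)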
class CustomResponder(object):
"""
Nonblocking HTTP Server Response class
HTTP/1.1 200 OK\r\n
Content-Length: 122\r\n
Content-Type: application/json\r\n
Date: Thu, 30 Apr 2015 19:37:17 GMT\r\n
Server: IoBook.local\r\n\r\n
"""
HttpVersionString = httping.HTTP_11_VERSION_STRING # http version string
def __init__(self,
steward=None,
status=200, # integer
headers=None,
body=b'',
data=None):
"""
Initialize Instance
steward = managing Steward instance
status = response status code
headers = http response headers
body = http response body
data = dict to jsonify as body if provided
"""
self.steward = steward
self.status = status
self.headers = lodict(headers) if headers else lodict()
if body and isinstance(body, unicode): # use default
# RFC 2616 Section 3.7.1 default charset of iso-8859-1.
body = body.encode('iso-8859-1')
self.body = body or b''
self.data = data
self.ended = False # True if response generated completed
self.msg = b"" # for debugging
self.lines = [] # for debugging
self.head = b"" # for debugging
def reinit(self,
status=None, # integer
headers=None,
body=None,
data=None):
"""
Reinitialize anything that is not None
This enables creating another response on a connection
"""
if status is not None:
self.status = status
if headers is not None:
self.headers = lodict(headers)
if body is not None: # body should be bytes
if isinstance(body, unicode):
# RFC 2616 Section 3.7.1 default charset of iso-8859-1.
body = body.encode('iso-8859-1')
self.body = body
else:
self.body = b''
if data is not None:
self.data = data
else:
self.data = None
def build(self,
status=None,
headers=None,
body=None,
data=None):
"""
Build and return response message
"""
self.reinit(status=status,
headers=headers,
body=body,
data=data)
self.lines = []
startLine = "{0} {1} {2}".format(self.HttpVersionString,
self.status,
httping.STATUS_DESCRIPTIONS[self.status])
try:
startLine = startLine.encode('ascii')
except UnicodeEncodeError:
startLine = startLine.encode('idna')
self.lines.append(startLine)
if u'server' not in self.headers: # create Server header
self.headers[u'server'] = "Ioflo Server"
if u'date' not in self.headers: # create Date header
self.headers[u'date'] = httping.httpDate1123(datetime.datetime.utcnow())
if self.data is not None:
body = ns2b(json.dumps(self.data, separators=(',', ':')))
self.headers[u'content-type'] = u'application/json; charset=utf-8'
else:
body = self.body
if body and (u'content-length' not in self.headers):
self.headers[u'content-length'] = str(len(body))
for name, value in self.headers.items():
self.lines.append(httping.packHeader(name, value))
self.lines.extend((b"", b""))
self.head = CRLF.join(self.lines) # b'/r/n'
self.msg = self.head + body
self.ended = True
return self.msg
class Steward(object):
"""
Manages the associated requestant and responder for an incoming connection
"""
def __init__(self,
incomer,
requestant=None,
responder=None,
dictable=False):
"""
incomer = Incomer instance for connection
requestant = Requestant instance for connection
responder = Responder instance for connection
dictable = True if should attempt to convert request body as json
"""
self.incomer = incomer
if requestant is None:
requestant = Requestant(msg=self.incomer.rxbs,
incomer=incomer,
dictable=dictable)
self.requestant = requestant
if responder is None:
responder = CustomResponder(steward=self)
self.responder = responder
self.waited = False # True if waiting for reponse to finish
self.msg = b"" # outgoing msg bytes
def refresh(self):
"""
Restart incomer timer
"""
        self.incomer.timer.restart()
def respond(self):
"""
Respond to request Override in subclass
Echo request
"""
console.concise("Responding to Request:\n{0} {1} {2}\n"
"{3}\n{4}\n".format(self.requestant.method,
self.requestant.path,
self.requestant.version,
self.requestant.headers,
self.requestant.body))
data = odict()
data['version'] = "HTTP/{0}.{1}".format(*self.requestant.version)
data['method'] = self.requestant.method
pathSplits = urlsplit(unquote(self.requestant.url))
path = pathSplits.path
data['path'] = path
query = pathSplits.query
qargs = odict()
qargs, query = httping.updateQargsQuery(qargs, query)
data['qargs'] = qargs
fragment = pathSplits.fragment
data['fragment'] = fragment
data['headers'] = copy.copy(self.requestant.headers) # make copy
data['body'] = self.requestant.body.decode('utf-8')
data['data'] = copy.copy(self.requestant.data) # make copy
msg = self.responder.build(status=200, data=data)
self.incomer.tx(msg)
self.waited = not self.responder.ended
def pour(self):
"""
Run generator to stream response message
"""
# putnext generator here
if self.responder.ended:
self.waited = False
else:
self.refresh()
class Porter(object):
"""
Porter class nonblocking HTTP server
"""
Timeout = 5.0 # default server connection timeout
def __init__(self,
servant=None,
store=None,
stewards=None,
name='',
bufsize=8096,
wlog=None,
ha=None,
host=u'',
port=None,
eha=None,
scheme=u'',
dictable=False,
timeout=None,
**kwa):
"""
Initialization method for instance.
servant = instance of Server or ServerTls or None
store = Datastore for timers
stewards = odict of Steward instances
kwa needed to pass additional parameters to servant
        if servant instances are not provided (None)
some or all of these parameters will be used for initialization
name = user friendly name for servant
bufsize = buffer size
wlog = WireLog instance if any
ha = host address duple (host, port) for local servant listen socket
host = host address for local servant listen socket, '' means any interface on host
port = socket port for local servant listen socket
eha = external destination address for incoming connections used in TLS
scheme = http scheme u'http' or u'https' or empty
dictable = Boolean flag If True attempt to convert body from json for requestants
"""
self.store = store or storing.Store(stamp=0.0)
self.stewards = stewards if stewards is not None else odict()
self.dictable = True if dictable else False # for stewards
self.timeout = timeout if timeout is not None else self.Timeout
        ha = ha or (host, port)  # ha = host address takes precedence over host, port
if servant:
if isinstance(servant, ServerTls):
if scheme and scheme != u'https':
raise ValueError("Provided scheme '{0}' incompatible with servant".format(scheme))
secured = True
scheme = u'https'
defaultPort = 443
elif isinstance(servant, Server):
if scheme and scheme != u'http':
raise ValueError("Provided scheme '{0}' incompatible with servant".format(scheme))
secured = False
scheme = 'http'
defaultPort = 80
else:
raise ValueError("Invalid servant type {0}".format(type(servant)))
else:
scheme = u'https' if scheme == u'https' else u'http'
if scheme == u'https':
secured = True # use tls socket connection
defaultPort = 443
else:
secured = False # non tls socket connection
defaultPort = 80
host, port = ha
port = port or defaultPort # if port not specified
ha = (host, port)
if servant:
if servant.ha != ha:
                raise ValueError("Provided ha '{0}:{1}' incompatible with servant".format(ha[0], ha[1]))
# at some point may want to support changing the ha of provided servant
else: # what about timeouts for servant connections
if secured:
servant = ServerTls(store=self.store,
name=name,
ha=ha,
eha=eha,
bufsize=bufsize,
wlog=wlog,
timeout=self.timeout,
**kwa)
else:
servant = Server(store=self.store,
name=name,
ha=ha,
eha=eha,
bufsize=bufsize,
wlog=wlog,
timeout=self.timeout,
**kwa)
self.secured = secured
self.servant = servant
def idle(self):
"""
Returns True if no connections have requests in process
Useful for debugging
"""
idle = True
for steward in self.stewards.values():
if not steward.requestant.ended:
idle = False
break
return idle
def closeConnection(self, ca):
"""
Close and remove connection and associated steward given by ca
"""
self.servant.removeIx(ca)
del self.stewards[ca]
def serviceConnects(self):
"""
Service new incoming connections
Create requestants
Timeout stale connections
"""
self.servant.serviceConnects()
for ca, ix in self.servant.ixes.items():
# check for and handle cutoff connections by client here
if ca not in self.stewards:
self.stewards[ca] = Steward(incomer=ix, dictable=self.dictable)
if ix.timeout > 0.0 and ix.timer.expired:
self.closeConnection(ca)
def serviceStewards(self):
"""
Service pending requestants and responders
"""
for ca, steward in self.stewards.items():
if not steward.waited:
steward.requestant.parse()
if steward.requestant.ended:
steward.requestant.dictify()
console.concise("Parsed Request:\n{0} {1} {2}\n"
"{3}\n{4}\n".format(steward.requestant.method,
steward.requestant.path,
steward.requestant.version,
steward.requestant.headers,
steward.requestant.body))
steward.respond()
if steward.waited:
steward.pour()
if not steward.waited and steward.requestant.ended:
if steward.requestant.persisted:
steward.requestant.makeParser() #set up for next time
else: # remove and close connection
self.closeConnection(ca)
def serviceAll(self):
"""
Service request response
"""
self.serviceConnects()
self.servant.serviceReceivesAllIx()
self.serviceStewards()
self.servant.serviceTxesAllIx()
```
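A minimal sketch of driving the Porter server defined above from a plain polling loop. The import path, address, and loop cadence are assumptions for illustration, not part of the original module; a real ioflo runtime would also advance the store stamp so per-connection timers can expire.

```python
# Hypothetical usage sketch for Porter (not part of the original file).
# The module path and host/port are assumed for illustration.
import time

from ioflo.base import storing
from ioflo.aio.http.serving import Porter  # assumed module path for the class above

store = storing.Store(stamp=0.0)
porter = Porter(store=store, host='127.0.0.1', port=8080, dictable=True)
porter.servant.reopen()  # open the nonblocking listen socket

try:
    while True:
        porter.serviceAll()  # accept, receive, respond, transmit
        # NOTE: a real runtime would advance store.stamp here so that
        # per-connection StoreTimer timeouts can expire.
        time.sleep(0.05)
finally:
    porter.servant.closeAll()
```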
#### File: aio/tcp/serving.py
```python
from __future__ import absolute_import, division, print_function
import sys
import os
import socket
import errno
import platform
from collections import deque
from binascii import hexlify
try:
import ssl
except ImportError:
pass
# Import ioflo libs
from ...aid.sixing import *
from ...aid.odicting import odict
from ...aid.timing import StoreTimer
from ...aid.consoling import getConsole
from .. import aioing
from ...base import storing
console = getConsole()
def initServerContext(context=None,
version=None,
certify=None,
keypath=None,
certpath=None,
cafilepath=None
):
"""
Initialize and return context for TLS Server
IF context is None THEN create a context
IF version is None THEN create context using ssl library default
ELSE create context with version
If certify is not None then use certify value provided Otherwise use default
context = context object for tls/ssl If None use default
version = ssl version If None use default
certify = cert requirement If None use default
ssl.CERT_NONE = 0
ssl.CERT_OPTIONAL = 1
ssl.CERT_REQUIRED = 2
keypath = pathname of local server side PKI private key file path
If given apply to context
certpath = pathname of local server side PKI public cert file path
If given apply to context
cafilepath = Cert Authority file path to use to verify client cert
If given apply to context
"""
if context is None: # create context
if not version: # use default context
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
context.verify_mode = certify if certify is not None else ssl.CERT_REQUIRED
else: # create context with specified protocol version
context = ssl.SSLContext(version)
# disable bad protocols versions
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
# disable compression to prevent CRIME attacks (OpenSSL 1.0+)
context.options |= getattr(ssl._ssl, "OP_NO_COMPRESSION", 0)
            # Prefer the server's ciphers by default for stronger encryption
context.options |= getattr(ssl._ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
# Use single use keys in order to improve forward secrecy
context.options |= getattr(ssl._ssl, "OP_SINGLE_DH_USE", 0)
context.options |= getattr(ssl._ssl, "OP_SINGLE_ECDH_USE", 0)
# disallow ciphers with known vulnerabilities
context.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
context.verify_mode = certify if certify is not None else ssl.CERT_REQUIRED
if cafilepath:
context.load_verify_locations(cafile=cafilepath,
capath=None,
cadata=None)
elif context.verify_mode != ssl.CERT_NONE:
context.load_default_certs(purpose=ssl.Purpose.CLIENT_AUTH)
if keypath or certpath:
context.load_cert_chain(certfile=certpath, keyfile=keypath)
return context
class Incomer(object):
"""
Manager class for incoming nonblocking TCP connections.
"""
Timeout = 0.0 # timeout in seconds
def __init__(self,
name=u'',
uid=0,
ha=None,
bs=None,
ca=None,
cs=None,
wlog=None,
store=None,
timeout=None,
refreshable=True):
"""
Initialization method for instance.
name = user friendly name for connection
uid = unique identifier for connection
ha = host address duple (host, port) near side of connection
        bs = buffer size for socket sends and receives
        ca = virtual host address duple (host, port) far side of connection
cs = connection socket object
wlog = WireLog object if any
store = data store reference
timeout = timeout for .timer
refreshable = True if tx/rx activity refreshes timer False otherwise
"""
self.name = name
self.uid = uid
self.ha = ha
self.bs = bs
self.ca = ca
self.cs = cs
self.wlog = wlog
self.cutoff = False # True when detect connection closed on far side
self.txes = deque() # deque of data to send
self.rxbs = bytearray() # bytearray of data received
if self.cs:
self.cs.setblocking(0) # linux does not preserve blocking from accept
self.store = store or storing.Store(stamp=0.0)
self.timeout = timeout if timeout is not None else self.Timeout
self.timer = StoreTimer(self.store, duration=self.timeout)
self.refreshable = refreshable
def shutdown(self, how=socket.SHUT_RDWR):
"""
Shutdown connected socket .cs
"""
if self.cs:
try:
self.cs.shutdown(how) # shutdown socket
except socket.error as ex:
pass
def shutdownSend(self):
"""
Shutdown send on connected socket .cs
"""
if self.cs:
try:
self.shutdown(how=socket.SHUT_WR) # shutdown socket
except socket.error as ex:
pass
def shutdownReceive(self):
"""
Shutdown receive on connected socket .cs
"""
if self.cs:
try:
self.shutdown(how=socket.SHUT_RD) # shutdown socket
except socket.error as ex:
pass
def shutclose(self):
"""
Shutdown and close connected socket .cs
"""
if self.cs:
self.shutdown()
self.cs.close() #close socket
self.cs = None
close = shutclose # alias
def refresh(self):
"""
Restart timer
"""
self.timer.restart()
def receive(self):
"""
Perform non blocking receive on connected socket .cs
If no data then returns None
If connection closed then returns ''
Otherwise returns data
data is string in python2 and bytes in python3
"""
try:
data = self.cs.recv(self.bs)
except socket.error as ex:
# ex.args[0] is always ex.errno for better compat
if ex.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
return None
elif ex.args[0] in (errno.ECONNRESET,
errno.ENETRESET,
errno.ENETUNREACH,
errno.EHOSTUNREACH,
errno.ENETDOWN,
errno.EHOSTDOWN,
errno.ETIMEDOUT,
errno.ECONNREFUSED):
                emsg = ("socket.error = {0}: Incomer at {1} while receiving "
                        "from {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
self.cutoff = True # this signals need to close/reopen connection
return bytes() # data empty
else:
emsg = ("socket.error = {0}: Incomer at {1} while receiving"
" from {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
raise # re-raise
if data: # connection open
if console._verbosity >= console.Wordage.profuse: # faster to check
try:
load = data.decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data).decode("ASCII"))
cmsg = ("Incomer at {0}, received from {1}:\n------------\n"
"{2}\n\n".format(self.ha, self.ca, load))
console.profuse(cmsg)
if self.wlog: # log over the wire rx
self.wlog.writeRx(self.ca, data)
if self.refreshable:
self.refresh()
else: # data empty so connection closed on other end
self.cutoff = True
return data
def serviceReceives(self):
"""
Service receives until no more
"""
while not self.cutoff:
data = self.receive()
if not data:
break
self.rxbs.extend(data)
def serviceReceiveOnce(self):
'''
Retrieve from server only one reception
'''
if not self.cutoff:
data = self.receive()
if data:
self.rxbs.extend(data)
def clearRxbs(self):
"""
Clear .rxbs
"""
del self.rxbs[:]
def catRxbs(self):
"""
Return copy and clear .rxbs
"""
rx = self.rxbs[:]
self.clearRxbs()
return rx
def tailRxbs(self, index):
"""
Returns duple of (bytes(self.rxbs[index:]), len(self.rxbs))
slices the tail from index to end and converts to bytes
also the length of .rxbs to be used to update index
"""
return (bytes(self.rxbs[index:]), len(self.rxbs))
def send(self, data):
"""
Perform non blocking send on connected socket .cs.
Return number of bytes sent
data is string in python2 and bytes in python3
"""
try:
result = self.cs.send(data) #result is number of bytes sent
except socket.error as ex:
# ex.args[0] is always ex.errno for better compat
if ex.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
result = 0 # blocked try again
elif ex.args[0] in (errno.ECONNRESET,
errno.ENETRESET,
errno.ENETUNREACH,
errno.EHOSTUNREACH,
errno.ENETDOWN,
errno.EHOSTDOWN,
errno.ETIMEDOUT,
errno.ECONNREFUSED):
                emsg = ("socket.error = {0}: Incomer at {1} while sending "
                        "to {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
self.cutoff = True # this signals need to close/reopen connection
result = 0
else:
emsg = ("socket.error = {0}: Incomer at {1} while "
"sending to {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
raise
if result:
if console._verbosity >= console.Wordage.profuse:
try:
load = data[:result].decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data[:result]).decode("ASCII"))
cmsg = ("Incomer at {0}, sent {1} bytes to {2}:\n------------\n"
"{3}\n\n".format(self.ha, result, self.ca, load))
console.profuse(cmsg)
if self.wlog:
self.wlog.writeTx(self.ca, data[:result])
if self.refreshable:
self.refresh()
return result
def tx(self, data):
'''
Queue data onto .txes
'''
self.txes.append(data)
def serviceTxes(self):
"""
Service transmits
For each tx if all bytes sent then keep sending until partial send
or no more to send
If partial send reattach and return
"""
while self.txes and not self.cutoff:
data = self.txes.popleft()
count = self.send(data)
if count < len(data): # put back unsent portion
self.txes.appendleft(data[count:])
break # try again later
class IncomerTls(Incomer):
"""
Incomer with Nonblocking TLS/SSL support
Manager class for incoming nonblocking TCP connections.
"""
def __init__(self,
context=None,
version=None,
certify=None,
keypath=None,
certpath=None,
cafilepath=None,
**kwa):
"""
Initialization method for instance.
context = context object for tls/ssl If None use default
version = ssl version If None use default
certify = cert requirement If None use default
ssl.CERT_NONE = 0
ssl.CERT_OPTIONAL = 1
ssl.CERT_REQUIRED = 2
keypath = pathname of local server side PKI private key file path
If given apply to context
certpath = pathname of local server side PKI public cert file path
If given apply to context
cafilepath = Cert Authority file path to use to verify client cert
If given apply to context
"""
super(IncomerTls, self).__init__(**kwa)
self.connected = False # True once ssl handshake completed
self.context = initServerContext(context=context,
version=version,
certify=certify,
keypath=keypath,
certpath=certpath,
cafilepath=cafilepath
)
self.wrap()
def shutclose(self):
"""
Shutdown and close connected socket .cs
"""
if self.cs:
self.shutdown()
self.cs.close() #close socket
self.cs = None
self.connected = False
close = shutclose # alias
def wrap(self):
"""
Wrap socket .cs in ssl context
"""
self.cs = self.context.wrap_socket(self.cs,
server_side=True,
do_handshake_on_connect=False)
def handshake(self):
"""
Attempt nonblocking ssl handshake to .ha
Returns True if successful
Returns False if not so try again later
"""
try:
self.cs.do_handshake()
except ssl.SSLError as ex:
if ex.errno in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
return False
elif ex.errno in (ssl.SSL_ERROR_EOF, ):
self.shutclose()
raise # should give up here nicely
else:
self.shutclose()
raise
except OSError as ex:
self.shutclose()
if ex.errno in (errno.ECONNABORTED, ):
raise # should give up here nicely
raise
except Exception as ex:
self.shutclose()
raise
self.connected = True
return True
def serviceHandshake(self):
"""
Service connection and handshake attempt
If not already accepted and handshaked Then
make nonblocking attempt
Returns .handshaked
"""
if not self.connected:
self.handshake()
return self.connected
def receive(self):
"""
Perform non blocking receive on connected socket .cs
If no data then returns None
If connection closed then returns ''
Otherwise returns data
data is string in python2 and bytes in python3
"""
try:
data = self.cs.recv(self.bs)
except socket.error as ex: # ssl.SSLError is a subtype of socket.error
# ex.args[0] is always ex.errno for better compat
if ex.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
return None # blocked waiting for data
elif ex.args[0] in (errno.ECONNRESET,
errno.ENETRESET,
errno.ENETUNREACH,
errno.EHOSTUNREACH,
errno.ENETDOWN,
errno.EHOSTDOWN,
errno.ETIMEDOUT,
errno.ECONNREFUSED,
ssl.SSLEOFError):
emsg = ("socket.error = {0}: IncomerTLS at {1} while receiving"
" from {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
self.cutoff = True # this signals need to close/reopen connection
return bytes() # data empty
else:
emsg = ("socket.error = {0}: IncomerTLS at {1} while receiving"
" from {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
raise # re-raise
if data: # connection open
if console._verbosity >= console.Wordage.profuse: # faster to check
try:
load = data.decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data).decode("ASCII"))
cmsg = ("Incomer at {0}, received from {1}:\n------------\n"
"{2}\n\n".format(self.ha, self.ca, load))
console.profuse(cmsg)
if self.wlog: # log over the wire rx
self.wlog.writeRx(self.ca, data)
else: # data empty so connection closed on other end
self.cutoff = True
return data
def send(self, data):
"""
Perform non blocking send on connected socket .cs.
Return number of bytes sent
data is string in python2 and bytes in python3
"""
try:
result = self.cs.send(data) #result is number of bytes sent
except socket.error as ex: # ssl.SSLError is a subtype of socket.error
# ex.args[0] is always ex.errno for better compat
if ex.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
result = 0 # blocked try again
elif ex.args[0] in (errno.ECONNRESET,
errno.ENETRESET,
errno.ENETUNREACH,
errno.EHOSTUNREACH,
errno.ENETDOWN,
errno.EHOSTDOWN,
errno.ETIMEDOUT,
errno.ECONNREFUSED,
ssl.SSLEOFError):
emsg = ("socket.error = {0}: IncomerTLS at {1} while "
"sending to {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
self.cutoff = True # this signals need to close/reopen connection
result = 0
else:
emsg = ("socket.error = {0}: IncomerTLS at {1} while "
"sending to {2}\n".format(ex, self.ha, self.ca))
console.profuse(emsg)
raise
if result:
if console._verbosity >= console.Wordage.profuse:
try:
load = data[:result].decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data[:result]).decode("ASCII"))
cmsg = ("Incomer at {0}, sent {1} bytes to {2}:\n------------\n"
"{3}\n\n".format(self.ha, result, self.ca, load))
console.profuse(cmsg)
if self.wlog:
self.wlog.writeTx(self.ca, data[:result])
return result
class Acceptor(object):
"""
Nonblocking TCP Socket Acceptor Class.
Listen socket for incoming TCP connections
"""
def __init__(self,
name=u'',
ha=None,
host=u'',
port=56000,
eha=None,
bufsize=8096,
wlog=None):
"""
Initialization method for instance.
name = user friendly name string for Acceptor
ha = host address duple (host, port) for listen socket
host = host address for listen socket, '' means any interface on host
port = socket port for listen socket
eha = external destination address for incoming connections used in tls
bufsize = buffer size
wlog = WireLog object if any
"""
self.name = name
self.ha = ha or (host, port) # ha = host address
eha = eha or self.ha
if eha:
host, port = eha
host = aioing.normalizeHost(host)
if host in ('0.0.0.0',):
host = '127.0.0.1'
elif host in ("::", "0:0:0:0:0:0:0:0"):
host = "::1"
eha = (host, port)
self.eha = eha
self.bs = bufsize
self.wlog = wlog
self.ss = None # listen socket for accepts
self.axes = deque() # deque of duple (ca, cs) accepted connections
self.opened = False
def actualBufSizes(self):
"""
Returns duple of the the actual socket send and receive buffer size
(send, receive)
"""
if not self.ss:
return (0, 0)
return (self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF),
self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
def open(self):
"""
        Opens and binds listen socket in non blocking mode.
if socket not closed properly, binding socket gets error
socket.error: (48, 'Address already in use')
"""
#create server socket ss to listen on
self.ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# make socket address reusable.
# the SO_REUSEADDR flag tells the kernel to reuse a local socket in
# TIME_WAIT state, without waiting for its natural timeout to expire.
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Linux TCP allocates twice the requested size
if sys.platform.startswith('linux'):
bs = 2 * self.bs # get size is twice the set size
else:
bs = self.bs
if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) < bs:
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.bs)
if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) < bs:
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.bs)
self.ss.setblocking(0) #non blocking socket
try: # bind to listen socket (host, port) to receive connections
self.ss.bind(self.ha)
self.ss.listen(5)
except socket.error as ex:
console.terse("socket.error = {0}\n".format(ex))
return False
self.ha = self.ss.getsockname() # get resolved ha after bind
self.opened = True
return True
def reopen(self):
"""
Idempotently opens listen socket
"""
self.close()
return self.open()
def close(self):
"""
Closes listen socket.
"""
if self.ss:
try:
self.ss.shutdown(socket.SHUT_RDWR) # shutdown socket
except socket.error as ex:
#console.terse("socket.error = {0}\n".format(ex))
pass
self.ss.close() #close socket
self.ss = None
self.opened = False
def accept(self):
"""
Accept new connection nonblocking
Returns duple (cs, ca) of connected socket and connected host address
Otherwise if no new connection returns (None, None)
"""
# accept new virtual connected socket created from server socket
try:
cs, ca = self.ss.accept() # virtual connection (socket, host address)
except socket.error as ex:
if ex.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
return (None, None) # nothing yet
emsg = ("socket.error = {0}: server at {1} while "
"accepting \n".format(ex, self.ha))
console.profuse(emsg)
raise # re-raise
return (cs, ca)
def serviceAccepts(self):
"""
Service any accept requests
        Adds each accepted connection duple (cs, ca) to .axes
"""
while True:
cs, ca = self.accept()
if not cs:
break
self.axes.append((cs, ca))
class Server(Acceptor):
"""
Nonblocking TCP Socket Server Class.
Listen socket for incoming TCP connections
Incomer sockets for accepted connections
"""
Timeout = 1.0 # timeout in seconds
def __init__(self,
store=None,
timeout=None,
**kwa):
"""
Initialization method for instance.
store = data store reference if any
timeout = default timeout for incoming connections
"""
super(Server, self).__init__(**kwa)
self.store = store or storing.Store(stamp=0.0)
self.timeout = timeout if timeout is not None else self.Timeout
self.ixes = odict() # ready to rx tx incoming connections, Incomer instances
def serviceAxes(self):
"""
Service axes
For each newly accepted connection in .axes create Incomer
and add to .ixes keyed by ca
"""
self.serviceAccepts() # populate .axes
while self.axes:
cs, ca = self.axes.popleft()
if ca != cs.getpeername(): #or self.eha != cs.getsockname():
raise ValueError("Accepted socket host addresses malformed for "
"peer. eha {0} != {1}, ca {2} != {3}\n".format(
self.eha, cs.getsockname(), ca, cs.getpeername()))
incomer = Incomer(ha=cs.getsockname(),
bs=self.bs,
ca=cs.getpeername(),
cs=cs,
wlog=self.wlog,
store=self.store,
timeout=self.timeout)
if ca in self.ixes and self.ixes[ca] is not incomer:
                self.shutdownIx(ca)
self.ixes[ca] = incomer
def serviceConnects(self):
"""
Service connects is method name to be used
"""
self.serviceAxes()
def shutdownIx(self, ca, how=socket.SHUT_RDWR):
"""
Shutdown incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].shutdown(how=how)
def shutdownSendIx(self, ca):
"""
Shutdown send on incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].shutdownSend()
def shutdownReceiveIx(self, ca):
"""
        Shutdown receive on incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].shutdownReceive()
def closeIx(self, ca):
"""
Shutdown and close incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].close()
def closeAllIx(self):
"""
Shutdown and close all incomer connections
"""
for ix in self.ixes.values():
ix.close()
def closeAll(self):
"""
Close all sockets
"""
self.close()
self.closeAllIx()
def removeIx(self, ca, shutclose=True):
"""
Remove incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
if shutclose:
self.ixes[ca].shutclose()
del self.ixes[ca]
def catRxbsIx(self, ca):
"""
Return copy and clear rxbs for incomer given by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
return (self.ixes[ca].catRxbs())
def serviceReceivesIx(self, ca):
"""
Service receives for incomer by connection address ca
"""
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].serviceReceives()
def serviceReceivesAllIx(self):
"""
Service receives for all incomers in .ixes
"""
for ix in self.ixes.values():
ix.serviceReceives()
def transmitIx(self, data, ca):
'''
Queue data onto .txes for incomer given by connection address ca
'''
if ca not in self.ixes:
emsg = "Invalid connection address '{0}'".format(ca)
raise ValueError(emsg)
self.ixes[ca].tx(data)
def serviceTxesAllIx(self):
"""
Service transmits for all incomers in .ixes
"""
for ix in self.ixes.values():
ix.serviceTxes()
def serviceAll(self):
"""
Service connects and service receives and txes for all ix.
"""
self.serviceConnects()
self.serviceReceivesAllIx()
self.serviceTxesAllIx()
class ServerTls(Server):
"""
Server with Nonblocking TLS/SSL support
Nonblocking TCP Socket Server Class.
Listen socket for incoming TCP connections
IncomerTLS sockets for accepted connections
"""
def __init__(self,
context=None,
version=None,
certify=None,
keypath=None,
certpath=None,
cafilepath=None,
**kwa):
"""
Initialization method for instance.
"""
super(ServerTls, self).__init__(**kwa)
self.cxes = odict() # accepted incoming connections, IncomerTLS instances
self.context = context
self.version = version
self.certify = certify
self.keypath = keypath
self.certpath = certpath
self.cafilepath = cafilepath
self.context = initServerContext(context=context,
version=version,
certify=certify,
keypath=keypath,
certpath=certpath,
cafilepath=cafilepath
)
def serviceAxes(self):
"""
        Service accepted connections
For each new accepted connection create IncomerTLS and add to .cxes
Not Handshaked
"""
self.serviceAccepts() # populate .axes
while self.axes:
cs, ca = self.axes.popleft()
if ca != cs.getpeername() or self.eha != cs.getsockname():
raise ValueError("Accepted socket host addresses malformed for "
"peer ha {0} != {1}, ca {2} != {3}\n".format(
self.ha, cs.getsockname(), ca, cs.getpeername()))
incomer = IncomerTls(ha=cs.getsockname(),
bs=self.bs,
ca=cs.getpeername(),
cs=cs,
wlog=self.wlog,
store=self.store,
timeout=self.timeout,
context=self.context,
version=self.version,
certify=self.certify,
keypath=self.keypath,
certpath=self.certpath,
cafilepath=self.cafilepath,
)
self.cxes[ca] = incomer
def serviceCxes(self):
"""
Service handshakes for every incomer in .cxes
If successful move to .ixes
"""
        for ca, cx in list(self.cxes.items()):  # copy since entries may be removed
if cx.serviceHandshake():
self.ixes[ca] = cx
del self.cxes[ca]
def serviceConnects(self):
"""
Service accept and handshake attempts
If not already accepted and handshaked Then
make nonblocking attempt
For each successful handshaked add to .ixes
Returns handshakeds
"""
self.serviceAxes()
self.serviceCxes()
class Peer(Server):
"""
Nonblocking TCP Socket Peer Class.
Supports both incoming and outgoing connections.
"""
def __init__(self, **kwa):
"""
Initialization method for instance.
"""
        super(Peer, self).__init__(**kwa)
self.oxes = odict() # outgoers indexed by ha
```
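A minimal echo-server sketch for the Server class above, serviced from a polling loop. The module path and port are assumptions for illustration.

```python
# Hypothetical echo-server sketch (not part of the original file).
import time

from ioflo.aio.tcp.serving import Server  # assumed module path for the class above

server = Server(ha=('127.0.0.1', 6101), bufsize=8096)
server.reopen()  # bind and listen nonblocking

try:
    while True:
        server.serviceConnects()        # accept new connections into .ixes
        server.serviceReceivesAllIx()   # drain sockets into per-connection .rxbs
        for ca in list(server.ixes):
            data = server.catRxbsIx(ca)  # copy and clear received bytes
            if data:
                server.transmitIx(data, ca)  # echo back to the sender
        server.serviceTxesAllIx()       # flush queued transmits
        time.sleep(0.05)
finally:
    server.closeAll()
```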
#### File: aio/test/_test_httping_w_ext_server.py
```python
import sys
if sys.version > '3':
xrange = range
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
import time
import tempfile
import shutil
import socket
import errno
if sys.version > '3':
from http.client import HTTPConnection
else:
from httplib import HTTPConnection
if sys.version > '3':
from urllib.parse import urlsplit
else:
from urlparse import urlsplit
try:
import simplejson as json
except ImportError:
import json
# Import ioflo libs
from ioflo.aid.sixing import *
from ioflo.aid.odicting import odict
#from ioflo.test import testing
from ioflo.aio import nonblocking
from ioflo.aid import httping
from ioflo.aid.timing import Timer
from ioflo.aid.consoling import getConsole
console = getConsole()
from ioflo.aid import httping
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(unittest.TestCase):
"""
Test Case
"""
def setUp(self):
"""
"""
pass
def tearDown(self):
"""
"""
pass
def testBasic(self):
"""
Test Basic
"""
console.terse("{0}\n".format(self.testBasic.__doc__))
console.terse("{0}\n".format("Connecting ...\n"))
hc = HTTPConnection('127.0.0.1', port=8080, timeout=1.0,)
hc.connect()
console.terse("{0}\n".format("Get '/echo?name=fame' ...\n"))
headers = odict([('Accept', 'application/json')])
hc.request(method='GET', path='/echo?name=fame', body=None, headers=headers )
response = hc.getresponse()
console.terse(str(response.fileno()) + "\n") # must call this before read
console.terse(str(response.getheaders()) + "\n")
console.terse(str(response.msg) + "\n")
console.terse(str(response.version) + "\n")
console.terse(str(response.status) + "\n")
console.terse(response.reason + "\n")
console.terse(str(response.read()) + "\n")
console.terse("{0}\n".format("Post ...\n"))
headers = odict([('Accept', 'application/json'), ('Content-Type', 'application/json')])
body = odict([('name', 'Peter'), ('occupation', 'Engineer')])
body = ns2b(json.dumps(body, separators=(',', ':')))
hc.request(method='POST', path='/demo', body=body, headers=headers )
response = hc.getresponse()
console.terse(str(response.fileno()) + "\n") # must call this before read
console.terse(str(response.getheaders()) + "\n")
console.terse(str(response.msg) + "\n")
console.terse(str(response.version) + "\n")
console.terse(str(response.status) + "\n")
console.terse(response.reason+ "\n")
console.terse(str(response.read()) + "\n")
#console.terse("{0}\n".format("SSE stream ...\n"))
#body = b''
#headers = odict([('Accept', 'application/json'), ('Content-Type', 'application/json')])
#hc.request(method='GET', path='/stream', body=body, headers=headers )
#response = hc.getresponse()
#console.terse(str(response.fileno()) + "\n") # must call this before read
#console.terse(str(response.getheaders()) + "\n")
#console.terse(str(response.msg) + "\n")
#console.terse(str(response.version) + "\n")
#console.terse(str(response.status) + "\n")
#console.terse(response.reason+ "\n")
#console.terse(str(response.read()) + "\n")
hc.close()
def testNonBlockingRequestEcho(self):
"""
Test NonBlocking Http client
"""
console.terse("{0}\n".format(self.testNonBlockingRequestEcho.__doc__))
console.reinit(verbosity=console.Wordage.profuse)
wireLogBeta = nonblocking.WireLog(buffify=True)
result = wireLogBeta.reopen()
eha = ('127.0.0.1', 8080)
beta = nonblocking.Outgoer(ha=eha, bufsize=131072)
self.assertIs(beta.reopen(), True)
self.assertIs(beta.accepted, False)
self.assertIs(beta.cutoff, False)
console.terse("Connecting beta to server ...\n")
while True:
beta.serviceConnect()
#alpha.serviceAccepts()
if beta.accepted: # and beta.ca in alpha.ixes
break
time.sleep(0.05)
self.assertIs(beta.accepted, True)
self.assertIs(beta.cutoff, False)
self.assertEqual(beta.ca, beta.cs.getsockname())
self.assertEqual(beta.ha, beta.cs.getpeername())
self.assertEqual(eha, beta.ha)
console.terse("{0}\n".format("Building Request ...\n"))
host = u'127.0.0.1'
port = 8080
method = u'GET'
path = u'/echo?name=fame'
console.terse("{0} from {1}:{2}{3} ...\n".format(method, host, port, path))
headers = odict([('Accept', 'application/json')])
request = httping.Requester(hostname=host,
port=port,
method=method,
path=path,
headers=headers)
msgOut = request.rebuild()
lines = [
b'GET /echo?name=fame HTTP/1.1',
b'Host: 127.0.0.1:8080',
b'Accept-Encoding: identity',
b'Content-Length: 0',
b'Accept: application/json',
b'',
b'',
]
for i, line in enumerate(lines):
self.assertEqual(line, request.lines[i])
self.assertEqual(request.head, b'GET /echo?name=fame HTTP/1.1\r\nHost: 127.0.0.1:8080\r\nAccept-Encoding: identity\r\nContent-Length: 0\r\nAccept: application/json\r\n\r\n')
self.assertEqual(msgOut, b'GET /echo?name=fame HTTP/1.1\r\nHost: 127.0.0.1:8080\r\nAccept-Encoding: identity\r\nContent-Length: 0\r\nAccept: application/json\r\n\r\n')
beta.tx(msgOut)
while beta.txes or not beta.rxbs:
beta.serviceTxes()
beta.serviceReceives()
time.sleep(0.05)
beta.serviceReceives()
msgIn, index = beta.tailRxbs(0)
self.assertTrue(msgIn.endswith(b'{"content": null, "query": {"name": "fame"}, "verb": "GET", "path": "http://127.0.0.1:8080/echo?name=fame", "action": null}'))
#response = httping.HttpResponseNb(msgIn, method=method, path=path)
response = httping.Respondent(msg=beta.rxbs, method=method, path=path)
while response.parser:
response.parse()
response.dictify()
self.assertEqual(bytes(response.body), b'{"content": null, "query": {"name": "fame"}, "verb": "GET", "url": "http://127.0.0.1:8080/echo?name=fame", "action": null}')
self.assertEqual(len(beta.rxbs), 0)
#alpha.close()
beta.close()
wireLogBeta.close()
console.reinit(verbosity=console.Wordage.concise)
def testNonBlockingRequestStream(self):
"""
Test NonBlocking Http client
"""
console.terse("{0}\n".format(self.testNonBlockingRequestStream.__doc__))
console.reinit(verbosity=console.Wordage.profuse)
wireLogBeta = nonblocking.WireLog(buffify=True)
result = wireLogBeta.reopen()
eha = ('127.0.0.1', 8080)
beta = nonblocking.Outgoer(ha=eha, bufsize=131072, wlog=wireLogBeta)
self.assertIs(beta.reopen(), True)
self.assertIs(beta.accepted, False)
self.assertIs(beta.cutoff, False)
console.terse("Connecting beta to server ...\n")
while True:
beta.serviceConnect()
#alpha.serviceAccepts()
if beta.accepted: # and beta.ca in alpha.ixes
break
time.sleep(0.05)
self.assertIs(beta.accepted, True)
self.assertIs(beta.cutoff, False)
self.assertEqual(beta.ca, beta.cs.getsockname())
self.assertEqual(beta.ha, beta.cs.getpeername())
self.assertEqual(eha, beta.ha)
console.terse("{0}\n".format("Building Request ...\n"))
host = u'127.0.0.1'
port = 8080
method = u'GET'
path = u'/stream'
console.terse("{0} from {1}:{2}{3} ...\n".format(method, host, port, path))
headers = odict([('Accept', 'application/json')])
request = httping.Requester(hostname=host,
port=port,
method=method,
path=path,
headers=headers)
msgOut = request.rebuild()
lines = [
b'GET /stream HTTP/1.1',
b'Host: 127.0.0.1:8080',
b'Accept-Encoding: identity',
b'Content-Length: 0',
b'Accept: application/json',
b'',
b'',
]
for i, line in enumerate(lines):
self.assertEqual(line, request.lines[i])
self.assertEqual(request.head, b'GET /stream HTTP/1.1\r\nHost: 127.0.0.1:8080\r\nAccept-Encoding: identity\r\nContent-Length: 0\r\nAccept: application/json\r\n\r\n')
self.assertEqual(msgOut, request.head)
beta.tx(msgOut)
while beta.txes or not beta.rxbs:
beta.serviceTxes()
beta.serviceReceives()
time.sleep(0.05)
beta.serviceReceives()
msgIn, index = beta.tailRxbs(0)
#self.assertTrue(msgIn.endswith(b'{"content": null, "query": {"name": "fame"}, "verb": "GET", "url": "http://127.0.0.1:8080/echo?name=fame", "action": null}'))
#response = httping.HttpResponseNb(msgIn, method=method, path=path)
response = httping.Respondent(msg=beta.rxbs, method=method, wlog=wireLogBeta)
timer = Timer(duration=3.0)
while response.parser and not timer.expired:
response.parse()
beta.serviceReceives()
time.sleep(0.01)
if response.parser:
response.parser.close()
response.parser = None
response.dictify()
#self.assertTrue(response.body.startswith(b'retry: 1000\n\ndata: START\n\ndata: 1\n\ndata: 2\n\ndata: 3\n\n'))
self.assertEqual(response.eventSource.retry, 1000)
self.assertTrue(len(response.events) > 2)
event = response.events.popleft()
self.assertEqual(event, {'id': None, 'name': '', 'data': 'START', 'json': None})
event = response.events.popleft()
self.assertEqual(event, {'id': None, 'name': '', 'data': '1', 'json': None})
event = response.events.popleft()
self.assertEqual(event, {'id': None, 'name': '', 'data': '2', 'json': None})
self.assertTrue(len(response.body) == 0)
self.assertTrue(len(response.eventSource.raw) == 0)
#self.assertEqual(len(beta.rxbs), 0)
#alpha.close()
beta.close()
wireLogBeta.close()
console.reinit(verbosity=console.Wordage.concise)
def testNonBlockingRequestStreamFancy(self):
"""
Test NonBlocking Http client
"""
console.terse("{0}\n".format(self.testNonBlockingRequestStreamFancy.__doc__))
console.reinit(verbosity=console.Wordage.profuse)
wireLogBeta = nonblocking.WireLog(buffify=True)
result = wireLogBeta.reopen()
#alpha = nonblocking.Server(port = 6101, bufsize=131072, wlog=wireLog)
#self.assertIs(alpha.reopen(), True)
#self.assertEqual(alpha.ha, ('0.0.0.0', 6101))
eha = ('127.0.0.1', 8080)
beta = nonblocking.Outgoer(ha=eha, bufsize=131072, wlog=wireLogBeta)
self.assertIs(beta.reopen(), True)
self.assertIs(beta.accepted, False)
self.assertIs(beta.cutoff, False)
console.terse("Connecting beta to server ...\n")
while True:
beta.serviceConnect()
#alpha.serviceAccepts()
if beta.accepted: # and beta.ca in alpha.ixes
break
time.sleep(0.05)
self.assertIs(beta.accepted, True)
self.assertIs(beta.cutoff, False)
self.assertEqual(beta.ca, beta.cs.getsockname())
self.assertEqual(beta.ha, beta.cs.getpeername())
self.assertEqual(eha, beta.ha)
console.terse("{0}\n".format("Building Request ...\n"))
host = u'127.0.0.1'
port = 8080
method = u'GET'
path = u'/fancy?idify=true;multiply=true'
console.terse("{0} from {1}:{2}{3} ...\n".format(method, host, port, path))
headers = odict([('Accept', 'application/json')])
request = httping.Requester(hostname=host,
port=port,
method=method,
path=path,
headers=headers)
msgOut = request.rebuild()
lines = [
b'GET /fancy?idify=true;multiply=true HTTP/1.1',
b'Host: 127.0.0.1:8080',
b'Accept-Encoding: identity',
b'Content-Length: 0',
b'Accept: application/json',
b'',
b'',
]
for i, line in enumerate(lines):
self.assertEqual(line, request.lines[i])
self.assertEqual(request.head, b'GET /fancy?idify=true;multiply=true HTTP/1.1\r\nHost: 127.0.0.1:8080\r\nAccept-Encoding: identity\r\nContent-Length: 0\r\nAccept: application/json\r\n\r\n')
self.assertEqual(msgOut, request.head)
beta.tx(msgOut)
while beta.txes or not beta.rxbs:
beta.serviceTxes()
beta.serviceReceives()
time.sleep(0.05)
beta.serviceReceives()
msgIn, index = beta.tailRxbs(0)
#self.assertTrue(msgIn.endswith(b'{"content": null, "query": {"name": "fame"}, "verb": "GET", "url": "http://127.0.0.1:8080/echo?name=fame", "action": null}'))
#response = httping.HttpResponseNb(msgIn, method=method, path=path)
response = httping.Respondent(msg=beta.rxbs, method=method, path=path)
timer = Timer(duration=3.0)
while response.parser and not timer.expired:
response.parse()
beta.serviceReceives()
time.sleep(0.01)
if response.parser:
response.parser.close()
response.parser = None
response.dictify()
#self.assertTrue(response.body.startswith(b'retry: 1000\n\ndata: START\n\ndata: 1\n\ndata: 2\n\ndata: 3\n\n'))
self.assertEqual(response.eventSource.retry, 1000)
self.assertTrue(len(response.events) > 2)
event = response.events.popleft()
self.assertEqual(event, {'id': '0', 'name': '', 'data': 'START', 'json': None})
event = response.events.popleft()
self.assertEqual(event, {'id': '1', 'name': '', 'data': '1\n2', 'json': None})
event = response.events.popleft()
self.assertEqual(event, {'id': '2', 'name': '', 'data': '3\n4', 'json': None})
self.assertTrue(len(response.body) == 0)
self.assertTrue(len(response.eventSource.raw) == 0)
#self.assertEqual(len(beta.rxbs), 0)
#alpha.close()
beta.close()
wireLogBeta.close()
console.reinit(verbosity=console.Wordage.concise)
def testNonBlockingRequestStreamFancyJson(self):
"""
Test NonBlocking Http client
"""
console.terse("{0}\n".format(self.testNonBlockingRequestStreamFancyJson.__doc__))
console.reinit(verbosity=console.Wordage.profuse)
wireLogBeta = nonblocking.WireLog(buffify=True)
result = wireLogBeta.reopen()
eha = ('127.0.0.1', 8080)
beta = nonblocking.Outgoer(ha=eha, bufsize=131072, wlog=wireLogBeta)
self.assertIs(beta.reopen(), True)
self.assertIs(beta.accepted, False)
self.assertIs(beta.cutoff, False)
console.terse("Connecting beta to server ...\n")
while True:
beta.serviceConnect()
#alpha.serviceAccepts()
if beta.accepted: # and beta.ca in alpha.ixes
break
time.sleep(0.05)
self.assertIs(beta.accepted, True)
self.assertIs(beta.cutoff, False)
self.assertEqual(beta.ca, beta.cs.getsockname())
self.assertEqual(beta.ha, beta.cs.getpeername())
self.assertEqual(eha, beta.ha)
console.terse("{0}\n".format("Building Request ...\n"))
host = u'127.0.0.1'
port = 8080
method = u'GET'
path = u'/fancy?idify=true;jsonify=true'
console.terse("{0} from {1}:{2}{3} ...\n".format(method, host, port, path))
headers = odict([('Accept', 'application/json')])
request = httping.Requester(hostname=host,
port=port,
method=method,
path=path,
headers=headers)
msgOut = request.rebuild()
lines = [
b'GET /fancy?idify=true;jsonify=true HTTP/1.1',
b'Host: 127.0.0.1:8080',
b'Accept-Encoding: identity',
b'Content-Length: 0',
b'Accept: application/json',
b'',
b'',
]
for i, line in enumerate(lines):
self.assertEqual(line, request.lines[i])
self.assertEqual(request.head, b'GET /fancy?idify=true;jsonify=true HTTP/1.1\r\nHost: 127.0.0.1:8080\r\nAccept-Encoding: identity\r\nContent-Length: 0\r\nAccept: application/json\r\n\r\n')
self.assertEqual(msgOut, request.head)
beta.tx(msgOut)
while beta.txes or not beta.rxbs:
beta.serviceTxes()
beta.serviceReceives()
time.sleep(0.05)
beta.serviceReceives()
msgIn, index = beta.tailRxbs(0)
response = httping.Respondent(msg=beta.rxbs,
method=method,
path=path,
reconnectable=True)
timer = Timer(duration=3.0)
while response.parser and not timer.expired:
response.parse()
beta.serviceReceives()
time.sleep(0.01)
if response.parser:
response.parser.close()
response.parser = None
response.dictify()
self.assertEqual(response.eventSource.retry, 1000)
self.assertTrue(len(response.events) > 2)
event = response.events.popleft()
self.assertEqual(event, {'id': '0', 'name': '', 'data': 'START', 'json': None})
event = response.events.popleft()
self.assertEqual(event, {'id': '1', 'name': '', 'data': None, 'json': {'count': 1}})
event = response.events.popleft()
self.assertEqual(event, {'id': '2', 'name': '', 'data': None, 'json': {'count': 2}})
self.assertTrue(len(response.body) == 0)
self.assertTrue(len(response.eventSource.raw) == 0)
beta.close()
wireLogBeta.close()
console.reinit(verbosity=console.Wordage.concise)
def testNonBlockingRequestMultipart(self):
"""
Test NonBlocking Http client
"""
console.terse("{0}\n".format(self.testNonBlockingRequestMultipart.__doc__))
console.reinit(verbosity=console.Wordage.profuse)
wireLogBeta = nonblocking.WireLog(buffify=True)
result = wireLogBeta.reopen()
eha = ('127.0.0.1', 8080)
beta = nonblocking.Outgoer(ha=eha, bufsize=131072)
self.assertIs(beta.reopen(), True)
self.assertIs(beta.accepted, False)
self.assertIs(beta.cutoff, False)
console.terse("Connecting beta to server ...\n")
while True:
beta.serviceConnect()
#alpha.serviceAccepts()
if beta.accepted: # and beta.ca in alpha.ixes
break
time.sleep(0.05)
self.assertIs(beta.accepted, True)
self.assertIs(beta.cutoff, False)
self.assertEqual(beta.ca, beta.cs.getsockname())
self.assertEqual(beta.ha, beta.cs.getpeername())
self.assertEqual(eha, beta.ha)
console.terse("{0}\n".format("Building Request ...\n"))
host = u'127.0.0.1'
port = 8080
method = u'POST'
path = u'/echo'
console.terse("{0} from {1}:{2}{3} ...\n".format(method, host, port, path))
headers = odict([(u'Accept', u'application/json'),
(u'Content-Type', u'multipart/form-data')])
fargs = odict([("text", "This is the life,\nIt is the best.\n"),
("html", "<html><body></body><html>")])
request = httping.Requester(hostname=host,
port=port,
method=method,
path=path,
headers=headers)
msgOut = request.rebuild(fargs=fargs)
beta.tx(msgOut)
while beta.txes or not beta.rxbs:
beta.serviceTxes()
beta.serviceReceives()
time.sleep(0.05)
beta.serviceReceives()
msgIn, index = beta.tailRxbs(0)
response = httping.Respondent(msg=beta.rxbs, method=method)
while response.parser:
response.parse()
response.dictify()
self.assertEqual(response.data, {'action': None,
'content': None,
'form': [['text', 'This is the life,\nIt is the best.\n'],
['html', '<html><body></body><html>']],
'query': {},
'url': 'http://127.0.0.1:8080/echo',
'verb': 'POST'}
)
self.assertEqual(len(beta.rxbs), 0)
#alpha.close()
beta.close()
wireLogBeta.close()
console.reinit(verbosity=console.Wordage.concise)
def runOne(test):
'''
Unittest Runner
'''
test = BasicTestCase(test)
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
""" Unittest runner """
tests = []
names = ['testBasic',
'testNonBlockingRequestEcho',
'testNonBlockingRequestStream',
'testNonBlockingRequestStreamFancy',
'testNonBlockingRequestStreamFancyJson',
'testNonBlockingRequestStreamFirebase',
'testNonBlockingRequestMultipart']
tests.extend(map(BasicTestCase, names))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
""" Unittest runner """
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BasicTestCase))
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() #run all unittests
#runSome()#only run some
#runOne('testBasic')
#runOne('testNonBlockingRequestEcho')
#runOne('testNonBlockingRequestStream')
#runOne('testNonBlockingRequestStreamFancy')
#runOne('testNonBlockingRequestStreamFancyJson')
#runOne('testNonBlockingRequestStreamFirebase')
runOne('testNonBlockingRequestMultipart')
```
#### File: ioflo/test/__init__.py
```python
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
from ioflo.aid.consoling import getConsole
console = getConsole()
console.reinit(verbosity=console.Wordage.concise)
start = os.path.dirname(os.path.dirname
(os.path.abspath
(sys.modules.get(__name__).__file__)))
# need top to be above root for relative imports to not go above top level
top = os.path.dirname(start)
def run(top, start=None, failfast=False):
"""
Run unittests starting at directory given by start within the package rooted at top
"""
if not start:
start = top
console.terse("\nRunning ioflo tests starting at '{0}' from '{1}', \n".format(start, top))
loader = unittest.TestLoader()
suite = loader.discover(start, 'test_*.py', top )
unittest.TextTestRunner(verbosity=2, failfast=failfast).run(suite)
if __name__ == "__main__":
run(top, start)
``` |
{
"source": "04mayukh/google-research",
"score": 2
} |
#### File: robust_fill/dataset/write_data.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer.tasks.robust_fill import sample_random
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_work_units', 1, 'Total number of work units.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed.')
flags.DEFINE_integer('num_tasks', 100000, 'Number of tasks to write.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_expressions', 10,
'Maximum number of expressions in program.')
flags.DEFINE_integer('min_expressions', 1,
                     'Minimum number of expressions in program.')
flags.DEFINE_integer('max_input_length', 20,
'Maximum number of characters in input strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_boolean('split_program', False,
                     'Whether to split program by partial program.')
flags.DEFINE_boolean('split_outputs', False,
'Whether to split outputs by partial program.')
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_example(task,
token_id_table):
"""Creates a tf.Example message to be written to a file."""
# Create a dictionary mapping the feature name to the tf.Example-compatible
# data type.
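  # Record layout produced below: '<' separates an input string from its
  # output(s), '>' separates successive I/O examples, and '|' separates
  # per-expression partial outputs (or partial programs) when the
  # corresponding --split_outputs / --split_program flag is set.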
io_string = ''
if FLAGS.split_outputs:
for inp in task.inputs:
io_string += inp + '<'
for expr in task.program.expressions:
io_string += expr(inp) + '|'
io_string = io_string[:-1] + '>'
io_string = io_string[:-1]
else:
for inp, out in zip(task.inputs, task.outputs):
io_string += inp + '<' + out + '>'
io_string = io_string[:-1]
program_string = ''
if FLAGS.split_program:
for expr in task.program.expressions:
program_string += ' '.join(map(str, expr.encode(token_id_table)))
program_string += '|'
program_string = program_string[:-1]
else:
program_string = ' '.join(
map(str, task.program.encode(token_id_table)[:-1]))
feature = {
'i/o': _bytes_feature(str.encode(io_string)),
'program_encoding': _bytes_feature(str.encode(program_string)),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def main(_):
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
_, token_id_table = dsl_tokens.build_token_tables()
if not gfile.isdir(FLAGS.save_dir):
gfile.mkdir(FLAGS.save_dir)
worker_fname = os.path.join(FLAGS.save_dir,
'program_tasks.tf_records-00000-of-00001')
# Write the `tf.Example` observations to the file.
with tf.io.TFRecordWriter(worker_fname) as writer:
for _ in range(FLAGS.num_tasks):
task = sample_random.random_task(
max_expressions=FLAGS.max_expressions,
min_expressions=FLAGS.min_expressions,
max_k=3,
max_input_tokens=5,
max_input_length=FLAGS.max_input_length,
max_output_length=FLAGS.max_input_length * FLAGS.max_expressions,
num_examples=FLAGS.num_strings_per_task,
)
example = serialize_example(task, token_id_table)
writer.write(example)
if __name__ == '__main__':
app.run(main)
```
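A minimal sketch of reading back the records written by main() above. The feature names mirror serialize_example(); the local file path is an assumption for illustration.

```python
# Hypothetical reader sketch (not part of the original file).
import tensorflow.compat.v2 as tf

tf.enable_v2_behavior()

feature_description = {
    'i/o': tf.io.FixedLenFeature([], tf.string),
    'program_encoding': tf.io.FixedLenFeature([], tf.string),
}

def parse_example(record):
  parsed = tf.io.parse_single_example(record, feature_description)
  return parsed['i/o'], parsed['program_encoding']

dataset = tf.data.TFRecordDataset(
    'program_tasks.tf_records-00000-of-00001').map(parse_example)

for io_string, program in dataset.take(2):
  # io_string holds '<'/'>'-delimited examples; program holds space-separated
  # token ids from the token_id_table.
  print(io_string.numpy().decode(), program.numpy().decode())
```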
#### File: non_semantic_speech_benchmark/distillation/models_test.py
```python
import os
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import tensorflow as tf
from non_semantic_speech_benchmark.distillation import models
from non_semantic_speech_benchmark.distillation.compression_lib import compression_op as compression
from non_semantic_speech_benchmark.distillation.compression_lib import compression_wrapper
class ModelsTest(parameterized.TestCase):
@parameterized.parameters(
{'frontend': True, 'bottleneck': 3, 'tflite': True},
{'frontend': False, 'bottleneck': 3, 'tflite': True},
{'frontend': True, 'bottleneck': 3, 'tflite': False},
{'frontend': False, 'bottleneck': 3, 'tflite': False},
{'frontend': True, 'bottleneck': 0, 'tflite': False},
)
def test_model_frontend(self, frontend, bottleneck, tflite):
if frontend:
input_tensor_shape = [1 if tflite else 2, 32000] # audio signal.
else:
input_tensor_shape = [3, 96, 64, 1] # log Mel spectrogram.
input_tensor = tf.zeros(input_tensor_shape, dtype=tf.float32)
output_dimension = 5
m = models.get_keras_model(
'mobilenet_debug_1.0_False', bottleneck, output_dimension,
frontend=frontend, tflite=tflite)
o_dict = m(input_tensor)
emb, o = o_dict['embedding'], o_dict['embedding_to_target']
emb.shape.assert_has_rank(2)
if bottleneck:
self.assertEqual(emb.shape[1], bottleneck)
o.shape.assert_has_rank(2)
self.assertEqual(o.shape[1], 5)
def test_invalid_model(self):
invalid_mobilenet_size = 'huuuge'
with self.assertRaises(KeyError) as exception_context:
models.get_keras_model(
f'mobilenet_{invalid_mobilenet_size}_1.0_False', 3, 5)
if not isinstance(exception_context.exception, KeyError):
self.fail()
@parameterized.parameters(
{'model_type': 'mobilenet_small_1.0_False'},
{'model_type': 'mobilenet_debug_1.0_False'},
{'model_type': 'efficientnetb0'},
{'model_type': 'efficientnetb1'},
{'model_type': 'efficientnetb2'},
{'model_type': 'efficientnetb3'},
)
@flagsaver.flagsaver
def test_valid_model_type(self, model_type):
# Frontend flags.
flags.FLAGS.frame_hop = 5
flags.FLAGS.num_mel_bins = 80
flags.FLAGS.frame_width = 5
input_tensor = tf.zeros([2, 16000], dtype=tf.float32)
m = models.get_keras_model(model_type, 3, 5, frontend=True)
o_dict = m(input_tensor)
emb, o = o_dict['embedding'], o_dict['embedding_to_target']
emb.shape.assert_has_rank(2)
self.assertEqual(emb.shape[1], 3)
o.shape.assert_has_rank(2)
self.assertEqual(o.shape[1], 5)
@parameterized.parameters({'add_compression': True},
{'add_compression': False})
def test_tflite_model(self, add_compression):
compressor = None
bottleneck_dimension = 3
if add_compression:
compression_params = compression.CompressionOp.get_default_hparams(
).parse('')
compressor = compression_wrapper.get_apply_compression(
compression_params, global_step=0)
m = models.get_keras_model(
'mobilenet_debug_1.0_False',
bottleneck_dimension,
5,
frontend=False,
compressor=compressor,
tflite=True)
input_tensor = tf.zeros([1, 96, 64, 1], dtype=tf.float32)
o_dict = m(input_tensor)
emb, o = o_dict['embedding'], o_dict['embedding_to_target']
emb.shape.assert_has_rank(2)
self.assertEqual(emb.shape[0], 1)
self.assertEqual(emb.shape[1], bottleneck_dimension)
o.shape.assert_has_rank(2)
self.assertEqual(o.shape[0], 1)
self.assertEqual(o.shape[1], 5)
if add_compression:
self.assertIsNone(m.get_layer('distilled_output').kernel)
self.assertIsNone(
m.get_layer('distilled_output').compression_op.a_matrix_tfvar)
if __name__ == '__main__':
assert tf.executing_eagerly()
absltest.main()
``` |
{
"source": "04n0/jenkins-configuration",
"score": 2
} |
#### File: e2e/pages/credential_store_page.py
```python
from __future__ import absolute_import
from . import JENKINS_HOST
from bok_choy.page_object import PageObject
class CredentialStorePage(PageObject):
url = "http://{}:8080/credentials/store/system/domain/_/".format(JENKINS_HOST)
def is_browser_on_page(self):
return "Global credentials (unrestricted)" in self.q(css='[id="main-panel"] > h1').text
def get_number_credentials(self):
# Find the number of rows in the table, but subtract one for the headers
return len(self.q(css='[class="sortable pane bigtable"] > tbody > tr')) - 1
def get_list_of_credentials_table(self):
# Get all text from the table's rows
return self.q(css='[class="sortable pane bigtable"] > tbody > tr > td').text
```
#### File: e2e/pages/dashboard_page.py
```python
from __future__ import absolute_import
from . import JENKINS_HOST
from bok_choy.page_object import PageObject
class JenkinsDashboardPage(PageObject):
url = "http://{}:8080".format(JENKINS_HOST)
def is_browser_on_page(self):
return 'dashboard [jenkins]' in self.browser.title.lower()
def get_jobs_list(self):
return self.q(css='[id="projectstatus"] > tbody > tr').attrs('id')
```
#### File: e2e/pages/slack_config_subpage.py
```python
from __future__ import absolute_import
from . import JENKINS_HOST
from bok_choy.page_object import PageObject
from .configuration_page import ConfigurationSubPageMixIn
class SlackConfigSubPage(ConfigurationSubPageMixIn, PageObject):
def __init__(self, *args, **kwargs):
super(SlackConfigSubPage, self).__init__(*args, **kwargs)
self.name = "jenkins-plugins-slack-SlackNotifier"
def get_room(self):
return self.value_of_first_element_named('_.room')
```
#### File: jenkins-configuration/e2e/test_mailer_configuration.py
```python
from __future__ import absolute_import
import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from .pages.mailer_subpage import MailerConfigurationSubPage
class TestMailerConfiguration(WebAppTest):
def setUp(self):
super(TestMailerConfiguration, self).setUp()
config_path = os.getenv('CONFIG_PATH')
try:
yaml_contents = open(
"{}/mailer_config.yml".format(config_path), 'r'
).read()
except IOError:
pass
self.mailer_config = yaml.safe_load(yaml_contents)
self.config_page = MailerConfigurationSubPage(self.browser)
def test_mailer_config(self):
self.config_page.visit()
        # test a component of the main mailer config
assert self.mailer_config['SMTP_SERVER'] == self.config_page.get_smtp_server()
self.config_page.expand_advanced()
# test an advanced component of the mailer config
assert str(self.mailer_config['SMTP_PORT']) == self.config_page.get_smtp_port()
```
#### File: jenkins-configuration/e2e/test_timestamper_configuration.py
```python
from __future__ import absolute_import
import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from .pages.timestamper_config_subpage import TimestamperConfigSubPage
class TestTimestamperConfig(WebAppTest):
def test_timestamper_config(self):
"""
Verify a couple of the configuration options of the timestamper plugin
from the Jenkins configuration console
"""
config_path = os.getenv('CONFIG_PATH')
try:
yaml_contents = open(
"{}/timestamper_config.yml".format(config_path), 'r'
).read()
except IOError:
pass
timestamper_config = yaml.safe_load(yaml_contents)
config_page = TimestamperConfigSubPage(self.browser)
config_page.visit()
assert timestamper_config['ENABLED_ON_PIPELINES'] == config_page.enabled_on_all_pipelines()
assert timestamper_config['ELAPSED_TIME_FORMAT'] == config_page.get_elapsed_time_format()
``` |
{
"source": "04t02/maslite",
"score": 3
} |
#### File: maslite/demos/scheduling.py
```python
from maslite import Agent, AgentMessage
from collections import namedtuple
__description__ = """The scheduling demo presented in <NAME>'s PhD thesis (https://goo.gl/YbHVzi).
"""
class Order(AgentMessage):
def __init__(self, sender, receiver, order_items):
"""
:param sender: Agent class or Agent uuid
:param receiver: Agent class or Agent uuid
:param order_items: A dictionary of SKUs & quantities, eg: {"A": 1, "B": 1,"C": 1, ...}
:param wanted_sequence: optional; if a wanted sequence is declared it should exhaust all the SKUs
"""
super().__init__(sender, receiver)
self.order_items = order_items
def get_ordered_items(self):
return self.order_items
# I've created a nice light-weight object for storing the data in the supply schedule
SupplyLine = namedtuple('SupplyLine', ('time', 'sku', 'qty'))
class SupplySchedule(AgentMessage):
def __init__(self, sender, receiver, schedule):
"""
:param sender: Agent class or Agent uuid
:param receiver: Agent class or Agent uuid
:param schedule: list of items with [[time, sku, qty], [time, sku, qty], ... , [time, sku, qty]]
eg: [[2, "A", 1],
[2, "B", 3],
...
[22, "G", 1]]
"""
super().__init__(sender, receiver)
for row in schedule:
assert isinstance(row, SupplyLine)
self.schedule = schedule
def get_schedule(self):
return self.schedule
class Job(object):
def __init__(self, order_sku, resource_sku,
supply_time, run_time, idle_time, start_time, finish_time,
quantity, customer):
self.order_sku = order_sku
self.resource_sku = resource_sku
self.supply_time = supply_time
self.run_time = run_time
self.idle_time = idle_time
self.start_time = start_time
self.finish_time = finish_time
self.quantity = quantity
self.customer = customer
def __str__(self):
names = [self.order_sku, self.resource_sku,
self.supply_time, self.run_time, self.idle_time, self.start_time, self.finish_time,
self.quantity]
return "<{} {}>".format(self.__class__.__name__, " ".join([str(n) for n in names]))
def __repr__(self):
return self.__str__()
class JobsWithIdleTime(AgentMessage):
def __init__(self, sender, receiver, jobs_with_idle_time):
"""
A specialised message for communicating jobs with idle time.
:param sender: Agent class or Agent uuid
:param receiver: Agent class or Agent uuid
:param current_supply_schedule: The current schedule in which the jobs are processed.
:param jobs_with_idle_time: list of jobs with idle time
"""
super().__init__(sender, receiver)
self.jobs_with_idle_time = jobs_with_idle_time
def get_jobs_with_idle_time(self):
return self.jobs_with_idle_time
class Machine(Agent):
def __init__(self, name, run_times, transformations):
"""
:param run_times: run_times as a dictionary of skus & times in seconds
for example: {"A":14, "B":7,"C":3, "D": 6}
:param transformations: dict of skus(in):skus(out) required for the machine to
create a sku(out) from a sku(in)
"""
super().__init__()
self.name = name
self.transformations = transformations
self.run_times = run_times
self.customer = None
self.supplier = None
self.stock = {}
self.jobs = []
self.finish_time = -1
self.operations.update({Order.__name__: self.process_order, # new order arrives.
SupplySchedule.__name__: self.update_schedule_with_supply_schedule,
JobsWithIdleTime.__name__: self.deal_with_idle_time}) # supply schedule arrives.
def __str__(self):
return "<{} {}>".format(self.__class__.__name__, self.name)
def setup(self):
pass
def teardown(self):
pass
def update(self):
while self.messages:
msg = self.receive()
operation = self.operations.get(msg.topic, None)
if operation is not None:
operation(msg)
self.update_finish_time()
def set_customer(self, agent):
assert isinstance(agent, Agent)
self.customer = agent.uuid
def set_supplier(self, agent):
assert isinstance(agent, Agent)
self.supplier = agent.uuid
def update_finish_time(self):
"""
This function updates the finish time as a KPI to show the user.
"""
if self.jobs:
last_job = self.jobs[-1]
assert isinstance(last_job, Job)
if last_job.finish_time:
self.finish_time = last_job.finish_time
def process_order(self, msg):
"""
Process order registers any new order in the joblist.
:param msg: Order class.
"""
assert isinstance(msg, Order)
ordered_items = msg.get_ordered_items()
# we register the order as jobs:
for sku, qty in ordered_items.items():
job = Job(order_sku=sku,
resource_sku=self.transformations.get(sku, None),
supply_time=None,
run_time=self.run_times.get(sku, None),
idle_time=None,
start_time=None,
finish_time=None,
quantity=qty,
customer=msg.sender)
self.jobs.append(job)
# if it's a brand new schedule, then we'll have to sort the jobs first.
if any([j.supply_time is None for j in self.jobs]):
self.schedule_jobs_using_shortest_run_time_first()
# after registering the order we need the materials...
self.order_materials()
def order_materials(self):
# below we transform the order of {abcdefg} using the materials {M1a, M1b, ..., M1f}
# so that a revised order can be sent to the supplier
supplies_required = {} # SKU: qty (following the order class)
for job in self.jobs:
supplies_required[job.resource_sku] = job.quantity
assert self.supplier is not None, "supplier must be assigned before it can receive messages..!"
new_order = Order(sender=self, receiver=self.supplier, order_items=supplies_required)
self.send(new_order)
def schedule_jobs_using_shortest_run_time_first(self):
jobs = [(j.run_time, j.order_sku, j) for j in self.jobs]
jobs.sort()
self.jobs = [j for run_time, order_sku, j in jobs]
def schedule_jobs_using_supply_time(self):
jobs = [(j.supply_time, j.run_time, j) for j in self.jobs]
jobs.sort()
self.jobs = [j for supply_time, run_time, j in jobs]
def update_schedule_with_supply_schedule(self, msg):
"""
:param msg: SupplySchedule
When we receive a SupplySchedule, we are getting an update to our jobs
which we'll need to process.
"""
assert isinstance(msg, SupplySchedule)
supply_schedule = msg.get_schedule()
for row in supply_schedule:
assert isinstance(row, SupplyLine)
for job in self.jobs:
assert isinstance(job, Job)
if row.sku == job.resource_sku:
job.supply_time = row.time
# now we'll need to sort the jobs as supplied.
self.schedule_jobs_using_supply_time()
# when we've received an updated supply schedule, we will need to update the jobs.
self.update_jobs_table()
def update_jobs_table(self):
if any([j.supply_time is None for j in self.jobs]):
# then we can't continue as we're waiting for supplies.
return
# else:
previous_job = None
for idx, job in enumerate(self.jobs):
assert isinstance(job, Job)
if previous_job is None:
job.start_time = max(0, job.supply_time)
job.idle_time = job.start_time
else:
job.start_time = max(previous_job.finish_time, job.supply_time)
job.idle_time = job.start_time - previous_job.finish_time
job.finish_time = job.start_time + job.run_time
previous_job = job
if any([j.supply_time is None for j in self.jobs]):
# then we can't continue as we're waiting for supplies.
return
else: # we have a complete schedule and can communicate any idle time to peers
self.communicate_to_peers()
def communicate_to_peers(self):
jobs_with_idle_time = []
total_idle_time = 0
for idx, job in enumerate(self.jobs):
if job.idle_time != 0:
total_idle_time += job.idle_time
jobs_with_idle_time.append(idx)
# if there's a supplier, we'll send the idle time to it.
if sum(jobs_with_idle_time) > 0: # sum of jobs with idle time will be zero if only index zero is present.
new_msg = JobsWithIdleTime(sender=self, receiver=self.supplier,
jobs_with_idle_time=jobs_with_idle_time) # and the index that's not good.
self.send(new_msg)
# if there's a customer, we'll send the new supply schedule to it.
if self.customer and self.jobs:
customer_supply_schedule = []
for job in self.jobs:
sl = SupplyLine(time=job.finish_time,
sku=job.order_sku,
qty=job.quantity)
customer_supply_schedule.append(sl)
new_msg = SupplySchedule(sender=self, receiver=self.customer, schedule=customer_supply_schedule)
self.send(new_msg)
def deal_with_idle_time(self, msg):
assert isinstance(msg, JobsWithIdleTime)
jobs_with_idle_time = msg.get_jobs_with_idle_time()
while jobs_with_idle_time:
index = jobs_with_idle_time.pop(0)
if index == 0:
pass # can't move before index zero
else: # swap positions with the previous job.
self.jobs[index - 1], self.jobs[index] = self.jobs[index], self.jobs[index - 1]
# finally:
self.update_jobs_table()
class StockAgent(Agent):
def __init__(self, name=''):
super().__init__()
self.customer = None
self.name = name
self.operations.update({Order.__name__: self.process_order})
def __str__(self):
return "<{} {}>".format(self.__class__.__name__, self.name)
def setup(self):
pass
def teardown(self):
pass
def update(self):
while self.messages:
msg = self.receive()
operation = self.operations.get(msg.topic, None)
if operation is not None:
operation(msg)
def set_customer(self, agent):
assert isinstance(agent, Agent)
self.customer = agent.uuid
def process_order(self, msg):
assert isinstance(msg, Order)
ordered_items = msg.get_ordered_items()
supplies = []
for sku, qty in ordered_items.items():
sl = SupplyLine(time=0, # should really be self.now() to use the scheduler's clock.
sku=sku,
qty=qty)
supplies.append(sl)
new_msg = SupplySchedule(sender=self, receiver=self.customer, schedule=supplies)
self.send(new_msg)
```
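For context, here is a minimal, hypothetical wiring sketch for the agents defined above. It uses only the constructors and setters shown in this file and assumes maslite's `Agent` exposes a `uuid` attribute, as the `set_customer`/`set_supplier` helpers imply; the maslite scheduler that registers the agents and actually delivers the messages is omitted, and the SKU names, run times and transformations are placeholders.
```python
# Hypothetical wiring of the demo agents; values are placeholders.
run_times = {"A": 14, "B": 7, "C": 3, "D": 6}                        # seconds per ordered SKU
transformations = {"A": "M1a", "B": "M1b", "C": "M1c", "D": "M1d"}   # ordered SKU -> required material

stock = StockAgent(name="raw material store")
machine = Machine(name="machine 1", run_times=run_times, transformations=transformations)

# Point the supply chain in both directions so messages have somewhere to go.
machine.set_supplier(stock)
stock.set_customer(machine)

# A customer order for the machine; delivering it (and the replies that follow)
# is the job of the maslite scheduler, which is outside the scope of this file.
order = Order(sender="customer", receiver=machine.uuid, order_items={"A": 1, "B": 2, "C": 1})
```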
#### File: maslite/tests/auction_demo_tests.py
```python
from itertools import product
from demos.auction_model import demo
def test02():
sellers = [4]
buyers = [100]
results = demo(sellers, buyers, time_limit=False)
expected_results = {100: 4, 4: 100}
for k, v in results.items():
assert expected_results[k] == v, "Hmmm... That's not right {}={}".format(k, v)
def test03():
expected_results = {101: 5, 5: 101, 6: None, 7: None}
sellers = [k for k in expected_results if k < 100]
buyers = [k for k in expected_results if k >= 100]
results = demo(sellers, buyers)
for k, v in results.items():
assert expected_results[k] == v, "Hmmm... That's not right {}={}".format(k, v)
def test04():
expected_results = {0: 101, 101: 0, 102: None,
103: None} # result: 0 enters contract with price 334.97 (the highest price)
sellers = [k for k in expected_results if k < 100]
buyers = [k for k in expected_results if k >= 100]
results = demo(sellers, buyers)
for k, v in results.items():
assert expected_results[k] == v, "Hmmm... That's not right {}={}".format(k, v)
def test05():
expected_results = {101: 7, 102: 6, 103: 5, 5: 103, 6: 102, 7: 101}
sellers = [k for k in expected_results if k < 100]
buyers = [k for k in expected_results if k >= 100]
results = demo(sellers, buyers)
for k, v in results.items():
assert expected_results[k] == v, "Hmmm... That's not right {}={}".format(k, v)
def test06():
expected_results = {0: 102, 1: 108, 2: 105, 3: 107, 4: 100, 5: 106, 6: 112, 7: 111, 8: 103, 9: 109, 10: 104, 100: 4, 101: None, 102: 0, 103: 8, 104: 10, 105: 2, 106: 5, 107: 3, 108: 1, 109: 9, 110: None, 111: 7, 112: 6}
sellers = [k for k in expected_results if k < 100]
buyers = [k for k in expected_results if k >= 100]
error_sets = []
for s_init, b_init in list(product([True, False], repeat=2)):
if not s_init and not b_init:
continue # if neither seller or buyer initialise, obviously nothing will happen.
results = demo(sellers=sellers, buyers=buyers, seller_can_initialise=s_init, buyer_can_initialise=b_init)
errors = []
for k, v in results.items():
if not expected_results[k] == v: # , "Hmmm... That's not right {}={}".format(k, v)
errors.append((k, v))
if errors:
error_sets.append(errors)
if error_sets:
print("-" * 80)
for i in error_sets:
print(",".join(str(i) for i in sorted(i)), flush=True)
raise AssertionError("output does not reflect expected results.")
``` |
{
"source": "050644zf/sccl",
"score": 3
} |
#### File: sccl/dataloader/dataloader.py
```python
from argparse import Namespace
import os
import pandas as pd
import torch.utils.data as util_data
from torch.utils.data import Dataset
class TextClustering(Dataset):
def __init__(self, train_x, train_y):
assert len(train_x) == len(train_y)
self.train_x = train_x
self.train_y = train_y
def __len__(self):
return len(self.train_x)
def __getitem__(self, idx):
return {'text': self.train_x[idx], 'label': self.train_y[idx]}
class AugmentPairSamples(Dataset):
def __init__(self, train_x, train_x1, train_x2, train_y):
assert len(train_y) == len(train_x) == len(train_x1) == len(train_x2)
self.train_x = train_x
self.train_x1 = train_x1
self.train_x2 = train_x2
self.train_y = train_y
def __len__(self):
return len(self.train_y)
def __getitem__(self, idx):
return {'text': self.train_x[idx], 'text1': self.train_x1[idx], 'text2': self.train_x2[idx], 'label': self.train_y[idx]}
def augment_loader(args:Namespace):
if args.dataset == "searchsnippets":
train_data = pd.read_csv(os.path.join(args.data_path, args.dataname))
train_text = train_data['text'].fillna('.').values
train_text1 = train_data['text1'].fillna('.').values
train_text2 = train_data['text2'].fillna('.').values
train_label = train_data['label'].astype(int).values
elif args.dataset == "bili":
DATALEN = args.datalen
data_path = args.data_path
aug_path = data_path+args.aug_path
#sub_areas = ['science','social_science','humanity_history','business','campus','career','design','skill']
#sub_areas = ['douga','music','dance','game','knowledge','tech','sports','car','life','food','animal','fashion','information','ent']
sub_areas = ['music','tech','knowledge','car','food','animal','fashion','information']
train_text = []
train_text1 = []
train_text2 = []
train_label = []
for idx,sub_area in enumerate(sub_areas):
with open(data_path+sub_area+'.txt',encoding='utf-8') as dataFile:
dataList = dataFile.read().split('\n')
if DATALEN:
dataList = dataList[:DATALEN]
dataList = [i[13:] for i in dataList]
train_text.extend(dataList)
with open(aug_path+sub_area+'1.txt',encoding='utf-8') as dataFile:
dataList = dataFile.read().split('\n')
if DATALEN:
dataList = dataList[:DATALEN]
train_text1.extend(dataList)
with open(aug_path+sub_area+'2.txt',encoding='utf-8') as dataFile:
dataList = dataFile.read().split('\n')
if DATALEN:
dataList = dataList[:DATALEN]
train_text2.extend(dataList)
for i in range(len(dataList)):
train_label.append(idx)
assert len(train_text) == len(train_text1) == len(train_text2) == len(train_label)
else:
DATALEN = args.datalen
with open('data/stackoverflow/title_StackOverflow.txt',encoding='utf-8') as dataFile:
train_text = dataFile.read().split('\n')[:DATALEN]
with open('data/stackoverflow/text1.txt',encoding='utf-8') as dataFile:
train_text1 = dataFile.read().split('\n')[:DATALEN]
with open('data/stackoverflow/text2.txt',encoding='utf-8') as dataFile:
train_text2 = dataFile.read().split('\n')[:DATALEN]
with open('data/stackoverflow/label_StackOverflow.txt',encoding='utf-8') as dataFile:
train_label = [int(i)-1 for i in dataFile.read().split('\n')[:DATALEN]]
print(max(train_label), min(train_label))
print(len(train_text) , len(train_text1) , len(train_text2) , len(train_label))
train_dataset = AugmentPairSamples(train_text, train_text1, train_text2, train_label)
train_loader = util_data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2)
return train_loader
def train_unshuffle_loader(args):
if args.dataset == "searchsnippets":
train_data = pd.read_csv(os.path.join(args.data_path, args.dataname))
train_text = train_data['text'].fillna('.').values
train_label = train_data['label'].astype(int).values
elif args.dataset == "bili":
DATALEN = args.datalen
data_path = args.data_path
#sub_areas = ['science','social_science','humanity_history','business','campus','career','design','skill']
sub_areas = ['music','tech','sports','car','food','animal','fashion','information']
train_text = []
train_label = []
for idx,sub_area in enumerate(sub_areas):
with open(data_path+sub_area+'.txt',encoding='utf-8') as dataFile:
dataList = dataFile.read().split('\n')
if DATALEN:
dataList = dataList[:DATALEN]
train_text.extend(dataList)
if DATALEN:
for i in range(DATALEN):
train_label.append(idx)
else:
for i in range(len(dataList)):
train_label.append(idx)
assert len(train_text) == len(train_label)
else:
DATALEN = args.datalen
with open('data/stackoverflow/title_StackOverflow.txt',encoding='utf-8') as dataFile:
train_text = dataFile.read().split('\n')[:DATALEN]
with open('data/stackoverflow/label_StackOverflow.txt',encoding='utf-8') as dataFile:
train_label = [int(i)-1 for i in dataFile.read().split('\n')[:DATALEN]]
train_dataset = TextClustering(train_text, train_label)
train_loader = util_data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=1)
return train_loader
```
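For reference, a minimal sketch of how `augment_loader` might be driven outside of `main.py`. It assumes the `data/stackoverflow/*` files referenced above exist on disk and that `dataloader` is importable as a package (as in `main.py`); the `Namespace` fields are just the ones the stackoverflow branch actually reads, and the values are illustrative.
```python
# Minimal sketch, assuming data/stackoverflow/* exists; values are illustrative.
from argparse import Namespace
from dataloader.dataloader import augment_loader

args = Namespace(
    dataset="stackoverflow",  # falls through to the final branch above
    datalen=1000,             # truncate each file to its first 1000 lines
    batch_size=32,
)
loader = augment_loader(args)
batch = next(iter(loader))
print(batch["text"][0], batch["text1"][0], batch["label"][0])
```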
#### File: 050644zf/sccl/main.py
```python
import sys
sys.path.append( './' )
import torch
import argparse
from sentence_transformers import SentenceTransformer
from models.Transformers import SCCLBert
from learners.cluster import ClusterLearner
from dataloader.dataloader import augment_loader
from training import training
from utils.kmeans import get_kmeans_centers
from utils.logger import setup_path
from utils.randomness import set_global_random_seed
import os
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
MODEL_CLASS = {
"distil": 'distilbert-base-nli-stsb-mean-tokens',
"robertabase": 'roberta-base-nli-stsb-mean-tokens',
"robertalarge": 'roberta-large-nli-stsb-mean-tokens',
"msmarco": 'distilroberta-base-msmarco-v2',
"xlm": "xlm-r-distilroberta-base-paraphrase-v1",
"bertlarge": 'bert-large-nli-stsb-mean-tokens',
"bertbase": 'bert-base-nli-stsb-mean-tokens',
"cn": 'data/distiluse-base-multilingual-cased-v1',
"cndl": 'distiluse-base-multilingual-cased-v1',
"cn2": 'data/bert-base-chinese',
"cn2dl": 'bert-base-chinese',
"cn3": 'data/distiluse-base-multilingual-cased-v2',
"cn3dl": 'distiluse-base-multilingual-cased-v2',
"cn4": 'data/paraphrase-multilingual-MiniLM-L12-v2',
"cn4dl": 'paraphrase-multilingual-MiniLM-L12-v2'
}
def run(args):
resPath, tensorboard = setup_path(args)
args.resPath, args.tensorboard = resPath, tensorboard
set_global_random_seed(args.seed)
# dataset loader
train_loader = augment_loader(args)
# model
torch.cuda.set_device(args.gpuid[0])
# Configure the Sentence Transformer
sbert = SentenceTransformer(MODEL_CLASS[args.bert])
# Get the center of each cluster
cluster_centers = get_kmeans_centers(sbert, train_loader, args.num_classes)
model = SCCLBert(sbert, cluster_centers=cluster_centers, alpha=args.alpha)
model = model.cuda()
# optimizer
optimizer = torch.optim.Adam([
{'params':model.sentbert.parameters()},
{'params':model.head.parameters(), 'lr': args.lr*args.lr_scale},
{'params':model.cluster_centers, 'lr': args.lr*args.lr_scale}], lr=args.lr)
print(optimizer)
# set up the trainer
learner = ClusterLearner(model, optimizer, args.temperature, args.base_temperature)
training(train_loader, learner, args)
return None
def get_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--gpuid', nargs="+", type=int, default=[0], help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only")
parser.add_argument('--seed', type=int, default=0, help="")
parser.add_argument('--print_freq', type=float, default=250, help="")
parser.add_argument('--result_path', type=str, default='./results/')
parser.add_argument('--bert', type=str, default='cn4', help="")
# Dataset
parser.add_argument('--dataset', type=str, default='bili', help="")
parser.add_argument('--datalen', type=int, default=100, help="")
parser.add_argument('--data_path', type=str, default='./data/')
parser.add_argument('--aug_path', type=str, default='augdata/p0.5/')
parser.add_argument('--dataname', type=str, default='searchsnippets.csv', help="")
parser.add_argument('--num_classes', type=int, default=8, help="")
parser.add_argument('--max_length', type=int, default=32)
# Learning parameters
parser.add_argument('--lr', type=float, default=1e-5, help="")
parser.add_argument('--lr_scale', type=int, default=100, help="")
parser.add_argument('--max_iter', type=int, default=10)
# contrastive learning
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--temperature', type=float, default=0.5, help="temperature required by contrastive loss")
parser.add_argument('--base_temperature', type=float, default=0.07, help="temperature required by contrastive loss")
# Clustering
parser.add_argument('--use_perturbation', action='store_true', help="")
parser.add_argument('--alpha', type=float, default=1.0)
args = parser.parse_args(argv)
#args = parser.parse_args('--result_path ./restest/searchsnippets/ --num_classes 8 --dataset bili --bert cn --alpha 1 --lr 1e-05 --lr_scale 100 --batch_size 10 --temperature 0.5 --base_temperature 0.07 --max_iter 10 --print_freq 250 --seed 0 --gpuid 0 '.split(' '))
args.use_gpu = args.gpuid[0] >= 0
args.resPath = None
args.tensorboard = None
return args
if __name__ == '__main__':
run(get_args(sys.argv[1:]))
```
#### File: sccl/manim/anime.py
```python
from manimlib import *
import numpy as np
import matplotlib.pyplot as plt
class WriteStuff(Scene):
def construct(self):
example_text = Text(
"这是文本",
t2c={"text": YELLOW},
font='Noto Sans CJK SC Bold'
)
example_tex = Tex(
"\\sum_{k=1}^\\infty {1 \\over k^2} = {\\pi^2 \\over 6}",
)
group = VGroup(example_text, example_tex)
group.arrange(DOWN)
#group.set_width(FRAME_WIDTH - 2 *MED_SMALL_BUFF)
self.play(Write(example_text))
self.play(Write(example_tex))
self.wait()
class pre(Scene):
def construct(self):
def normal(mu, sigma, n):
return np.random.normal(mu,sigma,n).reshape([-1,1])
def getp(vec, centers, k):
_a = (1+np.linalg.norm(vec - centers[k])**2) ** -1
_b = 0
for _k in range(len(centers)):
_b += (1+np.linalg.norm(vec - centers[_k])**2) ** -1
return _a / _b
def getap(qmat, f_k, j, k):
_a = qmat[j,k]**2 / f_k[k]
_b = 0
for idx,_f in enumerate(f_k):
_b += qmat[j,idx]**2/f_k[idx]
return _a/_b
def getLoss(pmat, qmat, j):
l = 0
for k in range(pmat.shape[1]):
l += pmat[j,k] * np.log(pmat[j,k]/qmat[j,k])
return l
def getCL(args):
samples, c0, c1 = args
centers = np.array([[c0],[c1]])
qmat = np.zeros([len(samples),len(centers)])
for j in range(qmat.shape[0]):
for k in range(qmat.shape[1]):
qmat[j,k] = getp(samples[j], centers, k)
f_k = qmat.sum(axis=0)
pmat = np.zeros([len(samples),len(centers)])
for j in range(pmat.shape[0]):
for k in range(pmat.shape[1]):
pmat[j,k] = getap(qmat, f_k, j, k)
loss = np.zeros([len(samples)])
for j in range(loss.shape[0]):
loss[j] = getLoss(pmat,qmat,j)
totalLoss = loss.sum()/len(samples)
return qmat, pmat, loss, totalLoss
samples = np.concatenate((normal(0.0,0.5,100),normal(5.0,0.5,100)))
samples.sort(axis=0)
hbins, bins, _ = plt.hist(samples.reshape([-1]), bins=20)
hbins /= np.max(hbins)
class result():
def __init__(self,samples, axes,axes2,c0=0,c1=5):
self.samples = samples
self.axes = axes
self.axes2 = axes2
self.qmat, self.pmat, self.loss, self.totalLoss = getCL((self.samples,c0,c1))
self.qdots = VGroup()
self.q2dots = VGroup()
self.pdots = VGroup()
self.ldots = VGroup()
self.tdot = Dot()
self.tdot.set_color(PURPLE_C)
self.tdot.move_to(self.axes2.c2p(c1 - 5,self.totalLoss))
for idx in range(len(samples)):
dot = SmallDot()
dot.set_color(YELLOW_D)
dot.axes = self.axes
dot.move_to(self.axes.c2p(self.samples[idx,0],self.qmat[idx,0]))
self.qdots.add(dot)
dot = SmallDot()
dot.set_color(TEAL_D)
dot.axes = self.axes
dot.move_to(self.axes.c2p(self.samples[idx,0],self.qmat[idx,1]))
self.q2dots.add(dot)
dot = SmallDot()
dot.set_color(BLUE_D)
dot.axes = self.axes
dot.move_to(self.axes.c2p(self.samples[idx,0],self.pmat[idx,0]))
self.pdots.add(dot)
dot = SmallDot()
dot.set_color(GREEN_D)
dot.axes = self.axes
dot.move_to(self.axes.c2p(self.samples[idx,0],self.loss[idx]))
self.ldots.add(dot)
def update(self,c0,c1):
self.qmat, self.pmat, self.loss, self.totalLoss = getCL((self.samples,c0,c1))
actList = []
for idx in range(len(self.samples)):
self.qdots[idx].generate_target()
self.qdots[idx].target.move_to(self.axes.c2p(self.samples[idx,0],self.qmat[idx,0]))
self.pdots[idx].generate_target()
self.pdots[idx].target.move_to(self.axes.c2p(self.samples[idx,0],self.pmat[idx,0]))
self.ldots[idx].generate_target()
self.ldots[idx].target.move_to(self.axes.c2p(self.samples[idx,0],self.loss[idx]))
#actList.append(self.qdots[idx].animate.move_to(self.axes.c2p(self.samples[idx,0],self.qmat[idx,0])))
#actList.append(self.pdots[idx].animate.move_to(self.axes.c2p(self.samples[idx,0],self.pmat[idx,0])))
#return actList
self.tdot.generate_target()
self.tdot.target.move_to(self.axes2.c2p(c1 - 5,self.totalLoss))
axes = Axes(
x_range=(-2, 8,1),
y_range=(-0.1, 1.2,10),
# The axes will stretch to match the specified height and width
height=6,
width=10,
# Axes is made of two NumberLines; their style can be set via axis_config
x_axis_config={
"stroke_color": GREY_A,
"stroke_width": 1,
"include_numbers": True,
"numbers_to_exclude": []
},
y_axis_config={
"include_tip": False,
"stroke_width": 0,
"include_ticks": False
}
)
axes2 = Axes(
x_range=(-3, 3,1),
y_range=(-0.01, 0.1,0.02),
# The axes will stretch to match the specified height and width
height=6,
width=10,
# Axes is made of two NumberLines; their style can be set via axis_config
x_axis_config={
"stroke_color": GREY_A,
"stroke_width": 1,
"include_numbers": True
}
)
#axes.add_background_rectangle(color=GREEN)
ag = VGroup(axes, axes2)
ag.arrange()
axes2.shift(DOWN*0.1+RIGHT*6)
axes.center()
self.play(ShowCreation(axes), run_time=1)
#self.play(ShowCreation(axes2))
abins = []
for idx,hbin in enumerate(hbins):
xpos = bins[idx] + bins[idx+1]
xpos /= 2
#print(xpos)
abins.append(axes.get_v_line(axes.c2p(xpos,hbin),line_func=Line, color=DARK_BROWN, stroke_width=10))
self.play(*[ShowCreation(i) for i in abins], run_time=3)
c0 = ValueTracker(0)
c1 = ValueTracker(5)
c_0 = Tex("\hat{\mu_1}")
c_1 = Tex("\hat{\mu_2}")
q_j1 = Tex("q_{j1}",color=YELLOW_D)
q_fml = Tex(r"=\frac{\left(1+\lvert e_{j}-\mu_{1}\rvert_{2}^{2} / \alpha\right)^{-\frac{\alpha+1}{2}}}{\sum_{k^{\prime}=1}^{K}\left(1+\lvert e_{j}-\mu_{k^{\prime}}\rvert_{2}^{2} / \alpha\right)^{-\frac{\alpha+1}{2}}}", color=YELLOW_D)
#q_fml = SVGMobject(file_name="videos/q_fml.svg", color=YELLOW_D)
q_j2 = Tex("q_{j2}",color=TEAL_D)
p_j1 = Tex("p_{j1}",color=BLUE_D)
p_fml = Tex(r"=\frac{q_{j k}^{2} / f_{1}}{\sum_{k^{\prime}} q_{j k}^{2} / f_{k^{\prime}}}",color=BLUE_D)
#p_fml = SVGMobject(file_name="videos/p_fml.svg", color=BLUE_D)
loss = Tex("\ell_{j}^{C}",color=GREEN_D)
loss_fml = Tex(r"=\mathbf{K L}\left[p_{j} \vert q_{j}\right]=\sum_{k=1}^{K} p_{j k} \log \frac{p_{j k}}{q_{j k}}",color=GREEN_D)
#loss_fml = SVGMobject(file_name="videos/loss_fml.svg", color=GREEN_D)
Loss = Tex("\mathcal{L}",color=PURPLE_C)
'''
qdots0 = VGroup()
qdots1 = VGroup()
f_k0 = ValueTracker(0)
f_k1 = ValueTracker(0)
f_k0.add_updater(lambda m: m.set_value(axes.p2c([0,sum([d.get_center()[1] for d in qdots0]),0])[1]))
f_k1.add_updater(lambda m: m.set_value(axes.p2c([0,sum([d.get_center()[1] for d in qdots1]),0])[1]))
for sample in samples:
dot = SmallDot()
dot.set_color(YELLOW_C)
dot.axes = axes
dot.move_to(dot.axes.c2p(sample[0],0))
dot.add_updater(lambda m: m.set_y(getpfromdots(m,c_0,c_1,0)))
qdots0.add(dot)
dot = SmallDot()
dot.set_color(YELLOW_C)
dot.axes = axes
dot.move_to(dot.axes.c2p(sample[0],0))
dot.add_updater(lambda m: m.set_y(getpfromdots(m,c_0,c_1,1)))
qdots1.add(dot)
'''
c_0.generate_target()
c_1.generate_target()
c_0.add_updater(lambda m: m.target.move_to(axes.c2p(c0.get_value(),-0.2)))
c_1.add_updater(lambda m: m.target.move_to(axes.c2p(c1.get_value(),-0.2)))
c_0.move_to(c_0.target.get_center())
c_1.move_to(c_1.target.get_center())
r = result(samples,axes,axes2,c0.get_value(),c1.get_value())
q_j1.add_updater(lambda m: m.move_to(r.qdots[0].get_center() + LEFT + DOWN*0.2))
q_fml.add_updater(lambda m: m.next_to(q_j1.get_right()))
q_j2.add_updater(lambda m: m.move_to(r.q2dots[0].get_center() + LEFT + DOWN*0.2))
p_j1.add_updater(lambda m: m.move_to(r.pdots[0].get_center() + LEFT + UP*0.2))
p_fml.add_updater(lambda m: m.next_to(p_j1.get_right() + 0.5*RIGHT))
loss.add_updater(lambda m: m.move_to(r.ldots[0].get_center() + LEFT))
loss_fml.add_updater(lambda m: m.next_to(loss.get_right() + 0.5*RIGHT))
self.play(Write(c_0),Write(c_1))
#print(f_k[0].get_value(),f_k[1].get_value())
self.wait(1)
#self.play(ShowCreation(r.qdots), DrawBorderThenFill(q_j1))
self.play(DrawBorderThenFill(q_fml), DrawBorderThenFill(q_j1))
self.wait(2)
self.play(Uncreate(q_fml), DrawBorderThenFill(r.qdots))
self.wait(1)
self.play(DrawBorderThenFill(r.q2dots), DrawBorderThenFill(q_j2))
self.wait(2)
self.play(FadeOut(r.q2dots, DOWN), FadeOut(q_j2, DOWN))
self.wait(1)
#self.play(*[GrowFromPoint(r.pdots[i],r.qdots[i]) for i in range(len(samples))], DrawBorderThenFill(p_j1))
self.play(DrawBorderThenFill(p_fml), DrawBorderThenFill(p_j1))
self.wait(2)
self.play(Uncreate(p_fml), DrawBorderThenFill(r.pdots))
self.wait(1)
#self.play(*[GrowFromPoint(r.ldots[i],r.pdots[i]) for i in range(len(samples))])
t = 5
while t>2:
t-=0.5
c1.set_value(t)
r.update(c0.get_value(),c1.get_value())
self.play(*[MoveToTarget(d) for d in r.qdots], *[MoveToTarget(d) for d in r.pdots],*[MoveToTarget(d) for d in [c_0,c_1]], run_time=0.2, rate_func=linear)
while t<8:
t+=0.5
c1.set_value(t)
r.update(c0.get_value(),c1.get_value())
self.play(*[MoveToTarget(d) for d in r.qdots], *[MoveToTarget(d) for d in r.pdots],*[MoveToTarget(d) for d in [c_0,c_1]], run_time=0.2, rate_func=linear)
while t>2:
t-=0.5
c1.set_value(t)
r.update(c0.get_value(),c1.get_value())
self.play(*[MoveToTarget(d) for d in r.qdots], *[MoveToTarget(d) for d in r.pdots],*[MoveToTarget(d) for d in [c_0,c_1]], run_time=0.2, rate_func=linear)
while t<8:
t+=0.5
c1.set_value(t)
r.update(c0.get_value(),c1.get_value())
self.play(*[MoveToTarget(d) for d in r.qdots], *[MoveToTarget(d) for d in r.pdots],*[MoveToTarget(d) for d in [c_0,c_1]], run_time=0.2, rate_func=linear)
self.wait(1)
#self.play(*[GrowFromPoint(r.ldots[i],r.pdots[i]) for i in range(len(samples))], DrawBorderThenFill(loss))
self.play(DrawBorderThenFill(loss_fml), DrawBorderThenFill(loss))
self.wait(2)
self.play(Uncreate(loss_fml), DrawBorderThenFill(r.ldots))
self.play(*[MoveToTarget(d) for d in r.ldots])
r.update(c0.get_value(),c1.get_value())
#self.play( MoveToTarget(r.ldots))
self.wait(1)
#lg = VGroup(r.ldots, loss)
self.wait(3)
Loss.add_updater(lambda m: m.move_to(r.tdot.get_center() + UP*0.5 + LEFT*0.5))
'''
arrow1 = DoubleArrow(start=axes.c2p(5,-0.2), end=axes.c2p(5+(c1.get_value()-5)*0.9, -0.2))
arrow1.add_updater(lambda m: m.put_start_and_end_on(start=axes.c2p(5,-0.2), end=axes.c2p(5+(c1.get_value()-5)*0.9, -0.2)))
arrow2 = DoubleArrow(start=axes2.c2p(0,-0.017), end=axes2.c2p(0, -0.017)+[Loss.get_center()[0],0,0])
arrow2.add_updater(lambda m: m.put_start_and_end_on(start=axes2.c2p(0,-0.017), end=axes2.c2p(0, -0.017)+[Loss.get_center()[0],0,0]))
line2 = Line(start=axes2.c2p(0, -0.017)+[Loss.get_center()[0],0,0],end=Loss.get_center())
line2.add_updater(lambda m: m.put_start_and_end_on(start=axes2.c2p(0, -0.017)+[Loss.get_center()[0],0,0],end=Loss.get_center()))
'''
self.play(self.camera.frame.animate.scale(1.6), run_time=1)
self.play(self.camera.frame.animate.shift(RIGHT*5.5),FadeIn(axes2))
self.wait(1)
#self.play(Write(arrow1))
#self.play(TransformFromCopy(arrow1, arrow2))
#self.play(Write(line2))
self.play(TransformFromCopy(r.ldots, r.tdot),MoveToTarget(r.tdot))
self.play(Write(Loss))
trail = TracedPath(r.tdot.get_center,time_per_anchor=0.2)
self.add(trail)
self.wait(3)
while t>2:
t-=0.5
c1.set_value(t)
r.update(c0.get_value(),c1.get_value())
self.play(*[MoveToTarget(d) for d in r.qdots], *[MoveToTarget(d) for d in r.pdots],*[MoveToTarget(d) for d in r.ldots],*[MoveToTarget(d) for d in [c_0,c_1]],MoveToTarget(r.tdot),run_time=1, rate_func=linear)
self.wait(3)
``` |
{
"source": "057a3dd61f99517a3afea0051a49cb27994f94d/msoffcrypto-tool",
"score": 3
} |
#### File: msoffcrypto-tool/msoffcrypto/__init__.py
```python
import olefile
__version__ = "4.6.3"
def OfficeFile(file):
'''Return an office file object based on the format of given file.
Args:
file (:obj:`_io.BufferedReader`): Input file.
Returns:
BaseOfficeFile object.
Examples:
>>> f = open("tests/inputs/example_password.docx", "rb")
>>> officefile = OfficeFile(f)
>>> officefile.keyTypes
('password', 'private_key', 'secret_key')
'''
ole = olefile.OleFileIO(file)
# TODO: Make format specifiable by option in case of obstruction
# Try this first; see https://github.com/nolze/msoffcrypto-tool/issues/17
if ole.exists('EncryptionInfo'):
from .format.ooxml import OOXMLFile
return OOXMLFile(file)
# MS-DOC: The WordDocument stream MUST be present in the file.
# https://msdn.microsoft.com/en-us/library/dd926131(v=office.12).aspx
elif ole.exists('wordDocument'):
from .format.doc97 import Doc97File
return Doc97File(file)
# MS-XLS: A file MUST contain exactly one Workbook Stream, ...
# https://msdn.microsoft.com/en-us/library/dd911009(v=office.12).aspx
elif ole.exists('Workbook'):
from .format.xls97 import Xls97File
return Xls97File(file)
else:
raise Exception("Unrecognized file format")
```
#### File: msoffcrypto/method/ecma376_agile.py
```python
import logging
import hashlib, functools, io
from struct import pack, unpack
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def _hashCalc(i, algorithm):
if algorithm == "SHA512":
return hashlib.sha512(i)
else:
return hashlib.sha1(i)
class ECMA376Agile:
def __init__(self):
pass
@staticmethod
def decrypt(key, keyDataSalt, hashAlgorithm, ibuf):
r'''
Return decrypted data.
>>> key = b'<KEY>'
>>> keyDataSalt = b'\<KEY>'
>>> hashAlgorithm = 'SHA512'
'''
SEGMENT_LENGTH = 4096
obuf = io.BytesIO()
totalSize = unpack('<I', ibuf.read(4))[0]
logger.debug("totalSize: {}".format(totalSize))
remaining = totalSize
ibuf.seek(8)
for i, buf in enumerate(iter(functools.partial(ibuf.read, SEGMENT_LENGTH), b'')):
saltWithBlockKey = keyDataSalt + pack('<I', i)
iv = _hashCalc(saltWithBlockKey, hashAlgorithm).digest()
iv = iv[:16]
aes = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
decryptor = aes.decryptor()
dec = decryptor.update(buf) + decryptor.finalize()
if remaining < len(buf):
dec = dec[:remaining]
obuf.write(dec)
remaining -= len(buf)
return obuf.getvalue() # return obuf.getbuffer()
@staticmethod
def makekey_from_privkey(privkey, encryptedKeyValue):
privkey = serialization.load_pem_private_key(privkey.read(), password=None, backend=default_backend())
skey = privkey.decrypt(encryptedKeyValue, padding.PKCS1v15())
return skey
@staticmethod
def makekey_from_password(password, saltValue, hashAlgorithm, encryptedKeyValue, spinValue, keyBits):
r'''
Generate intermediate key from given password.
>>> password = '<PASSWORD>'
>>> saltValue = b'Lr]E\xdca\x0f\x93\x94\x12\xa0M\xa7\x91\x04f'
>>> hashAlgorithm = 'SHA512'
>>> encryptedKeyValue = b"\xa1l\xd5\x16Zz\xb9\xd2q\x11>\xd3\x86\xa7\x8c\xf4\x96\x92\xe8\xe5'\xb0\xc5\xfc\x00U\xed\x08\x0b|\xb9K"
>>> spinValue = 100000
>>> keyBits = 256
>>> expected = b'@ f\t\xd9\xfa\xad\xf2K\x07j\xeb\xf2\xc45\xb7B\x92\xc8\xb8\xa7\xaa\x81\xbcg\x9b\xe8\x97\x11\xb0*\xc2'
>>> ECMA376Agile.makekey_from_password(password, saltValue, hashAlgorithm, encryptedKeyValue, spinValue, keyBits) == expected
True
'''
block3 = bytearray([0x14, 0x6e, 0x0b, 0xe7, 0xab, 0xac, 0xd0, 0xd6])
# Initial round sha512(salt + password)
h = _hashCalc(saltValue + password.encode("UTF-16LE"), hashAlgorithm)
# Iteration of 0 -> spincount-1; hash = sha512(iterator + hash)
for i in range(0, spinValue, 1):
h = _hashCalc(pack("<I", i) + h.digest(), hashAlgorithm)
h2 = _hashCalc(h.digest() + block3, hashAlgorithm)
# Needed to truncate skey to bitsize
skey3 = h2.digest()[:keyBits // 8]
# AES encrypt the encryptedKeyValue with the skey and salt to get secret key
aes = Cipher(algorithms.AES(skey3), modes.CBC(saltValue), backend=default_backend())
decryptor = aes.decryptor()
skey = decryptor.update(encryptedKeyValue) + decryptor.finalize()
return skey
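# --- Illustrative usage sketch (not part of the library) --------------------
# The salts, encryptedKeyValue, spin count and key size are normally parsed
# from the EncryptionInfo stream of the OOXML container (not shown in this
# module); the byte values and file name below are placeholders only.
if __name__ == '__main__':
    import io
    saltValue = b'\x00' * 16          # placeholder for <encryptedKey saltValue=...>
    keyDataSalt = b'\x00' * 16        # placeholder for <keyData saltValue=...>
    encryptedKeyValue = b'\x00' * 32  # placeholder for <encryptedKey encryptedKeyValue=...>
    skey = ECMA376Agile.makekey_from_password(
        'correct horse battery staple', saltValue, 'SHA512',
        encryptedKeyValue, spinValue=100000, keyBits=256)
    # 'encrypted_package.bin' is a hypothetical dump of the EncryptedPackage stream
    with open('encrypted_package.bin', 'rb') as f:
        plaintext = ECMA376Agile.decrypt(skey, keyDataSalt, 'SHA512', io.BytesIO(f.read()))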
``` |
{
"source": "05bit/pushka",
"score": 3
} |
#### File: pushka/_http/base.py
```python
import asyncio
class BaseHTTPClient:
"""Simple abstract async HTTP client.
We use this interface to have ability of switching between
`AsyncHTTPClient` from Tornado and asyncio powered `aiohttp`.
Provides HTTP methods calls as coroutines:
get(url, params=None, **kwargs)
post(url, params=None, **kwargs)
put(url, params=None, data=None, **kwargs)
delete(url, params=None, data=None, **kwargs)
Args:
url (str): request URL
params (dict): query parameters added to URL
data: dict or raw data for request body
Keyword Args:
auth: Authentication data, for basic auth (login, password) tuple
headers: Custom headers
Other keyword arguments are not normalized for different HTTP clients
and should not be used! If you need it, you may need to update
:func:`.utils.norm_tornado_kwargs` and :func:`.utils.norm_aiohttp_kwargs`.
"""
@asyncio.coroutine
def get(self, url, params=None, **kwargs):
return self._request(url, 'GET', params=params, **kwargs)
@asyncio.coroutine
def post(self, url, params=None, data=None, **kwargs):
return self._request(url, 'POST', data=(data if data else {}),
params=params, **kwargs)
@asyncio.coroutine
def put(self, url, params=None, data=None, **kwargs):
return self._request(url, 'PUT', data=(data if data else {}),
params=params, **kwargs)
@asyncio.coroutine
def delete(self, url, params=None, **kwargs):
return self._request(url, 'DELETE', params=params, **kwargs)
@asyncio.coroutine
def _request(self, url, type, params=None, data=None, **kwargs):
raise NotImplementedError
```
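Since `_request` is the only abstract piece, a stub subclass is enough to exercise the interface. The sketch below is purely illustrative (it is not one of pushka's real clients); it assumes `BaseHTTPClient` from above is in scope and keeps the package's old-style `@asyncio.coroutine` idiom, whose wrapper transparently awaits the coroutine that `_request` returns.
```python
import asyncio

class DummyHTTPClient(BaseHTTPClient):
    """Illustrative stub: records calls and returns a canned response."""
    def __init__(self):
        self.calls = []

    @asyncio.coroutine
    def _request(self, url, type, params=None, data=None, **kwargs):
        self.calls.append((type, url, params, data))
        return {'status': 200, 'body': 'ok'}

loop = asyncio.get_event_loop()
client = DummyHTTPClient()
result = loop.run_until_complete(client.get('http://example.com/api', params={'q': 'x'}))
print(result)        # {'status': 200, 'body': 'ok'}
print(client.calls)  # [('GET', 'http://example.com/api', {'q': 'x'}, None)]
```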
#### File: pushka/_http/utils.py
```python
from urllib.parse import urlencode
def merge_url(url, params):
"""Merge URL params with query params and return new URL."""
if params:
url = url.strip('&?')
url += '&' if ('?' in url) else '?'
url += urlencode(params)
return url
def encode_body(body):
"""If body is ``dict`` object, perform url encoding,
else do nothing.
"""
return urlencode(body) if isinstance(body, dict) else body
def norm_tornado_kwargs(**kwargs):
"""Normalize request parameters for Tornado client."""
if 'auth' in kwargs:
auth = kwargs.pop('auth')
kwargs['auth_username'] = auth[0]
kwargs['auth_password'] = auth[1]
return kwargs
def norm_aiohttp_kwargs(**kwargs):
"""Normalize request parameters for aiohttp client."""
if 'auth' in kwargs:
kwargs['auth'] = _norm_aiohttp_auth(kwargs['auth'])
return kwargs
def _norm_aiohttp_auth(auth):
import aiohttp.helpers
return aiohttp.helpers.BasicAuth(login=auth[0], password=auth[1])
```
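A quick illustration of the two helpers above; the URL and parameters are arbitrary.
```python
print(merge_url('https://api.example.com/v1/items?sort=asc', {'page': 2, 'limit': 10}))
# -> https://api.example.com/v1/items?sort=asc&page=2&limit=10

print(encode_body({'name': 'demo', 'tags': 'a b'}))
# -> name=demo&tags=a+b

print(encode_body(b'raw bytes'))   # non-dict bodies are passed through unchanged
# -> b'raw bytes'
```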
#### File: pushka/_providers/twilio.py
```python
import logging
import asyncio
import json
from .. import base
class TwilioSMSService(base.BaseSMSService):
"""Twilio powered SMS sender, subclass of :class:`.BaseSMSService`
Args:
loop: asyncio event loop or Tornado IOLoop
account (str): Twilio account identifier
token (str): Twilio secret token
default_sender (str): Default sender phone number
"""
url = 'https://api.twilio.com/2010-04-01/Accounts/{account}/Messages.json'
def __init__(self, *, loop, account, token, default_sender=None):
super().__init__(loop=loop, default_sender=default_sender)
self._account = account
self._token = token
self._http = self.new_http_client()
@asyncio.coroutine
def send_sms(self, *, text, recipients, sender=None):
"""Send SMS asynchronously.
See :meth:`.BaseSMSService.send_sms` docs for
parameters reference.
"""
answers = []
url = self.url.format(account=self._account)
recipients = [recipients] if isinstance(recipients, str) else recipients
for to_phone in recipients:
data = {
'To': to_phone.startswith('+') and to_phone or ('+%s' % to_phone),
'From': sender or self.default_sender,
'Body': text,
}
result = yield from self._http.post(
url, data=data, auth=(self._account, self._token))
answers.append(result)
return answers
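# --- Illustrative usage sketch (not part of the library) --------------------
# Placeholder credentials and phone numbers; assumes an asyncio event loop and
# whichever HTTP backend new_http_client() (defined in the base service) picks.
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    sms = TwilioSMSService(loop=loop,
                           account='ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',  # placeholder account SID
                           token='your-auth-token',                       # placeholder auth token
                           default_sender='+15550000001')
    answers = loop.run_until_complete(
        sms.send_sms(text='Hello from pushka', recipients=['+15550000002']))
    print(answers)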
``` |
{
"source": "05bit/python-signa",
"score": 2
} |
#### File: python-signa/signa/core.py
```python
from signa.providers import (
s3,
b2,
dospaces,
yaobject,
oss,
)
PROVIDERS = {
's3': s3.new,
'b2': b2.new,
'dospaces': dospaces.new,
'yaobject': yaobject.new,
'oss': oss.new,
}
def new(_provider, **kwargs):
assert _provider in PROVIDERS, (
"Unknown provider: '%s', available: %s" % (
_provider, list(PROVIDERS.keys())
)
)
return PROVIDERS[_provider](**kwargs)
class Factory:
def __init__(self, _provider, **base_params):
self._provider = _provider
self.base_params = base_params
def new(self, **kwargs):
for k, v in self.base_params.items():
kwargs.setdefault(k, v)
return new(self._provider, **kwargs)
if __name__ == '__main__':
import os
import json
import requests
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
access_key = os.environ['AWS_ACCESS_KEY_ID']
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
signed = new(
's3',
method='PUT',
region='eu-central-1',
bucket='jokelikeme-fr',
key='test.txt',
payload='UNSIGNED-PAYLOAD',
headers={
'x-amz-acl': 'public-read',
},
auth={
'access_key': access_key,
'secret_key': secret_key,
})
print(json.dumps(signed, indent=2))
print('\n')
r = requests.put(signed['url'], headers=signed['headers'], data=b'xxxxxxxx')
r.raise_for_status()
print(r.text)
print('\n')
# test_string = """
# AWS4-HMAC-SHA256
# 20150830T123600Z
# 20150830/us-east-1/service/aws4_request
# 816cd5b414d056048ba4f7c5386d6e0533120fb1fcfa93762cf0fc39e2cf19e0
# """.strip()
# access_key = 'AKIDEXAMPLE'
# secret_key = '<KEY>'
# date_only = '20150830'
# date_key = _hmac(('AWS4' + secret_key).encode('utf-8'), date_only)
# date_region_key = _hmac(date_key, 'us-east-1')
# date_region_service_key = _hmac(date_region_key, 'service')
# signing_key = _hmac(date_region_service_key, 'aws4_request')
# print(test_string)
# print("Calculated:\n%s" % _hmac(signing_key, test_string, hexdigest=True))
# print("Must be:\nb97d918cfa904a5beff61c982a1b6f458b799221646efd99d3219ec94cdf2500")
```
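`Factory` only pre-fills keyword arguments for `new()`. A short sketch of the intent, with placeholder credentials; the exact keyword set each provider accepts is defined in its own module.
```python
# Placeholders only: a Factory pre-fills the parameters shared by many calls.
s3_signer = Factory('s3',
                    region='eu-central-1',
                    bucket='my-bucket',
                    auth={'access_key': 'AKIA...', 'secret_key': '...'})

signed_get = s3_signer.new(method='GET', key='path/to/object.txt')
signed_put = s3_signer.new(method='PUT', key='path/to/object.txt', payload='UNSIGNED-PAYLOAD')
print(signed_get['url'])
print(signed_put['headers'])
```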
#### File: signa/providers/aws.py
```python
import datetime
import hashlib
import hmac
import urllib.parse
from signa.logger import get_logger
utcnow = datetime.datetime.utcnow
logger = get_logger(__name__)
def aws_headers(method=None, region=None, service=None, uri=None,
auth=None, headers=None, payload=None):
headers = headers.copy() if headers else {}
access_key = auth['access_key']
secret_key = auth['secret_key']
timestamp = utcnow().strftime('%Y%m%dT%H%M%SZ')
date_only = timestamp[:8]
scope = '%s/%s/%s/aws4_request' % (date_only, region, service)
if payload == 'UNSIGNED-PAYLOAD':
payload_hash = 'UNSIGNED-PAYLOAD'
elif payload:
payload_hash = _sha256(payload)
else:
payload_hash = _sha256('')
headers['x-amz-content-sha256'] = payload_hash
headers['x-amz-date'] = timestamp
if uri:
uri_parts = urllib.parse.urlparse(uri)
path = uri_parts.path
query = uri_parts.query
else:
path = '/'
query = ''
headers_keys = sorted(list(headers.keys()))
canonical_request = '\n'.join([
method or 'GET',
path,
query,
'\n'.join(['%s:%s' % (k.lower(), headers[k])
for k in headers_keys]),
'',
';'.join(headers_keys).lower(),
payload_hash,
]).strip()
logger.debug(canonical_request)
str_to_sign = '\n'.join([
'AWS4-HMAC-SHA256',
timestamp,
scope,
_sha256(canonical_request),
])
# logger.debug(str_to_sign)
base_key = ('AWS4' + secret_key).encode('utf-8')
date_key = _hmac(base_key, date_only)
date_region_key = _hmac(date_key, region)
date_region_service_key = _hmac(date_region_key, service)  # must match the service used in the credential scope
signing_key = _hmac(date_region_service_key, 'aws4_request')
# logger.debug(signing_key)
signature = _hmac(signing_key, str_to_sign, hexdigest=True)
# logger.debug(signature)
headers['Authorization'] = (
'AWS4-HMAC-SHA256 '
'Credential=%s/%s,'
'SignedHeaders=%s,'
'Signature=%s' % (
access_key,
scope,
';'.join(headers_keys),
signature)
)
return headers
def _sha256(data):
return hashlib.sha256(data.encode('utf-8')).hexdigest()
def _hmac(key, msg, hexdigest=False):
h = hmac.new(key, msg=msg.encode('utf-8'),
digestmod=hashlib.sha256)
if hexdigest:
return h.hexdigest()
else:
return h.digest()
```
#### File: signa/providers/b2.py
```python
from .aws import aws_headers
REGIONS = {
'us-west-000', 'us-west-001', 'us-west-002', 'eu-central-003'
}
def new(method=None, region=None, bucket=None, key=None,
auth=None, headers=None, payload=None):
headers = headers.copy() if headers else {}
# assert region in REGIONS
# headers['host'] = '%s.s3.%s.backblazeb2.com' % (bucket, region)
# https://s3.<region>.backblazeb2.com/<bucket>
headers['host'] = 's3.%s.backblazeb2.com' % region
if key:
rel_uri = '/%s/%s' % (bucket, key)
else:
rel_uri = '/%s' % bucket
headers.update(aws_headers(
method=method,
region=region,
service='s3',
uri=rel_uri,
auth=auth,
headers=headers,
payload=payload
))
return {
'url': 'https://%s%s' % (headers['host'], rel_uri),
'headers': headers,
}
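# --- Illustrative usage sketch (not part of the library) --------------------
# Mirrors the S3 example in core.py; bucket, key and credentials are placeholders.
if __name__ == '__main__':
    import requests
    signed = new(method='PUT',
                 region='eu-central-003',   # one of the regions listed above
                 bucket='my-bucket',
                 key='hello.txt',
                 payload='UNSIGNED-PAYLOAD',
                 auth={'access_key': '<keyID>', 'secret_key': '<applicationKey>'})
    r = requests.put(signed['url'], headers=signed['headers'], data=b'hello world')
    r.raise_for_status()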
``` |
{
"source": "0-5-blood-prince/Search_Engine",
"score": 3
} |
#### File: Search_Engine/ESA/evaluation.py
```python
from util import *
# Add your import statements here
class Evaluation():
def queryPrecision(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):
"""
Computation of precision of the Information Retrieval System
at a given value of k for a single query
Parameters
----------
arg1 : list
A list of integers denoting the IDs of documents in
their predicted order of relevance to a query
arg2 : int
The ID of the query in question
arg3 : list
The list of IDs of documents relevant to the query (ground truth)
arg4 : int
The k value
Returns
-------
float
The precision value as a number between 0 and 1
"""
precision = 0.0
num = 0.0
for i in range(k):
if query_doc_IDs_ordered[i] in true_doc_IDs:
num+=1
#Fill in code here
precision = num/k
return precision
def meanPrecision(self, doc_IDs_ordered, query_ids, qrels, k):
"""
Computation of precision of the Information Retrieval System
at a given value of k, averaged over all the queries
Parameters
----------
arg1 : list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
arg2 : list
A list of IDs of the queries for which the documents are ordered
arg3 : list
A list of dictionaries containing document-relevance
judgements - Refer cran_qrels.json for the structure of each
dictionary
arg4 : int
The k value
Returns
-------
float
The mean precision value as a number between 0 and 1
"""
meanPrecision = 0.0
d = {}
for q in query_ids:
d[q] = set()
# print(q,d[q])
# print(e["query_num"])
# print(e["id"])
# print(d)
for e in qrels:
# print(e["query_num"])
# print(d.get(e["query_num"]))
# print(e["query_num"],d[e["query_num"]])
if int(e["query_num"]) in d:
d[int(e["query_num"])].add(int(e["id"]))
#Fill in code here
q = []
for i in range(len(query_ids)):
prec = self.queryPrecision(doc_IDs_ordered[i],query_ids[i],d[query_ids[i]],k)
meanPrecision += prec
q.append(prec)
meanPrecision /= len(query_ids)
return meanPrecision , q
def queryRecall(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):
"""
Computation of recall of the Information Retrieval System
at a given value of k for a single query
Parameters
----------
arg1 : list
A list of integers denoting the IDs of documents in
their predicted order of relevance to a query
arg2 : int
The ID of the query in question
arg3 : list
The list of IDs of documents relevant to the query (ground truth)
arg4 : int
The k value
Returns
-------
float
The recall value as a number between 0 and 1
"""
recall = 0.0
num = 0.0
for i in range(k):
if query_doc_IDs_ordered[i] in true_doc_IDs:
num+=1
#Fill in code here
num_relev_docs = len(true_doc_IDs)
recall = num/num_relev_docs
return recall
def meanRecall(self, doc_IDs_ordered, query_ids, qrels, k):
"""
Computation of recall of the Information Retrieval System
at a given value of k, averaged over all the queries
Parameters
----------
arg1 : list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
arg2 : list
A list of IDs of the queries for which the documents are ordered
arg3 : list
A list of dictionaries containing document-relevance
judgements - Refer cran_qrels.json for the structure of each
dictionary
arg4 : int
The k value
Returns
-------
float
The mean recall value as a number between 0 and 1
"""
meanRecall = 0.0
d = {}
for q in query_ids:
d[q] = set()
for e in qrels:
if int(e["query_num"]) in d:
d[int(e["query_num"])].add(int(e["id"]))
#Fill in code here
r = []
for i in range(len(query_ids)):
rec = self.queryRecall(doc_IDs_ordered[i],query_ids[i],d[query_ids[i]],k)
meanRecall += rec
r.append(rec)
meanRecall /= len(query_ids)
return meanRecall, r
def queryFscore(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):
"""
Computation of fscore of the Information Retrieval System
at a given value of k for a single query
Parameters
----------
arg1 : list
A list of integers denoting the IDs of documents in
their predicted order of relevance to a query
arg2 : int
The ID of the query in question
arg3 : list
The list of IDs of documents relevant to the query (ground truth)
arg4 : int
The k value
Returns
-------
float
The fscore value as a number between 0 and 1
"""
fscore = 0.0
precision = 0.0
recall = 0.0
num = 0.0
for i in range(k):
if query_doc_IDs_ordered[i] in true_doc_IDs:
num+=1
#Fill in code here
num_relev_docs = len(true_doc_IDs)
recall = num/num_relev_docs
precision = num/k
#Fill in code here
if precision + recall == 0:
return 0
fscore = (2*precision*recall) / (precision + recall)
return fscore
def meanFscore(self, doc_IDs_ordered, query_ids, qrels, k):
"""
Computation of fscore of the Information Retrieval System
at a given value of k, averaged over all the queries
Parameters
----------
arg1 : list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
arg2 : list
A list of IDs of the queries for which the documents are ordered
arg3 : list
A list of dictionaries containing document-relevance
judgements - Refer cran_qrels.json for the structure of each
dictionary
arg4 : int
The k value
Returns
-------
float
The mean fscore value as a number between 0 and 1
"""
meanFscore = 0.0
d = {}
for q in query_ids:
d[q] = set()
for e in qrels:
if int(e["query_num"]) in d:
d[int(e["query_num"])].add(int(e["id"]))
#Fill in code here
for i in range(len(query_ids)):
meanFscore += self.queryFscore(doc_IDs_ordered[i],query_ids[i],d[query_ids[i]],k)
meanFscore /= len(query_ids)
#Fill in code here
return meanFscore
def queryNDCG(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):
"""
Computation of nDCG of the Information Retrieval System
at given value of k for a single query
Parameters
----------
arg1 : list
A list of integers denoting the IDs of documents in
their predicted order of relevance to a query
arg2 : int
The ID of the query in question
arg3 : dict
A dict mapping the IDs of documents relevant to the query to their relevance positions (ground truth)
arg4 : int
The k value
Returns
-------
float
The nDCG value as a number between 0 and 1
"""
nDCG = 0.0
dcg = 0.0
num = 0.0
rel = []
for i in range(k):
if query_doc_IDs_ordered[i] in true_doc_IDs.keys():
r = true_doc_IDs[query_doc_IDs_ordered[i]]
dcg += (5 - r) / math.log2(i+2)
rel.append(r)
else:
rel.append(5)
rel.sort()
idcg = 0.0
for i in range(k):
idcg += (5-rel[i])/math.log2(i+2)
if idcg==0:
return 0
# print(dcg,idcg)
nDCG = dcg / idcg
return nDCG
def meanNDCG(self, doc_IDs_ordered, query_ids, qrels, k):
"""
Computation of nDCG of the Information Retrieval System
at a given value of k, averaged over all the queries
Parameters
----------
arg1 : list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
arg2 : list
A list of IDs of the queries for which the documents are ordered
arg3 : list
A list of dictionaries containing document-relevance
judgements - Refer cran_qrels.json for the structure of each
dictionary
arg4 : int
The k value
Returns
-------
float
The mean nDCG value as a number between 0 and 1
"""
meanNDCG = 0.0
d = {}
for q in query_ids:
d[q] = {}
for e in qrels:
if int(e["query_num"]) in d:
d[int(e["query_num"])][int(e["id"])] = int(e["position"])
#Fill in code here
for i in range(len(query_ids)):
meanNDCG += self.queryNDCG(doc_IDs_ordered[i],query_ids[i],d[query_ids[i]],k)
meanNDCG /= len(query_ids)
return meanNDCG
def queryAveragePrecision(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):
"""
Computation of average precision of the Information Retrieval System
at a given value of k for a single query (the average of precision@i
values for i such that the ith document is truly relevant)
Parameters
----------
arg1 : list
A list of integers denoting the IDs of documents in
their predicted order of relevance to a query
arg2 : int
The ID of the query in question
arg3 : list
The list of documents relevant to the query (ground truth)
arg4 : int
The k value
Returns
-------
float
The average precision value as a number between 0 and 1
"""
avgPrecision = 0.0
num = 0.0
for i in range(k):
if query_doc_IDs_ordered[i] in true_doc_IDs:
num+=1
precision = num/(i+1)
avgPrecision += precision
#Fill in code here
if num==0:
return 0
avgPrecision /= num
return avgPrecision
def meanAveragePrecision(self, doc_IDs_ordered, query_ids, q_rels, k):
"""
Computation of MAP of the Information Retrieval System
at given value of k, averaged over all the queries
Parameters
----------
arg1 : list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
arg2 : list
A list of IDs of the queries
arg3 : list
A list of dictionaries containing document-relevance
judgements - Refer cran_qrels.json for the structure of each
dictionary
arg4 : int
The k value
Returns
-------
float
The MAP value as a number between 0 and 1
"""
meanAveragePrecision = 0.0
d = {}
for q in query_ids:
d[q] = set()
for e in q_rels:
if int(e["query_num"]) in d:
d[int(e["query_num"])].add(int(e["id"]))
#Fill in code here
for i in range(len(query_ids)):
meanAveragePrecision += self.queryAveragePrecision(doc_IDs_ordered[i],query_ids[i],d[query_ids[i]],k)
meanAveragePrecision /= len(query_ids)
#Fill in code here
return meanAveragePrecision
def precision_recall( self, precisions,recalls, query_ids):
###### precisions ranks * num_queries
# pass
query_tup_list = [{} for i in range(len(query_ids))]
for i in range(len(query_ids)):
q = []
for j in range(len(precisions)):
q.append((recalls[j][i],-precisions[j][i]))
q.sort()
for item in q:
if query_tup_list[i].get(item[0],None) != None:
query_tup_list[i][item[0]] = max(-item[1],query_tup_list[i][item[0]])
else:
query_tup_list[i][item[0]] = -item[1]
# interpolate precision at 21 evenly spaced recall levels: 0, 0.05, ..., 1
interpo = 21.0
avg = [0.0 for i in range(int(interpo))]
for i in range(len(query_ids)):
for k in range(int(interpo)):
p = -1
for a, b in query_tup_list[i].items():
if a >= (k / (interpo-1) ):
p = max(p,b)
if p!=-1:
avg[k] = avg[k] + p
avg = [avg[i]/(len(query_ids)) for i in range(len(avg))]
x_axis = [i/(interpo-1) for i in range( int(interpo) ) ]
return avg, x_axis
```
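As a quick sanity check of the metrics above, here is a toy example; it assumes the `Evaluation` class is in scope, the qrels entries mimic the cran_qrels.json structure referenced in the docstrings, and the numbers are purely illustrative.
```python
evaluator = Evaluation()

# Two queries, their top-4 retrieved lists, and a toy relevance judgement set.
doc_IDs_ordered = [[12, 7, 3, 9], [5, 1, 8, 2]]
query_ids = [1, 2]
qrels = [
    {"query_num": "1", "id": "7", "position": "1"},
    {"query_num": "1", "id": "9", "position": "2"},
    {"query_num": "2", "id": "5", "position": "1"},
]

mean_p, per_query_p = evaluator.meanPrecision(doc_IDs_ordered, query_ids, qrels, k=4)
mean_r, per_query_r = evaluator.meanRecall(doc_IDs_ordered, query_ids, qrels, k=4)
print(mean_p, per_query_p)   # query 1 hits 2/4, query 2 hits 1/4 -> mean 0.375
print(mean_r, per_query_r)   # query 1 finds 2/2, query 2 finds 1/1 -> mean 1.0
print(evaluator.meanFscore(doc_IDs_ordered, query_ids, qrels, k=4))
print(evaluator.meanNDCG(doc_IDs_ordered, query_ids, qrels, k=4))
print(evaluator.meanAveragePrecision(doc_IDs_ordered, query_ids, qrels, k=4))
```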
#### File: Search_Engine/ESA/tokenization.py
```python
from util import *
class Tokenization():
def naive(self, text):
"""
Tokenization using a Naive Approach
Parameters
----------
arg1 : list
A list of strings where each string is a single sentence
Returns
-------
list
A list of lists where each sub-list is a sequence of tokens
"""
tokenizedText = [list(filter(bool,re.split('[^a-zA-Z0-9_]', sentence))) for sentence in text]
#Fill in code here
return tokenizedText
def pennTreeBank(self, text):
"""
Tokenization using the Penn Tree Bank Tokenizer
Parameters
----------
arg1 : list
A list of strings where each string is a single sentence
Returns
-------
list
A list of lists where each sub-list is a sequence of tokens
"""
tokenizedText = [TreebankWordTokenizer().tokenize(sentence) for sentence in text]
#Fill in code here
return tokenizedText
```
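A quick comparison of the two strategies, assuming the `re` and NLTK `TreebankWordTokenizer` imports pulled in through `util` are available:
```python
tokenizer = Tokenization()
sentences = ["Mr. O'Neill doesn't like high-speed flow.", "Neither do I."]

print(tokenizer.naive(sentences))
# naive regex splitting drops punctuation and breaks contractions:
# [['Mr', 'O', 'Neill', 'doesn', 't', 'like', 'high', 'speed', 'flow'], ['Neither', 'do', 'I']]

print(tokenizer.pennTreeBank(sentences))
# the Penn Treebank tokenizer keeps punctuation and clitics as separate tokens,
# e.g. 'does' + "n't" and a trailing '.'
```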
#### File: Search_Engine/LSA/informationRetrieval.py
```python
from util import *
import numpy as np
# Add your import statements here
class InformationRetrieval():
def __init__(self):
self.index = None
### Basic vector space model
def buildIndex_basic(self, docs, docIDs):
"""
Builds the document index in terms of the document
IDs and stores it in the 'index' class variable
Parameters
----------
arg1 : list
A list of lists of lists where each sub-list is
a document and each sub-sub-list is a sentence of the document
arg2 : list
A list of integers denoting IDs of the documents
Returns
-------
None
"""
self.terms_list = set()
self.term_doc_freq = {}
self.index = {}
self.num_docs = len(docs)
self.doc_len = {}
self.doc_id = docIDs.copy()
doc_terms = {}
for i in range(self.num_docs):
doc_terms[docIDs[i]] = []
for sentence in docs[i]:
for term in sentence:
if term not in self.terms_list:
self.terms_list.add(term)
if self.index.get((term, docIDs[i]),0.0) == 0.0:
doc_terms[docIDs[i]].append(term)
self.index[(term, docIDs[i])] = self.index.get((term,docIDs[i]),0.0)+1.0
for term in self.terms_list:
for id in docIDs:
if self.index.get((term,id),0) != 0.0:
self.term_doc_freq[term] = 1.0+self.term_doc_freq.get(term,0.0)
for k in self.index.keys():
self.index[k] = self.index[k]*math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0))
for id in docIDs:
v = 0.0
for term in doc_terms[id]:
v += (math.pow(self.index.get((term,id),0.0),2.0))
self.doc_len[id] = math.sqrt(v)
# print(list(self.doc_len.values())[:4])
# print(list(self.index.keys())[:4],list(self.index.values())[:4])
return
def rank_basic(self, queries):
"""
Rank the documents according to relevance for each query
Parameters
----------
arg1 : list
A list of lists of lists where each sub-list is a query and
each sub-sub-list is a sentence of the query
Returns
-------
list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
"""
doc_IDs_ordered = []
query_dic = {}
query_len = {}
query_terms = [[] for i in range(len(queries))]
for i in range(len(queries)):
for sentence in queries[i]:
for term in sentence:
if query_dic.get((term, i),0.0) == 0.0:
query_terms[i].append(term)
query_dic[(term, i)] = query_dic.get((term, i),0.0)+1.0
for k in query_dic.keys():
query_dic[k] = query_dic[k]*math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0))
for id in range(len(queries)):
v = 0.0
for term in self.terms_list:
v += (math.pow(query_dic.get((term,id),0.0),2.0))
query_len[id] = math.sqrt(v)
for i in range(len(queries)):
buff = []
for d in self.doc_id:
if self.doc_len[d] == 0.0:
buff.append((0.0,d))
continue
dot = 0.0
for term in query_terms[i]:
dot += (query_dic.get((term,i),0.0)*self.index.get((term,d),0.0))
buff.append((dot/(query_len[i]*self.doc_len[d]),d))
buff.sort(reverse=True)
doc_IDs_ordered.append([i[1] for i in buff])
return doc_IDs_ordered
### Latent Semantic Indexing method
def buildIndex_lsi(self, docs, docIDs, dim):
"""
Builds the document index in terms of the document
IDs and stores it in the 'index' class variable
Parameters
----------
arg1 : list
A list of lists of lists where each sub-list is
a document and each sub-sub-list is a sentence of the document
arg2 : list
A list of integers denoting IDs of the documents
arg 3:
Number of factors to perform svd
Returns
-------
None
"""
self.dim = dim
### Set of Terms
self.terms_list = set()
        ### Global measure: per-term document frequency
self.term_doc_freq = {}
### term - doc inverse index
self.index = {}
self.num_docs = len(docs)
### Length of document vector
self.doc_len = {}
self.doc_id = docIDs.copy()
## Terms in each doc
doc_terms = {}
for i in range(self.num_docs):
doc_terms[docIDs[i]] = []
for sentence in docs[i]:
for term in sentence:
if term not in self.terms_list:
self.terms_list.add(term)
if self.index.get((term, docIDs[i]),0.0) == 0.0: ### get(key,0.0) return 0.0 if not available
doc_terms[docIDs[i]].append(term)
self.index[(term, docIDs[i])] = self.index.get((term,docIDs[i]),0.0)+1.0
for term in self.terms_list:
for id in docIDs:
if self.index.get((term,id),0) != 0.0:
self.term_doc_freq[term] = 1.0 + self.term_doc_freq.get(term,0.0)
### Using tf idf improved LSA performance
for k in self.index.keys():
            self.index[k] = self.index[k] * math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0)) # tf * smoothed idf
# print(self.terms_list)
# print(list(self.doc_len.values())[:4])
# print(list(self.index.keys())[:4],list(self.index.values())[:4])
return
def svd_lsi(self , docIDs):
'''
Performs SVD and stores the required matrices and representations
'''
self.num_index = []
tl = list(self.terms_list)
tl.sort()
# print(tl)
# for t in tl:
# print(t)
for i in range(len(tl)):
a = [0.0 for j in range(self.num_docs)]
for j in range(self.num_docs):
a[j] = self.index.get((tl[i],docIDs[j]),0.0)
self.num_index.append(a)
self.num_index = np.asarray(self.num_index)
self.u, self.sig, self.v = np.linalg.svd(self.num_index)
self.sig = np.diag(self.sig)
# print(self.sig)
self.u = self.u[:, :self.dim]
self.sig = self.sig[:self.dim, :self.dim]
# print(self.v.shape)
self.v = self.v.T[:, :self.dim]
# print(self.num_index)
self.num_index = self.u @ self.sig @ self.v.T
# print(self.num_index)
# print(self.num_index.T.shape , self.u.shape , np.linalg.pinv(self.sig).shape)
# self.transform_docs = self.num_index.T @ self.u @ self.sig
self.transform_docs = self.v #### Representation of docs in Semantic space
# self.transform_docs = self.num_index.T
for j in range(self.num_docs):
self.doc_len[docIDs[j]] = np.linalg.norm(self.transform_docs[j])
return
def rank_lsi(self, queries):
"""
Rank the documents according to relevance for each query
Parameters
----------
arg1 : list
A list of lists of lists where each sub-list is a query and
each sub-sub-list is a sentence of the query
Returns
-------
list
A list of lists of integers where the ith sub-list is a list of IDs
of documents in their predicted order of relevance to the ith query
"""
doc_IDs_ordered = []
query_dic = {}
query_len = {}
query_terms = [[] for i in range(len(queries))]
for i in range(len(queries)):
for sentence in queries[i]:
for term in sentence:
if query_dic.get((term, i),0.0) == 0.0:
query_terms[i].append(term)
query_dic[(term, i)] = query_dic.get((term, i),0.0)+1.0
for k in query_dic.keys():
query_dic[k] = query_dic[k]*math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0))
query_list = []
tl = list(self.terms_list)
tl.sort()
for i in range(len(queries)):
a = [0.0 for j in range(len(tl))]
for j in range(len(tl)):
a[j] = query_dic.get((tl[j],i),0.0)
query_list.append(a)
query_list = np.asarray(query_list)
print(query_list.shape , self.u.shape , np.linalg.pinv(self.sig).shape)
self.transform_queries = query_list @ self.u @ (self.sig) ### Representations of Queries in Semantic space
self.query_len = [0 for i in range(len(queries))]
for j in range(len(queries)):
self.query_len[j] = np.linalg.norm(self.transform_queries[j])
for i in range(len(queries)):
buff = []
for j in range(self.num_docs):
dot = self.transform_queries[i] @ self.transform_docs[j]
d = self.doc_id[j]
if self.query_len[i] == 0 or self.doc_len[d] == 0:
# print("Bro",i,self.query_len[i],d,self.doc_len[d]) ### 471 and 995 are NULL Docs
continue
buff.append((dot/(self.query_len[i]*self.doc_len[d]),d))
buff.sort(reverse=True)
doc_IDs_ordered.append([i[1] for i in buff])
return doc_IDs_ordered
### Supervised Learning model which incorporates relevance feedback
def train(self, queries, query_ids, qrels, w):
'''
returns X(which is enough to represent the trained supervised model) and the ranked docs for training queries with LSI and Supervised models
'''
query_dic = {}
query_len = {}
query_terms = [[] for i in range(len(queries))]
for i in range(len(queries)):
for sentence in queries[i]:
for term in sentence:
if query_dic.get((term, i),0.0) == 0.0:
query_terms[i].append(term)
query_dic[(term, i)] = query_dic.get((term, i),0.0)+1.0
for k in query_dic.keys():
query_dic[k] = query_dic[k]*math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0))
query_list = []
tl = list(self.terms_list)
tl.sort()
for i in range(len(queries)):
a = [0.0 for j in range(len(tl))]
for j in range(len(tl)):
a[j] = query_dic.get((tl[j],i),0.0)
query_list.append(a)
query_list = np.asarray(query_list)
# print(query_list.shape , self.u.shape , np.linalg.pinv(self.sig).shape)
self.transform_queries = query_list @ self.u @ (self.sig) ### Representations of queries in Semantic space
# Supervised learning is done in context space so below transformation not necessary
        # if you want to, apply that transformation and use num_index (the docs represented in term space) instead of transform_docs
# self.transform_queries = self.u @ self.sig @ np.transpose(self.transform_queries)
################################ Preprocessing the representations of docs and queries in semantic space ###########################
self.query_len = [0 for i in range(len(queries))]
for j in range(len(queries)):
self.query_len[j] = np.linalg.norm(self.transform_queries[j])
self.transform_queries[j] /= self.query_len[j]
self.transform_queries = self.transform_queries.T
shape_Q = self.transform_queries.shape
# assert(shape_Q[0]==self.dim and shape_Q[1]==len(queries))
for j in range(self.num_docs):
if self.doc_len[self.doc_id[j]]!=0:
self.transform_docs[j] /= self.doc_len[self.doc_id[j]]
self.transform_docs = self.transform_docs.T
shape_D = self.transform_docs.shape
# assert(shape_D[0]==self.dim and shape_D[1]==self.num_docs)
# print(self.transform_queries.shape,self.transform_docs.shape)
########################### FINDING X part ################################
Q_concat = np.concatenate((self.transform_queries, self.transform_docs),axis=1)
# Q_concat = self.transform_queries
A = np.zeros((self.num_docs,len(queries)))
d = {}
for q in query_ids:
d[q] = set()
for e in qrels:
if int(e["query_num"]) in d:
d[int(e["query_num"])].add(int(e["id"]))
for i in range(len(queries)):
for j in range(self.num_docs):
if self.doc_id[j] in d[query_ids[i]]:
A[j][i] = 1
# print(A)
A = A * w
A_concat = np.concatenate((A,(self.transform_docs.T @ self.transform_docs)), axis=1)
# A_concat = A
# print(A_concat)
print("Norm A concat :",np.linalg.norm(A_concat))
#solving M*
q,r = np.linalg.qr(self.transform_docs.T)
y = q.T @ A_concat
M_star = np.linalg.pinv(r) @ y
print("Norm A concat - DtM:",np.linalg.norm( (A_concat- (self.transform_docs.T @ M_star) ) ))
#solving X*
print("Norm M*t :", np.linalg.norm(M_star.T))
q,r = np.linalg.qr(Q_concat.T)
y = q.T @ M_star.T
X_star_trans = np.linalg.pinv(r) @ y
print("Norm M*t - Qt Xt:", np.linalg.norm( (M_star.T - (Q_concat.T @ X_star_trans) ) ))
X_star = X_star_trans.T
# assert(X_star.shape==(self.dim,self.dim))
###################### Similarity Matrix calculation #####################################
sim_matrix_sup = self.transform_docs.T @ X_star @ Q_concat
sim_matrix_sup = sim_matrix_sup[:,:(len(queries))]
sim_matrix_lsi = self.transform_docs.T @ self.transform_queries
# print(sim_matrix_lsi)
# print(sim_matrix_sup)
# print(sim_matrix_lsi)
# print(sim_matrix_sup)
doc_IDs_ordered_lsi = []
doc_IDs_ordered_sup = []
for i in range(len(queries)):
buff_lsi = []
buff_sup = []
for j in range(self.num_docs):
d = self.doc_id[j]
buff_lsi.append((sim_matrix_lsi[j][i],d))
buff_sup.append((sim_matrix_sup[j][i],d))
buff_lsi.sort(reverse=True)
buff_sup.sort(reverse=True)
doc_IDs_ordered_lsi.append([i[1] for i in buff_lsi])
doc_IDs_ordered_sup.append([i[1] for i in buff_sup])
return X_star , doc_IDs_ordered_lsi, doc_IDs_ordered_sup
def test(self, queries, query_ids, qrels, X):
'''
returns the ranked docs for testing queries with LSI and Supervised model(the model trained in function train)
'''
query_dic = {}
query_len = {}
query_terms = [[] for i in range(len(queries))]
for i in range(len(queries)):
for sentence in queries[i]:
for term in sentence:
if query_dic.get((term, i),0.0) == 0.0:
query_terms[i].append(term)
query_dic[(term, i)] = query_dic.get((term, i),0.0)+1.0
for k in query_dic.keys():
query_dic[k] = query_dic[k]*math.log10(self.num_docs/(self.term_doc_freq.get(k[0],0.0)+1.0))
query_list = []
tl = list(self.terms_list)
tl.sort()
for i in range(len(queries)):
a = [0.0 for j in range(len(tl))]
for j in range(len(tl)):
a[j] = query_dic.get((tl[j],i),0.0)
query_list.append(a)
query_list = np.asarray(query_list)
print(query_list.shape , self.u.shape , np.linalg.pinv(self.sig).shape)
self.transform_queries = query_list @ self.u @ (self.sig)
# Supervised learning is done in context space so below transformation not necessary
        # if you want to, apply that transformation and use num_index instead of transform_docs
# self.transform_queries = self.u @ self.sig @ np.transpose(self.transform_queries)
################################ Preprocessing the representations of queries in semantic space ###########################
self.query_len = [0 for i in range(len(queries))]
for j in range(len(queries)):
self.query_len[j] = np.linalg.norm(self.transform_queries[j])
self.transform_queries[j] /= self.query_len[j]
self.transform_queries = self.transform_queries.T
shape_Q = self.transform_queries.shape
# assert(shape_Q[0]==self.dim and shape_Q[1]==len(queries))
Q_concat = np.concatenate((self.transform_queries, self.transform_docs),axis=1)
        ############################# Similarity Matrix Calculation #####################################
sim_matrix_sup = self.transform_docs.T @ X @ Q_concat
sim_matrix_sup = sim_matrix_sup[:,:(len(queries))]
sim_matrix_lsi = self.transform_docs.T @ self.transform_queries
print(sim_matrix_lsi)
print(sim_matrix_sup)
# print(sim_matrix_lsi)
# print(sim_matrix_sup)
doc_IDs_ordered_lsi = []
doc_IDs_ordered_sup = []
for i in range(len(queries)):
buff_lsi = []
buff_sup = []
for j in range(self.num_docs):
d = self.doc_id[j]
buff_lsi.append((sim_matrix_lsi[j][i],d))
buff_sup.append((sim_matrix_sup[j][i],d))
buff_lsi.sort(reverse=True)
buff_sup.sort(reverse=True)
doc_IDs_ordered_lsi.append([i[1] for i in buff_lsi])
doc_IDs_ordered_sup.append([i[1] for i in buff_sup])
return doc_IDs_ordered_lsi, doc_IDs_ordered_sup
```
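A possible end-to-end use of the class above on three tiny hand-made documents; the module path and the ranking shown in the comment are assumptions, only the method names come from the file:
```python
from informationRetrieval import InformationRetrieval  # assumed module path

docs = [
    [["solar", "panel", "efficiency"]],
    [["wind", "turbine", "blade", "design"]],
    [["solar", "energy", "storage"]],
]
doc_ids = [1, 2, 3]
queries = [[["solar", "power"]]]

ir = InformationRetrieval()
ir.buildIndex_lsi(docs, doc_ids, dim=2)  # tf-idf weighted term-document index
ir.svd_lsi(doc_ids)                      # truncated SVD into a 2-dimensional semantic space
print(ir.rank_lsi(queries))              # e.g. [[1, 3, 2]]: the "solar" documents rank first
```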
#### File: Search_Engine/LSA/sentenceSegmentation.py
```python
from util import *
class SentenceSegmentation():
def naive(self, text):
"""
Sentence Segmentation using a Naive Approach
Parameters
----------
arg1 : str
A string (a bunch of sentences)
Returns
-------
list
A list of strings where each string is a single sentence
"""
### Punctuation and Spaces
lines = text.splitlines()
segmentedText = []
for line in lines:
l = list(filter(bool,re.split('[?!:.]',line)))
segmentedText.extend([i.strip() for i in l])
#Fill in code here
return segmentedText
def punkt(self, text):
"""
Sentence Segmentation using the Punkt Tokenizer
Parameters
----------
arg1 : str
A string (a bunch of sentences)
Returns
-------
list
            A list of strings where each string is a single sentence
"""
segmentedText = tokenizer(text)
#Fill in code here
return segmentedText
``` |
{
"source": "05dt/scrapy",
"score": 3
} |
#### File: demo2/spiders/book_spider.py
```python
import scrapy
from ..items import BookItem
class BooksSpider(scrapy.Spider):
    # the unique identifier of this spider
name = "books"
    # starting point(s) of the crawl; multiple URLs are allowed, only one is used here
start_urls = ['http://books.toscrape.com/']
def parse(self, response):
        # extract the data
        # each book's info lives in an <article class="product_pod"> element; css() finds every such element and we iterate over them
        # changed part starts here
for sel in response.css('article.product_pod'):
book = BookItem()
book['name'] = sel.xpath('./h3/a/@title').extract_first()
book['price'] = sel.css('p.price_color::text').extract_first()
yield book
        # changed part ends here
        # extract the pagination link
        # the URL of the "next" button is in the href attribute of the ul.pager > li.next > a element
next_url = response.css('ul.pager li.next a::attr(href)').extract_first()
if next_url:
            # if a next-page URL was found, resolve it to an absolute path and build a new Request
            # urljoin() builds the complete absolute URL
next_url = response.urljoin(next_url)
yield scrapy.Request(next_url, callback=self.parse)
```
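The spider imports `BookItem` from `..items`, which is not part of this snippet. A minimal definition consistent with the two fields the spider fills (`name`, `price`) might look like the sketch below; the crawl itself is normally started with `scrapy crawl books -o books.csv`.
```python
# demo2/items.py (hypothetical reconstruction; only the two fields used by BooksSpider)
import scrapy


class BookItem(scrapy.Item):
    name = scrapy.Field()   # book title taken from the <a title="..."> attribute
    price = scrapy.Field()  # price text taken from <p class="price_color">
```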
#### File: demo7/spiders/book_spider.py
```python
import scrapy
class BooksSpider(scrapy.Spider):
    # the unique identifier of this spider
name = "books"
    # starting point(s) of the crawl; multiple URLs are allowed, only one is used here
start_urls = ['http://books.toscrape.com/']
def parse(self, response):
        # extract the data
        # each book's info lives in an <article class="product_pod"> element; css() finds every such element and we iterate over them
for book in response.css('article.product_pod'):
            # the book title is in the title attribute of the article > h3 > a element
name = book.xpath('./h3/a/@title').extract_first()
            # the price is in the text of <p class="price_color">£23.88</p>
price = book.css('p.price_color::text').extract_first()
yield{
'书名': name,
'价格': price,
}
        # extract the pagination link
        # the URL of the "next" button is in the href attribute of the ul.pager > li.next > a element
next_url = response.css('ul.pager li.next a::attr(href)').extract_first()
if next_url:
            # if a next-page URL was found, resolve it to an absolute path and build a new Request
            # urljoin() builds the complete absolute URL
next_url = response.urljoin(next_url)
yield scrapy.Request(next_url, callback=self.parse)
``` |
{
"source": "05xapresses/Menel",
"score": 3
} |
#### File: Menel/commands/dywan.py
```python
from random import choice
def setup(cliffs):
@cliffs.command('dywan [<width: int>] [<length: int>]', name='dywan', cooldown=3)
async def command(m, width=15, length=10):
if width <= 0 or length <= 0:
await m.error('Taki dywan byłby za mały, kasztanie')
return
if width > 25 or length > 100 or width * length > 2000:
await m.error('Taki dywan byłby za szeroki, kasztanie!')
return
lines = list()
for _ in range(length):
line = str()
for _ in range(width):
line += choice('╱╲')
lines.append(f'┃{line}┃')
line = '━' * width
lines.insert(0, f'┏{line}┓')
lines.append(f'┗{line}┛')
lines = '```\n' + '\n'.join(lines) + '\n```'
await m.success(f'Proszę, oto Twój darmowy dywan\n{lines}')
```
#### File: Menel/commands/webimg.py
```python
from asyncio import sleep
from io import BytesIO
from discord import File
from pyppeteer import launch
from pyppeteer.errors import NetworkError, PageError, TimeoutError
from ..objects.bot import bot
from ..objects.message import Message
def setup(cliffs):
@cliffs.command('webimg [scrolling|fullpage]:fullpage <url...>', name='webimg', cooldown=10)
async def command(m: Message, url, fullpage=None):
async with m.channel.typing():
browser = await launch(ignoreHTTPSErrors=True, headless=True, loop=bot.loop, args=['--no-sandbox'])
page = await browser.newPage()
await page.setViewport({'width': 2048, 'height': 1024, 'deviceScaleFactor': 2})
try:
await page.goto(url, timeout=60000)
except TimeoutError:
await m.error('Minął czas na wczytanie strony.')
except (PageError, NetworkError):
await m.error('Nie udało się wczytać strony. Sprawdź czy podany adres jest poprawny.')
else:
await sleep(2)
try:
screenshot = await page.screenshot(
type='png',
fullPage=fullpage is not None,
encoding='binary'
)
except NetworkError as e:
await m.error(str(e))
else:
await m.send(file=File(BytesIO(screenshot), 'screenshot.png'))
finally:
await browser.close()
```
#### File: Menel/functions/clean_content.py
```python
from discord.utils import escape_markdown, escape_mentions
def clean_content(content: any, *, markdown: bool = True, mentions: bool = True) -> str:
if not isinstance(content, str):
content = str(content)
if markdown:
content = escape_markdown(content)
if mentions:
content = escape_mentions(content)
return content
```
#### File: Menel/functions/constant_length_text.py
```python
def constant_length_text(text: str, length: int):
if len(text) > length:
return text[:length - 1] + '…'
else:
return text.rjust(length, ' ')
```
#### File: Menel/handlers/message.py
```python
import re
import discord
from cliffs import CommandDispatcher
from ..command_dispatcher.dispatch import dispatch
from ..functions.constant_length_text import constant_length_text as clt
from ..functions.cut_long_text import cut_long_text
from ..objects.bot import Menel
from ..objects.cooldowns import cooldowns
from ..objects.message import Message
from ..resources import regexes
def setup(bot: Menel, cliffs: CommandDispatcher):
@bot.event
async def on_message(m: discord.Message):
m = Message(m)
if m.content:
print(f'{clt(str(m.guild), 16)}\t{clt(str(m.channel), 16)}\t{clt(str(m.author), 16)}' +
' -> ' + cut_long_text(m.clean_content, 128))
if m.author.bot or not m.guild:
return
prefix = '.'
if re.fullmatch(regexes.mention(bot.user.id), m.content):
if not cooldowns.auto(m.author.id, '_mention', 3):
await m.channel.send('Cześć')
elif m.content.lower().startswith(prefix):
await dispatch(cliffs, m, prefix)
elif re.match(regexes.mention(bot.user.id), m.content):
await dispatch(cliffs, m, f'@{bot.user.name}')
```
#### File: Menel/handlers/reaction_add.py
```python
import discord
from ..objects.bot import Menel
def setup(bot: Menel):
@bot.event
async def on_raw_reaction_add(payload: discord.RawReactionActionEvent):
if payload.user_id == bot.OWNER and payload.emoji.name == '🗑️':
try:
message = await (await bot.fetch_channel(payload.channel_id)).fetch_message(payload.message_id)
if message.author == bot.user:
await message.delete()
except discord.HTTPException as e:
print(e)
```
#### File: Menel/objects/cooldowns.py
```python
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Optional
from ..objects.bot import bot
class Cooldowns:
def __init__(self):
self.cooldowns = defaultdict(dict)
def get(self, user_id: int, command: str) -> Optional[float]:
if user_id not in self.cooldowns or command not in self.cooldowns[user_id]:
return None
if user_id == bot.OWNER:
return None
if self.cooldowns[user_id][command] <= datetime.utcnow().timestamp():
return None
else:
return self.cooldowns[user_id][command] - datetime.utcnow().timestamp()
def set(self, user_id: int, command: str, time: Optional[int]) -> None:
if time:
self.cooldowns[user_id][command] = (datetime.utcnow() + timedelta(seconds=time)).timestamp()
def auto(self, user_id: int, command: str, time: int) -> Optional[float]:
cooldown = self.get(user_id, command)
if not cooldown:
self.set(user_id, command, time)
return cooldown
cooldowns = Cooldowns()
```
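A short usage sketch of the `auto` helper defined above; the import path mirrors how the handlers import the module, and the user id and timing are invented:
```python
from Menel.objects.cooldowns import cooldowns  # assumed package path

remaining = cooldowns.auto(123456789, 'dywan', 3)
if remaining:                                   # a float: the cooldown is still running
    print(f'Try again in {remaining:.1f}s')
else:                                           # None: allowed, and a new 3 s cooldown was set
    print('Command allowed')
```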
#### File: Menel/resources/regexes.py
```python
import re
DISCORD_BOT_TOKEN = re.compile(r'(?:[\w\-=]+)\.(?:[\w\-=]+)\.(?:[\w\-=]+)', re.ASCII)
def mention(user_id: int) -> re.Pattern:
return re.compile(rf'<@!?{user_id}>', re.IGNORECASE | re.ASCII)
``` |
{
"source": "0604hx/buter",
"score": 2
} |
#### File: buter/app/DockerController.py
```python
import sys
from subprocess import Popen
from flask import jsonify, request
from buter import Q, ServiceException
from buter.app import dockerBp
from buter.server import docker
@dockerBp.route("/images", methods=['GET', 'POST'])
def images():
"""
docker image inspect alpine
:return:
"""
_images = []
try:
_images = [{
"size": i.attrs['Size'],
'id': i.short_id,
'name': i.tags[0],
'created': i.attrs['Created'],
'dockerVersion': i.attrs['DockerVersion']
} for i in docker.listImage()]
except Exception:
pass
return jsonify(_images)
@dockerBp.route("/logs/<aid>", methods=['GET', 'POST'])
def logs(aid):
"""
    Fetch the log output of a container.
    Shows the most recent 1000 records by default.
:param aid:
:return:
"""
tail = Q('tail', 1000, int)
d = ""
try:
d = docker.logs(aid, tail)
except Exception:
pass
return d
@dockerBp.route("/install", methods=['GET', 'POST'])
def install():
"""
    Install docker; only supported on Linux systems.
    On Linux, simply run
    curl -sSL http://acs-public-mirror.oss-cn-hangzhou.aliyuncs.com/docker-engine/internet | sh -
    to install docker quickly; see https://yq.aliyun.com/articles/7695?spm=5176.100239.blogcont29941.14.kJOgzy
:return:
"""
    is_linux = sys.platform.startswith('linux')  # sys.platform reports 'linux', not 'Linux'
if not is_linux:
raise ServiceException("只支持在 Linux 系统下安装 docker(其他平台请手动安装)")
# cmd = "curl -sSL http://acs-public-mirror.oss-cn-hangzhou.aliyuncs.com/docker-engine/internet | sh -"
pass
```
#### File: buter/main/IndexController.py
```python
from flask import jsonify
from buter.logger import LOG
from buter.server import docker
from buter.util import OSUtil
from . import mainBp
@mainBp.route("/heartbeat/<string:data>")
def heartbeat(data):
"""
    Heartbeat test; echoes the argument straight back.
:param data:
:return:
"""
LOG.info("heartbeat testing : %s", data)
return data
@mainBp.route("/info")
def sys_info():
"""
:return:
"""
info = {
'system': OSUtil.getOSInfo(),
'docker': docker.version(),
'python': OSUtil.getPythonInfo()
}
return jsonify(info)
```
#### File: buter/resource/ResourceController.py
```python
from flask import jsonify
from buter import db, ServiceException
from buter.logger import LOG
from buter.util import Result
from buter.util.FlaskTool import Q
from . import resourceBp
from ..models import Resource
@resourceBp.route("/list", methods=['GET', 'POST'])
def lists():
name = Q('name')
clauses = []
if name is not None:
clauses += [Resource.name.like("%{}%".format(name))]
count, items = Resource.query.pageFind(clauses)
return jsonify(Result.ok(data=items, count=count))
@resourceBp.route("/delete", methods=['GET', 'POST'])
@resourceBp.route("/delete/<aid>", methods=['GET', 'POST'])
def delete(aid=None):
aid = aid if aid is not None else Q('ids', type=int)
LOG.info("客户端请求删除 ID=%d 的资源..." % aid)
app = Resource.query.get(aid)
if app:
db.session.delete(app)
db.session.commit()
LOG.info("成功删除 ID=%d 的资源" % aid)
return jsonify(Result.ok())
else:
raise ServiceException("ID=%d 的成功不存在故不能执行删除操作..." % aid)
```
#### File: buter/util/OSUtil.py
```python
import sys
import platform
import os
def getOSInfo():
"""
    Get operating system information.
:return:
"""
return {
'system': platform.system(),
'version': platform.version(),
'machine': platform.machine(),
'platform': sys.platform,
'64Bit': sys.maxsize > 2 ** 32,
'cpu': platform.processor()
}
def getPythonInfo():
"""
    Get Python information; returns the 'version' and 'compiler' attributes.
:return:
"""
return {
'version': platform.python_version(),
'compiler': platform.python_compiler()
}
def listDir(path, detail=False):
"""
    List every directory and file under a path; if detail = True, also compute file sizes.
:param path:
:param detail:
    :return: a list whose elements are { name, path, file, size }
"""
if not os.path.isdir(path):
return []
files = [
{
"name": f,
"path": os.path.join(path, f),
"file": os.path.isfile(os.path.join(path, f)),
"size": -1
}
for f in os.listdir(path)
]
if detail:
for f in [f for f in files if f['file']]:
f['size'] = os.path.getsize(f['path'])
return files
```
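Usage of the helpers above, using the same `from buter.util import OSUtil` import the controllers use (the directory and output values are illustrative):
```python
from buter.util import OSUtil

print(OSUtil.getOSInfo()['system'])  # e.g. 'Linux' or 'Windows'

for entry in OSUtil.listDir('/tmp', detail=True):
    kind = 'file' if entry['file'] else 'dir'
    print(f"{entry['name']:<30} {kind:<4} {entry['size']}")
```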
#### File: 0604hx/buter/config.py
```python
import os
import logging
import sys
ENCODING = "utf-8"
# on Windows the docker-related configuration differs
IS_WINDOWS = (sys.platform == 'win32')
#
# for a program packaged with pyinstaller (single file) the 'frozen' attribute must be checked
# see: https://pyinstaller.readthedocs.io/en/stable/runtime-information.html
#
IS_PYINSTALLER = (getattr(sys, 'frozen', False) == True)
# determine the program's root directory; the log and database file locations are derived from it
BASE_DIR = os.path.dirname(sys.executable) \
if IS_PYINSTALLER \
else os.path.abspath(os.path.dirname(__file__))
SETTING_FILE = "setting"
# under a pyinstaller build, default to 'prod'
env = os.getenv('FLASK_CONFIG') or ('prod' if IS_PYINSTALLER else 'default')
def getPath(name):
"""
    Build a path relative to the directory of the currently running script
:param name:
:return:
"""
return os.path.join(BASE_DIR, name)
class BasicConfig:
USE_RELOADER = False
# default secret-key is md5("buter")
SECRET_KEY = os.environ.get('SECRET_KEY') or '<KEY>'
"""
    Whether to use the HTTPS protocol.
    If true, an ssl_context argument is passed when starting the server; the official description is:
an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
HTTPS = None
'''
    Uses a local sqlite database.
    To switch to another database:
    MySQL : mysql://scott:tiger@localhost/foo
    Oracle : oracle://scott:tiger@127.0.0.1:1521/sidname
    See http://docs.sqlalchemy.org/en/latest/core/engines.html for more options
'''
SQLALCHEMY_DATABASE_URI = "sqlite:///"+getPath("buter.db")
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
DEBUG = True
'''
    Server settings
'''
    # default port is 5000
SERVER_PORT = 5000
    # set to 127.0.0.1 if access should be limited to this machine only
SERVER_HOST = '0.0.0.0'
SERVER_STATIC_DIR = getPath('static')
SERVER_INDEX_PAGE = "index.html"
'''
    Logging configuration
'''
LOG_LEVEL = logging.DEBUG
LOG_FORMAT = '%(asctime)s %(levelname)s %(process)d - [%(threadName)s] %(filename)s (%(lineno)d) : %(message)s'
LOG_FILE = getPath("logs/buter") if IS_PYINSTALLER else "./logs/buter.log"
    # produce one log file per day by default (S, M, D, W0-W6 stand for seconds, minutes, hours, days and weeks)
LOG_FILE_WHEN = "D"
    # log rotation interval (in the units given above)
LOG_FILE_INTERVAL = 1
    # keep the last 15 days of logs by default
LOG_BACKUP = 15
LOG_ENCODING = 'utf-8'
'''
    Docker settings
'''
DOCKER_HOST = None
DOCKER_CERT_PATH = None
DOCKER_TLS_VERIFY = None
    # set this to False to skip connecting to the Docker server
DOCKER_ABLE = True
    # docker connection timeout, in seconds
DOCKER_TIMEOUT = 10
'''
    Scheduled jobs
'''
JOBS = [
{
'id': 'checkDocker',
'func': 'buter.schedule.jobs:checkDocker',
'args': (),
'trigger': 'interval',
'seconds': 60
}
]
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(BasicConfig):
"""
    Development configuration; used as the default when none is specified
"""
# LOG_FILE = None
'''
    Docker settings
    in development the docker server runs under docker-toolbox
'''
if IS_WINDOWS:
print("detected Buter running on windows, DOCKER configuration will set around this platform...\n")
DOCKER_HOST = "tcp://192.168.99.100:2376"
DOCKER_CERT_PATH = "C:\\Users\\Administrator\\.docker\\machine\\certs"
DOCKER_TLS_VERIFY = "1"
class TestingConfig(BasicConfig):
SQLALCHEMY_DATABASE_URI = "sqlite:///"+getPath("buter-test.db")
LOG_FILE = None
JOBS = None
class ProductionConfig(BasicConfig):
DEBUG = False
LOG_LEVEL = logging.INFO
configs = {
'dev': DevelopmentConfig,
'prod': ProductionConfig,
'testing': TestingConfig,
'default': DevelopmentConfig
}
def getConfig(name=None, customs=None):
config = configs[env if name is None else name]
'''
    If a setting.py exists in the root directory, load it automatically
'''
setting_file = getPath(SETTING_FILE+".py")
if os.path.exists(setting_file):
print("detected setting.py exist, try to use it...")
sys.path.append(BASE_DIR)
customSettings = __import__(SETTING_FILE)
for s in [s for s in dir(customSettings) if not s.startswith("__")]:
value = customSettings.__getattribute__(s)
print("replace or setting {:25} to {}".format(s, value))
setattr(config, s, value)
if customs is not None and isinstance(customs, dict):
for s in [s for s in customs.keys() if not s.startswith("__")]:
value = customs.get(s)
print("replace or setting {:25} to {}".format(s, value))
setattr(config, s, value)
return config
```
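Because `getConfig()` copies every non-dunder attribute from an optional `setting.py` found next to the executable onto the active config class, deployments can be tuned without touching the code. A hypothetical override file could look like this:
```python
# setting.py -- optional overrides picked up automatically by getConfig()
SERVER_PORT = 8080
SERVER_HOST = '127.0.0.1'   # listen on localhost only
DOCKER_ABLE = False         # do not connect to a Docker server
LOG_BACKUP = 30             # keep a month of rotated logs
```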
#### File: tests/app/test_app_service.py
```python
import json
import unittest
from buter.app.services import load_from_file, detect_app_name
from buter.server import docker
from buter.util.Utils import unzip
from config import getConfig
class AppServiceTest(unittest.TestCase):
def setUp(self):
"""
        Only the server.docker object needs to be initialised here
:return:
"""
config = getConfig('dev')
docker.setup(config)
def test_load_from_file(self):
load_from_file("G:/tidb.zip")
def test_load_image(self):
docker.loadImage("G:/tidb.tar")
def test_json_read(self):
with open("G:/app.json") as content:
app = json.load(content) # '{"name":"abc"}'
print(app)
docker.createContainer("pingcap/tidb", app['cmd'], app['args'])
def test_detect_app_name(self):
app = json.loads('{"image":"pingcap/tidb", "args":{"name":"tidb01"}}')
self.assertEqual("tidb", detect_app_name(None, app['image']))
self.assertEqual("tidb01", detect_app_name(app['args']))
self.assertEqual("tidb", detect_app_name("tidb"))
def test_unzip(self):
file_path = "G:/test/test.zip"
unzip(file_path, "G:/test")
def test_list_container(self):
containers = docker.listContainer()
print(containers)
for c in containers:
print("container: name={}, id={} ({}), labels={}, stat={}"
.format(c.name, c.id, c.short_id, c.labels, c.status))
print([{"name": c.name, "id": c.short_id, "labels": c.labels, "stat": c.status} for c in containers])
cs = dict((c.name, {"id": c.short_id, "labels": c.labels, "stat": c.status}) for c in containers)
print(cs)
if __name__ == '__main__':
unittest.main()
```
#### File: buter/tests/test_setting.py
```python
import unittest
from config import getConfig
class SettingTestCase(unittest.TestCase):
def test_something(self):
config = getConfig()
for k in [kk for kk in dir(config) if not kk.startswith("__")]:
print(k,"=", getattr(config, k))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "06372500/q",
"score": 3
} |
#### File: q/examples/mcs.py
```python
import Adafruit_DHT
import time
import sys
import httplib, urllib
import socket  # needed for socket.error in post_to_mcs
import json
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
deviceId = "DYo1KV2e"
deviceKey = "<KEY>"
def post_to_mcs(payload):
headers = {"Content-type": "application/json", "deviceKey": deviceKey}
not_connected = 1
while (not_connected):
try:
conn = httplib.HTTPConnection("api.mediatek.com:80")
conn.connect()
not_connected = 0
except (httplib.HTTPException, socket.error) as ex:
print "Error: %s" % ex
time.sleep(10)
conn.request("POST", "/mcs/v2/devices/" + deviceId + "/datapoints", json.dumps(payload), headers)
response = conn.getresponse()
print( response.status, response.reason, json.dumps(payload), time.strftime("%c"))
data = response.read()
conn.close()
while 1:
humidity, temperature= Adafruit_DHT.read_retry(11, 4)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
payload = {"datapoints":[{"dataChnId":"Humidity","values":{"value":humidity}},
{"dataChnId":"Temperature","values":{"value":temperature}}]}
post_to_mcs(payload)
time.sleep(3)
else:
print('Failed to get reading. Try again!')
sys.exit(1)
``` |
{
"source": "06f7b1afdb2a4801b0dbde6635f227b7/thresh",
"score": 2
} |
#### File: thresh/thresh/tabular_file_container.py
```python
import sys
import json
import pprint
import keyword
import pathlib
import numpy as np
from collections import OrderedDict
class TabularFile:
"""
The basic representation of tabular files.
"""
def __init__(self, *, content=None, alias=None, name=None, namespace_only=False, length_check=True):
"""
A file, as represented in thresh, requires only two descriptors, the
alias and the data itself. As the data has headers and columns it only
seemed reasonable to store it as an OrderedDict of Numpy arrays.
self.name = str or None
self.alias = str or None
self.content = OrderedDict((
('Column1', np.array([ 0.0, 1.0, 2.0])),
('Column2', np.array([ 1.2, 1.1, 1.0])),
('Column3', np.array([-3.0, 1.4, 1.5])),
)) or None
"""
if not isinstance(namespace_only, bool):
raise TypeError(f"`namespace_only` must be of type bool, not {type(namespace_only)}.")
self.namespace_only = namespace_only
if not isinstance(length_check, bool):
raise TypeError(f"`length_check` must be of type bool, not {type(length_check)}.")
self.length_check = length_check
# Process 'content'. If 'None', initialize an empty OrderedDict
if content is None:
content = OrderedDict()
# Process 'alias'. Must be either 'str' or 'None'
if not isinstance(alias, str) and alias is not None:
raise TypeError(
"Variable 'alias' is not of type str or None: {0}".format(type(alias))
)
if isinstance(alias, str):
if keyword.iskeyword(alias):
raise SyntaxError(
"Alias can not be a python keyword. Got: {0}".format(repr(alias))
)
if not alias.isidentifier():
raise SyntaxError(
"Alias must be a valid python identifier. Got: {0}".format(repr(alias))
)
self.alias = alias
# Process 'name'. Must be either 'str' or 'None'
if not isinstance(name, str) and name is not None:
raise TypeError(
"Variable 'name' is not of type str or None: {0}".format(type(name))
)
self.name = name
# 'content' must be 'OrderedDict'
if not isinstance(content, OrderedDict):
raise TypeError(
"Variable 'content' is not an OrderedDict: {0}".format(repr(content))
)
# All the keys in 'content' must be 'str'
if not all([isinstance(_, str) for _ in content.keys()]):
raise KeyError(
"Variable 'content' has non-string key(s): {0}".format(
list(content.keys())
)
)
# All values in 'content' must have the same length.
if self.length_check and len(content) > 0 and len(set([len(_) for _ in content.values()])) != 1:
raise IndexError(
"arrays in 'content' have varying lengths: {0}".format(
[len(_) for _ in content.values()]
)
)
self.content = OrderedDict(content.items())
def list_headers(self):
"""
Print the list of headers and the header index of the TabularFile. The
header index starts at 1, not 0.
"""
try:
lines = [f"{'col':>4s} | {'length':>6s} | {'header':<s}"]
lines.append("-" * len(lines[0]))
for idx, key in enumerate(self.content.keys()):
lines.append(f"{idx: 4d} | {len(self.content[key]): 6d} | {key:s}")
except Exception as exc:
obj_types = [[str(key), type(val).__name__] for key, val in self.content.items()]
header_len = max([len(_[0]) for _ in obj_types]) + 1
type_len = max([len(_[1]) for _ in obj_types]) + 1
lines = [f"{'name':>{header_len}s} | {'type':>{type_len}s}"]
lines.append("-" * len(lines[0]))
for key, val in obj_types:
lines.append(f"{key:>{header_len}s} | {val:>{type_len}s}")
print("\n".join(lines))
def basic_list_headers(self):
"""
Print all the headers, one per line. This minimally-formatted option
enables looping over headers in bash for-loops and other scripting
fun.
"""
for key in self.content.keys():
print(key)
def as_text(self, *, delimiter=""):
"""
Compile the contents of the TabularFile and return as
text. This allows easy uniform printing to the terminal
or to a file.
"""
if not self.length_check:
return json.dumps(dict(self.content))
# Requres 17 digits to prefectly re-create a double in-memory.
n_chars_decimal = 17
len_biggest_number = len("+1." + n_chars_decimal * "0" + "e+301")
# Ensure that the columns are wide enough for the longest header.
len_biggest_header = max(map(len, self.content.keys()))
n_chars_per_column = max(len_biggest_number, len_biggest_header) + 1
strfmt = "{0:>" + str(n_chars_per_column) + "s}"
fltfmt = "{0:+" + str(n_chars_per_column) + "." + str(n_chars_decimal) + "e}"
lines = []
# Format the headers.
lines.append(delimiter.join(strfmt.format(_) for _ in self.content))
# Format the data lines
keys = list(self.content.keys())
for idx in range(len(self.content[keys[0]])):
lines.append(
delimiter.join(fltfmt.format(self.content[_][idx]) for _ in keys)
)
return "\n".join(lines) + "\n"
@classmethod
def format_if_history_file(cls, lines):
"""
Look to see if it's formatted like a history file. If it is, then
remove the comments at the top (if any) and then remove the two
horizontal rules above and below the headers.
Comments can go here
Comments can go here
---------
col1 col2
=========
1 2
3 4
"""
lines_lengths = [len(_) for _ in lines]
lines_sets = [set(_) for _ in lines]
# Look for two lines that are nothing but '-' or '='.
if {"-", "\n"} not in lines_sets or {"=", "\n"} not in lines_sets:
return lines
# We are looking to grab the last set of '-' and '=' rules.
N = len(lines_sets)
top_idx = (N - 1) - list(reversed(lines_sets)).index({"-", "\n"})
bottom_idx = (N - 1) - list(reversed(lines_sets)).index({"=", "\n"})
# Those two lines must have one line between them (where the
# headers go).
if bottom_idx - top_idx != 2:
return lines
# The lengths of the top rule, bottom rule, and headers must
# be the same.
if len(set(lines_lengths[top_idx : bottom_idx + 1])) != 1:
return lines
# It is a history file - remove the extra lines.
lines.pop(bottom_idx)
for idx in range(top_idx + 1):
lines.pop(0)
return lines
@classmethod
def from_file(cls, filename, alias=None):
"""
Read in a text-delimited or comma-delimited text file
and return the corresponding TabularFile object.
"""
# Convert the filename to a Path if it isn't already.
if isinstance(filename, str):
path_filename = pathlib.Path(filename)
elif isinstance(filename, pathlib.Path):
path_filename = filename
else:
raise TypeError(
f"Argument 'filename' must be str or Path, not {type(filename)}"
)
# Set the alias to None if it is not given
if alias is not None and not isinstance(alias, str):
raise TypeError(f"Argument 'alias' must be None or str, not {type(alias)}")
if path_filename.suffix.lower() == ".json":
if str(path_filename).lower() == "-.json":
json_data = json.load(sys.stdin)
else:
with open(path_filename, "r") as stream:
json_data = json.load(stream)
if not isinstance(json_data, dict):
raise TypeError(f"JSON data must be a dict, not {type(json_data)}.")
return cls(
content=OrderedDict(json_data),
alias=alias,
name=str(filename),
namespace_only=True,
length_check=False,
)
elif path_filename.suffix.lower() == ".csv":
# Comma delimited text
delimiter = ","
if str(path_filename).lower() == "-.csv":
lines = sys.stdin.readlines()
else:
with path_filename.open() as fobj:
lines = fobj.readlines()
else:
# whitespace delimited text.
delimiter = None
if str(path_filename) == "-":
lines = sys.stdin.readlines()
else:
with path_filename.open() as fobj:
lines = fobj.readlines()
lines = cls.format_if_history_file(lines)
head = lines[0].rstrip().split(delimiter)
head = [_.strip() for _ in head]
def can_convert_to_float(x):
try:
float(x)
except:
return False
return True
if all([can_convert_to_float(_) for _ in head]):
sys.stderr.write(
f"WARNING: No headers detected in '{filename}'. Using auto-generated ones.\n"
)
head = [f"column_{_:d}" for _ in range(len(head))]
# Verify that all headers are unique
if len(head) != len(set(head)):
raise KeyError(f"Non-unique headers detected in {path_filename}")
# Read the data
data = np.genfromtxt(lines, skip_header=1, unpack=True, delimiter=delimiter)
if len(data.shape) == 1:
# One column of data (1D). Need to make the array 2D.
data = np.array([data,])
# Put it together
content = OrderedDict(zip(head, data))
return cls(content=content, alias=alias, name=str(filename))
```
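Typical use of the container above, assuming a whitespace- or comma-delimited `data.csv` is present (the import mirrors the project's tests, which access the class as `thresh.TabularFile`):
```python
import thresh

tf = thresh.TabularFile.from_file("data.csv", alias="A")
tf.list_headers()                 # prints the col | length | header table
print(tf.as_text(delimiter=","))  # re-emit the data as comma-delimited text
```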
#### File: thresh/test/test_environment.py
```python
import sys
import itertools
import pathlib
from collections import OrderedDict
import numpy as np
import pytest
import basic_files
import thresh
def test_absolute_truth():
"""Ensure that the testing library is working."""
assert True
def test_capture_output(capsys):
"""Test that the capturing of stdout works."""
print("hello world")
out, err = capsys.readouterr()
assert out == "hello world\n"
assert err == ""
def test_require_python3():
"""The module 'thresh' and these tests require at least Python 3.0."""
assert sys.version_info > (3, 0)
def test_import():
"""Ensure that 'thresh' is imported."""
assert thresh is not None
def test_initialize():
"""Do something simple with 'thresh'."""
assert thresh.__version__ > (0, 0, 0)
#
# Initialize TabularFile Object
#
def test_initialize_TabularFile_no_content():
""" Do a basic initialization of a TabularFile without content. """
tabularfile = thresh.TabularFile()
assert tabularfile.alias is None
assert tabularfile.content == OrderedDict()
def test_initialize_TabularFile_no_alias(content_1):
""" Do a basic initialization of a TabularFile without an alias. """
tabularfile = thresh.TabularFile(content=content_1)
assert tabularfile.alias is None
assert tabularfile.content == content_1
def test_initialize_TabularFile_with_alias(content_1):
""" Do a basic initialization of a TabularFile. """
alias = "A"
tabularfile = thresh.TabularFile(content=content_1, alias=alias)
assert tabularfile.alias == alias
assert tabularfile.content == content_1
@pytest.mark.parametrize("bad_alias", [3.14, ".foo", "fizz.buzz", "1baz", "hello*world", ""])
def test_initialize_TabularFile_with_bad_alias(content_1, bad_alias):
""" TabularFile initialization with bad alias - not str. """
if isinstance(bad_alias, str):
with pytest.raises(SyntaxError):
thresh.TabularFile(content=content_1, alias=bad_alias)
else:
with pytest.raises(TypeError):
thresh.TabularFile(content=content_1, alias=bad_alias)
def test_initialize_TabularFile_with_bad_content_1():
""" TabularFile initialization with bad content - not OrderedDict. """
with pytest.raises(TypeError):
thresh.TabularFile(content=3.14)
def test_initialize_TabularFile_with_bad_content_2(content_1):
""" TabularFile initialization with bad content - non-text key. """
content_1[3.14] = content_1['a']
with pytest.raises(KeyError):
thresh.TabularFile(content=content_1)
def test_initialize_TabularFile_with_bad_content_3(content_1):
""" TabularFile initialization with bad content - uneven column lengths. """
content_1['a'] = np.append(content_1['a'], content_1['a'])
with pytest.raises(IndexError):
thresh.TabularFile(content=content_1)
#
# TabularFile.list_headers()
#
def test_list_headers_default(capsys, tabularfile_2):
""" Check the default behavior of the list_headers() function. """
tabularfile_2.list_headers()
out, err = capsys.readouterr()
assert out == """ col | length | header
----------------------
0 | 4 | time
1 | 4 | strain
2 | 4 | stress
"""
assert err == ""
def test_list_headers_json(capsys, jsonfile_1):
""" Check the default behavior of the list_headers() function. """
jsonfile_1.list_headers()
out, err = capsys.readouterr()
assert out == """name | type\n-----------\n bar | int\n foo | int\n"""
assert err == ""
#
# TabularFile.basic_list_headers()
#
def test_basic_list_headers_default(capsys, tabularfile_2):
""" Check the default behavior of the basic_list_headers() function. """
tabularfile_2.basic_list_headers()
out, err = capsys.readouterr()
assert out == """time
strain
stress
"""
assert err == ""
#
# TabularFile.as_text()
#
def test_as_text_default(tabularfile_3):
""" Verifies the conversion to text with default delimiter. """
txt = tabularfile_3.as_text()
print("comp", repr(txt))
assert txt == ' var1 var2\n +1.57079632679489656e+00 +1.11111111111111105e-01\n +3.14159265358979312e+00 +2.22222222222222210e-01\n +4.71238898038468967e+00 +3.33333333333333315e-01\n'
def test_as_text_whitespace_delimiter(tabularfile_3):
""" Verifies the conversion to text with whitespace delimiter. """
txt = tabularfile_3.as_text(delimiter='')
print("comp", repr(txt))
assert txt == ' var1 var2\n +1.57079632679489656e+00 +1.11111111111111105e-01\n +3.14159265358979312e+00 +2.22222222222222210e-01\n +4.71238898038468967e+00 +3.33333333333333315e-01\n'
def test_as_text_comma_delimiter(tabularfile_3):
""" Verifies the conversion to text with comma delimiter. """
txt = tabularfile_3.as_text(delimiter=',')
print("comp", repr(txt))
assert txt == ' var1, var2\n +1.57079632679489656e+00, +1.11111111111111105e-01\n +3.14159265358979312e+00, +2.22222222222222210e-01\n +4.71238898038468967e+00, +3.33333333333333315e-01\n'
#
# TabularFile.from_file()
#
@pytest.mark.parametrize("thresh_file", [_ for _ in basic_files.base_files if _.startswith("pass_")])
@pytest.mark.parametrize("do_path", [True, False])
def test_from_file_string(thresh_file, do_path):
""" Check that the TabularFile.from_file() function behaves properly. """
solution_content = basic_files.base_files[thresh_file][1]
# Do every test with pathlib and without
file_obj = pathlib.Path(thresh_file) if do_path else thresh_file
pathlib.Path(thresh_file).write_text(basic_files.base_files[thresh_file][0])
tf_obj = thresh.TabularFile.from_file(file_obj)
print("tf_obj.content", tf_obj.content)
for key in tf_obj.content:
assert np.allclose(tf_obj.content[key], solution_content[key],
atol=1.0e-12, rtol=1.0e-12)
def test_from_file_fail_nonunique_headers(thresh_files):
""" Test the TabularFile.from_file() for non-unique headers. """
filename = thresh_files["fail_nonunique_headers.txt"]
with pytest.raises(KeyError):
thresh.TabularFile.from_file(filename)
def test_from_file_fail_unknown_filename_input():
""" Test the TabularFile.from_file() for unknown filename input. """
with pytest.raises(TypeError):
thresh.TabularFile.from_file(lambda x: x+1)
def test_from_file_fail_file_not_found():
""" Test the TabularFile.from_file() for nonexistant files. """
with pytest.raises(FileNotFoundError):
thresh.TabularFile.from_file("c75fc775d1439c3f3d9212d5c813b594.txt")
```
#### File: thresh/test/test_tabular_file_container.py
```python
import copy
import pytest
import thresh
db_good = [
{
"name": "with comments",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
" a b\n",
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "without comments",
"lines": [
"-----\n",
" a b\n",
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "with rules in comments",
"lines": [
"-----------------\n",
"this is a comment\n",
"=================\n",
"this is a comment\n",
"-----\n",
" a b\n",
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
]
@pytest.mark.parametrize("db", db_good, ids=[_["name"] for _ in db_good])
def test_format_if_history_file_good(db):
"""
This covers all the instances where we expect the formatting function
will succeed because the strict formatting rules for a history file
have been met.
"""
lines = db["lines"]
out = thresh.TabularFile.format_if_history_file(copy.deepcopy(lines))
# make sure that it doesn't care about newlines at the end.
out[-1] = out[-1].rstrip("\n")
assert out == [" a b\n", " 1 2\n", " 3 4"]
db_bad = [
{
"name": "no rules - not history file",
"lines": [
" a b\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "missing '-' line",
"lines": [
"this is a comment\n",
"this is a comment\n",
" a b\n",
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "missing '=' line",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----n",
" a b\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "too many header lines",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----n",
" a b\n",
" a b\n",
"=====n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "too few header lines",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "upper rule too long",
"lines": [
"this is a comment\n",
"this is a comment\n",
"------\n",
" a b\n"
"=====n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "upper rule too short",
"lines": [
"this is a comment\n",
"this is a comment\n",
"----\n",
" a b\n"
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "lower rule too long",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
" a b\n"
"======\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "lower rule too short",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
" a b\n"
"====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "header line too long",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
" a b\n"
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
{
"name": "header line too short",
"lines": [
"this is a comment\n",
"this is a comment\n",
"-----\n",
" a b\n"
"=====\n",
" 1 2\n",
" 3 4\n",
],
},
]
@pytest.mark.parametrize("db", db_bad, ids=[_["name"] for _ in db_bad])
def test_format_if_history_file_bad(db):
"""
This covers all the instances where we expect the formatting function
will not do anything because the strict formatting required for the
history file is not met.
"""
lines = db["lines"]
out = thresh.TabularFile.format_if_history_file(copy.deepcopy(lines))
assert lines == out
``` |
{
"source": "06keito/study-atcoder",
"score": 3
} |
#### File: study-atcoder/src/abc175_b.py
```python
def flag(a,b,c):
if a+b>c and b+c>a and c+a>b and a!=b and b!=c and a!=c:
return True
return False
N = int(input())
L = list(map(int,input().split()))
count = 0
for i in range(0,N-2,1):
for j in range(i+1,N-1,1):
for k in range(j+1,N,1):
if flag(L[i],L[j],L[k])==True:
count += 1
print(count)
```
#### File: study-atcoder/src/abc197_b.py
```python
def check(N,S):
if S=='.':
return N+1,False
else:
return N,True
def main():
H,W,X,Y = map(int,input().split())
S = [list(map(str,input())) for i in range(H)]
X -= 1
Y -= 1
ans = -3
for i in range(X,H,1):
ans,flag = check(ans,S[i][Y])
if flag: break
for i in range(X,-1,-1):
ans,flag = check(ans,S[i][Y])
if flag: break
for j in range(Y,W,1):
ans,flag = check(ans,S[X][j])
if flag: break
for j in range(Y,-1,-1):
ans,flag = check(ans,S[X][j])
if flag: break
print(ans)
if __name__ == '__main__':
main()
```
#### File: study-atcoder/src/abc197_c.py
```python
def main():
N = int(input())
array = list(map(int,input().split()))
ans = 10**9+7
if N==1:
print(array[0])
exit()
for i in range(2**(N-1)):
base = 0
or_value = array[0]
for j in range(1,N):
if (i>>(j-1)) & 1:
base ^= or_value
or_value = 0
or_value |= array[j]
else:
or_value |= array[j]
base ^= or_value
ans = min(ans,base)
print(ans)
if __name__ == '__main__':
main()
```
#### File: study-atcoder/src/abc199_c.py
```python
def main():
N = int(input())
S = list(input())
pre,post = S[:N],S[N:]
Q = int(input())
for i in range(Q):
T,A,B = map(int,input().split())
        A,B = A-1,B-1  # convert to 0-based indexing
        if T==1:  # the constraints guarantee A < B
            if B<N:  # swap within the first half only
                pre[A],pre[B] = pre[B],pre[A]
            elif N<=A:  # swap within the second half
                post[A-N],post[B-N] = post[B-N],post[A-N]
            else:  # A is in the first half, B in the second
                pre[A],post[B-N] = post[B-N],pre[A]
        else:  # for T==2, simply swap the first and second halves
            pre,post = post,pre
print(''.join(pre+post))
if __name__ == '__main__':
main()
```
#### File: study-atcoder/src/abc200_c.py
```python
import collections
from scipy.special import comb
def nCr(n,r):
return comb(n, r, exact=True)
N = int(input())
A = list(map(int,input().split()))
A = list(map(lambda x:x%200,A))
C = collections.Counter(A)
ans = 0
for key,value in C.items():
if value>1:
ans += nCr(value,2)
print(ans)
```
#### File: study-atcoder/src/abc201_c.py
```python
def main():
S = str(input())
ans = 0
for i in range(10000):
array = [False]*10
now = i
for j in range(4):
array[now%10] = True
now //= 10
N = 1
for j in range(10):
if (S[j] == "o" and not array[j]) or (S[j] == 'x' and array[j]):
N = 0
ans += N
print(ans)
if __name__ == "__main__":
main()
```
#### File: study-atcoder/src/abc217_d.py
```python
import bisect
import array
def main():
L,Q = map(int,input().split())
separator = array.array('i',[0,L])
for _ in range(Q):
c,x = map(int,input().split())
y = bisect.bisect(separator,x)
if c==1:
separator.insert(y,x)
else:
print(separator[y]-separator[y-1])
if __name__ == '__main__':
main()
```
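The solution keeps the cut positions in a sorted array and uses `bisect` to find the segment containing x. The standard-library behaviour it relies on, in isolation:
```python
import bisect

cuts = [0, 3, 7, 10]          # sorted cut positions, endpoints included
x = 5
idx = bisect.bisect(cuts, x)  # 2: index of the first cut strictly greater than 5
print(cuts[idx] - cuts[idx - 1])  # 4: length of the piece [3, 7] that contains 5

cuts.insert(bisect.bisect(cuts, x), x)  # a type-1 query inserts a new cut, keeping the order
print(cuts)                             # [0, 3, 5, 7, 10]
```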
#### File: study-atcoder/src/abc220_c.py
```python
import math
def main():
N = int(input())
A = list(map(int,input().split()))
X = int(input())
value = (X//sum(A))*sum(A)
ans = (X//sum(A))*N
for i in A:
value += i
ans += 1
if value>X:
print(ans)
break
if __name__ == '__main__':
main()
```
#### File: study-atcoder/src/hitachi2020_b.py
```python
def main():
A,B,M = map(int,input().split())
A_prise = list(map(int,input().split()))
B_prise = list(map(int,input().split()))
Most_low_prise = min(A_prise)+min(B_prise)
for i in range(M):
x,y,c = map(int,input().split())
Post_coupon_orientation_prise = A_prise[x-1]+B_prise[y-1]-c
Most_low_prise = min(Most_low_prise,Post_coupon_orientation_prise)
print(Most_low_prise)
if __name__ == '__main__':
main()
``` |
{
"source": "06needhamt/intellij-community",
"score": 2
} |
#### File: transport/framed/__init__.py
```python
from __future__ import absolute_import
import struct
from io import BytesIO
from _shaded_thriftpy._compat import CYTHON
from ..base import TTransportBase, readall
from ..buffered import TBufferedTransport
class TFramedTransport(TTransportBase):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans):
self._trans = trans
self._rbuf = BytesIO()
self._wbuf = BytesIO()
def is_open(self):
return self._trans.is_open()
def open(self):
return self._trans.open()
def close(self):
return self._trans.close()
def read(self, sz):
# Important: don't attempt to read the next frame if the caller
# doesn't actually need any data.
if sz == 0:
return b''
ret = self._rbuf.read(sz)
if len(ret) != 0:
return ret
self.read_frame()
return self._rbuf.read(sz)
def read_frame(self):
buff = readall(self._trans.read, 4)
sz, = struct.unpack('!i', buff)
frame = readall(self._trans.read, sz)
self._rbuf = BytesIO(frame)
def write(self, buf):
self._wbuf.write(buf)
def flush(self):
# reset wbuf before write/flush to preserve state on underlying failure
out = self._wbuf.getvalue()
self._wbuf = BytesIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive
# copies
self._trans.write(struct.pack("!i", len(out)) + out)
self._trans.flush()
def getvalue(self):
return self._trans.getvalue()
class TFramedTransportFactory(object):
def get_transport(self, trans):
return TBufferedTransport(TFramedTransport(trans))
if CYTHON:
from .cyframed import TCyFramedTransport, TCyFramedTransportFactory # noqa
```
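`TFramedTransport` writes each flushed buffer as a 4-byte big-endian length followed by the payload, exactly as `flush()` and `read_frame()` above show. The framing in isolation:
```python
import struct

payload = b"thrift message bytes"
frame = struct.pack("!i", len(payload)) + payload  # what flush() sends
size, = struct.unpack("!i", frame[:4])             # what read_frame() reads first
assert frame[4:4 + size] == payload                # the remainder is the original payload
```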
#### File: testData/copyPaste/IndentTabIncrease.dst.py
```python
print("Line 2")
class Test:
def __init__(self):
<caret>
print("Line 1")
```
#### File: testData/folding/stringPrefixFolding.py
```python
def foo():<fold text='...'>
<fold text='r"""..."""'>r"""
foo/bar
"""</fold>
pass</fold>
mline = <fold text='u"..."'>u"\n\n\n\n\n\n\n\n" \
"\n\n\n\n\n\n\n\"</fold>
```
#### File: testData/formatter/comment_after.py
```python
def foo(a):
if a == 5:
# a is 5
print('no')
foo(5)
```
#### File: testData/formatter/forceNewLineAfterLeftParenInMethodParameters_after.py
```python
def f1(
first_argument,
second_argument):
pass
def f2(first_argument, second_argument):
pass
def f3(
first_argument,
second_argument
):
pass
```
#### File: testData/formatter/tupleAssignment.py
```python
def bad_autoformat_example():
a = 5
b = 10
print(a, b)
(a, b) = b, a
print(a, b)
a, b = b, a
print(a, b)
```
#### File: PyTypeCheckerInspection/ModuleTypeParameter/a.py
```python
import module
from types import ModuleType
def foo(m: ModuleType):
pass
def bar(m):
return m.__name__
foo(module)
bar(module)
```
#### File: inspections/PyTypeCheckerInspection/OptionalOfBoundTypeVarInWarnings.py
```python
from typing import Optional, TypeVar
T = TypeVar('T', int)
def expects_int_subclass_or_none(x: Optional[T]):
pass
expects_int_subclass_or_none(<warning descr="Expected type 'Any | None' (matched generic type 'T | None'), got 'str' instead">'foo'</warning>)
```
#### File: inspections/PyUnboundLocalVariableInspection/NameDefinedInCaseClauseBodyUsedAfterMatchStatement.py
```python
def func(x):
match x:
case 42:
y = 'foo'
print(<warning descr="Local variable 'y' might be referenced before assignment">y</warning>)
```
#### File: inspections/PyUnboundLocalVariableInspection/OrPatternAlternativesDefineDifferentNames.py
```python
def func(x):
match x:
case [1, y] | [2, z]:
print(<warning descr="Local variable 'y' might be referenced before assignment">y</warning>, <warning descr="Local variable 'z' might be referenced before assignment">z</warning>)
```
#### File: inspections/PyUnusedLocalInspection/allBindingsOfSameNameInOrPatternConsideredUsed.py
```python
def func(x):
match x:
case (1 as y) | (2 as y):
print(y)
```
#### File: intentions/PyAnnotateTypesIntentionTest/functionParameterTypeAnnotationsNotChanged.py
```python
def fo<caret>o(x: bool, y: bool):
return "42"
```
#### File: intentions/PyAnnotateVariableTypeIntentionTest/annotationCallableType_after.py
```python
from typing import Callable, Any
def func(x):
pass
var: [Callable[[Any], None]] = func
```
#### File: testData/multipleArgumentsCompletion/notSuggestIfNotEnoughArgumentsInTheScopeOfFunction.py
```python
def foo(x, y, z):
pass
z = 33
def bar():
x = 42
y = 100500
foo(<caret>)
```
#### File: testData/multipleArgumentsCompletion/notSuggestPositionalContainer.py
```python
def foo(x, *y):
pass
x = 42
y = 22
foo(<caret>
```
#### File: testData/multipleArgumentsCompletion/slashAndSingleStarParameter.after.py
```python
def foo(a, /, b, *, c):
print(a, b, c)
def egg():
a = 1
b = 2
c = 3
foo(a, b, c=c)<caret>
```
#### File: PyAddImportQuickFixTest/localFromImportForCommonPackageAlias/main.py
```python
def main():
<error descr="Unresolved reference 'plt'">p<caret>lt</error>.plot
```
#### File: quickFixes/PyMakeFunctionFromMethodQuickFixTest/usageSelf.py
```python
class A():
def method(self):
self.method2()
def metho<caret>d2(self):
print(1)
```
#### File: quickFixes/PyRemoveStatementQuickFixTest/lastStatement_after.py
```python
class B(object):
def __init__(self): # error
print("")
```
#### File: quickFixes/PyRemoveUnusedLocalQuickFixTest/removeChainedAssignmentStatementUnpackingFirstTarget_after.py
```python
def f():
a = _, b = 42, 42
return a, b
```
#### File: quickFixes/PyReplaceWithOldStyleUnionQuickFixTest/bitwiseOrUnionReplacedByOldSyntaxInReturn.py
```python
def foo() -> <warning descr="Python versions 2.7, 3.5, 3.6, 3.7, 3.8, 3.9 do not allow writing union types as X | Y"><caret>int | str</warning>:
return 42
```
#### File: quickFixes/PyReplaceWithOldStyleUnionQuickFixTest/bitwiseOrUnionReplacedByOldSyntaxIntNoneWithOptional_after.py
```python
from typing import Optional
def foo() -> Opt<caret>ional[int]:
return 42
```
#### File: quickFixes/PyReplaceWithOldStyleUnionQuickFixTest/bitwiseOrUnionReplacedByOldSyntaxNoneIntWithOptional_after.py
```python
from typing import Optional
def foo() -> Optional[int]<caret>:
return 42
```
#### File: refactoring/extractsuperclass/classPropertyDependsOnMethod.after.py
```python
from abc import ABCMeta, abstractmethod
class Spam(metaclass=ABCMeta):
@abstractmethod
def __add__(self, other):
pass
__radd__ = __add__
class C(Spam):
def __add__(self, other):
pass
#
#
```
#### File: refactoring/extractsuperclass/noClassCastExceptionInCopiedFunctionWithClassInitAndMethodCall.after.py
```python
class Foo:
def foo(self):
pass
class Bar:
@staticmethod
def baz():
foo = Foo()
foo.foo()
class Baz(Bar):
pass
```
#### File: testData/resolve/NoResolveInstanceAttrBelow.py
```python
class C:
def f(self):
x = self.foo
# <ref>
self.foo = 1
```
#### File: selectWord/word/after6.py
```python
import os
<selection>def f(arg):
print(a<caret>rg)
print("a")</selection>
``` |
{
"source": "070411209/GAAS-Object-Tracking",
"score": 3
} |
#### File: goturn/tracker/tracker_manager.py
```python
import cv2
class tracker_manager:
"""Docstring for tracker_manager. """
def __init__(self, init_rect, first_frame, regressor, tracker):
"""This is
:videos: list of video frames and annotations
:regressor: regressor object
:tracker: tracker object
:returns: list of video sub directories
"""
self.tracker = tracker
self.regressor = regressor
self.tracker.init(first_frame, init_rect, self.regressor)
def track_frame(self, frame):
obj_tracker = self.tracker
obj_regressor = self.regressor
bbox = obj_tracker.track(frame, obj_regressor)
return bbox
```
#### File: pysot/models/model_builder.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from pysot.core.config import cfg
class ModelBuilder(nn.Module):
def __init__(self):
super(ModelBuilder, self).__init__()
# build backbone
module_name, cls_name = cfg.BACKBONE.TYPE.rsplit('.', 1)
module = importlib.import_module(module_name)
if cls_name.startswith('alexnet'):
self.backbone = getattr(module, cls_name)(width_mult=cfg.BACKBONE.WIDTH_MULT)
elif cls_name.startswith('mobile'):
self.backbone = getattr(module, cls_name)(width_mult=cfg.BACKBONE.WIDTH_MULT,
used_layers=cfg.BACKBONE.LAYERS)
else:
self.backbone = getattr(module, cls_name)(used_layers=cfg.BACKBONE.LAYERS)
# build adjust layer
if cfg.ADJUST.ADJUST:
module_name, cls_name = cfg.ADJUST.TYPE.rsplit('.', 1)
module = importlib.import_module(module_name)
module = getattr(module, cls_name)
self.neck = module(cfg.BACKBONE.CHANNELS, cfg.ADJUST.ADJUST_CHANNEL)
# build rpn head
module_name, cls_name = cfg.RPN.TYPE.rsplit('.', 1)
module = importlib.import_module(module_name)
cls = getattr(module, cls_name)
if cfg.ADJUST.ADJUST:
channels = cfg.ADJUST.ADJUST_CHANNEL
else:
channels = cfg.BACKBONE.CHANNELS
if len(channels) == 1:
channels = channels[0]
if cfg.RPN.WEIGHTED:
self.rpn_head = cls(cfg.ANCHOR.ANCHOR_NUM, channels, True)
else:
self.rpn_head = cls(cfg.ANCHOR.ANCHOR_NUM, channels)
# build mask head
if cfg.MASK.MASK:
module_name, cls_name = cfg.MASK.MASK_TYPE.rsplit('.', 1)
module = importlib.import_module(module_name)
cls = getattr(module, cls_name)
self.mask_head = cls(cfg.ADJUST.ADJUST_CHANNEL[0],
cfg.ADJUST.ADJUST_CHANNEL[0],
cfg.MASK.OUT_CHANNELS)
if cfg.MASK.REFINE:
module_name, cls_name = cfg.MASK.REFINE_TYPE.rsplit('.', 1)
module = importlib.import_module(module_name)
cls = getattr(module, cls_name)
self.refine_head = cls()
def template(self, z):
zf = self.backbone(z)
if cfg.MASK.MASK:
zf = zf[-1]
if cfg.ADJUST.ADJUST:
zf = self.neck(zf)
self.zf = zf
def track(self, x):
xf = self.backbone(x)
if cfg.MASK.MASK:
self.xf = xf[:-1]
xf = xf[-1]
if cfg.ADJUST.ADJUST:
xf = self.neck(xf)
cls, loc = self.rpn_head(self.zf, xf)
if cfg.MASK.MASK:
mask, self.mask_corr_feature = self.mask_head(self.zf, xf)
return {
'cls': cls,
'loc': loc,
'mask': mask if cfg.MASK.MASK else None
}
def mask_refine(self, pos):
return self.refine_head(self.xf, self.mask_corr_feature, pos)
```
#### File: models/neck/neck.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch.nn as nn
class AdjustLayer(nn.Module):
def __init__(self, inplane, outplane):
super(AdjustLayer, self).__init__()
self.downsample = nn.Sequential(
nn.Conv2d(inplane, outplane, kernel_size=1, bias=False),
nn.BatchNorm2d(outplane),
)
def forward(self, x):
x = self.downsample(x)
if x.size(3) < 20:
l = 4
r = l + 7
x = x[:, :, l:r, l:r]
return x
class AdjustAllLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(AdjustAllLayer, self).__init__()
self.num = len(out_channels)
if self.num == 1:
self.downsample = AdjustLayer(in_channels[0], out_channels[0])
else:
for i in range(self.num):
self.add_module('downsample'+str(i+2),
AdjustLayer(in_channels[i], out_channels[i]))
def forward(self, features):
if self.num == 1:
return self.downsample(features)
else:
out = []
for i in range(self.num):
adj_layer = getattr(self, 'downsample'+str(i+2))
out.append(adj_layer(features[i]))
return out
``` |
{
"source": "070411209/mono-vo",
"score": 3
} |
#### File: mono-vo/python/ch4.py
```python
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.tree import DecisionTreeRegressor
def A():
    # Prepare the dataset
iris=load_iris()
    # Get the feature set and class labels
features = iris.data
labels = iris.target
    # Randomly hold out 33% of the data as the test set, the rest as the training set
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.33, random_state=0)
    # Create the CART classification tree
clf = DecisionTreeClassifier(criterion='gini')
    # Fit the CART classification tree
clf = clf.fit(train_features, train_labels)
    # Predict with the CART classification tree
test_predict = clf.predict(test_features)
    # Compare the predictions with the test-set labels
score = accuracy_score(test_labels, test_predict)
print("CART分类树准确率 %.4lf" % score)
def B():
    # Prepare the dataset
boston=load_boston()
    # Explore the data
print(boston.feature_names)
    # Get the feature set and house prices
features = boston.data
prices = boston.target
    # Randomly hold out 33% of the data as the test set, the rest as the training set
train_features, test_features, train_price, test_price = train_test_split(features, prices, test_size=0.33)
    # Create the CART regression tree
dtr=DecisionTreeRegressor()
    # Fit the CART regression tree
dtr.fit(train_features, train_price)
    # Predict the house prices of the test set
predict_price = dtr.predict(test_features)
    # Evaluate the results on the test set
    print('Regression tree mean squared error:', mean_squared_error(test_price, predict_price))
    print('Regression tree mean absolute error:', mean_absolute_error(test_price, predict_price))
if __name__ == "__main__":
# execute only if run as a script
B()
``` |
{
"source": "07231985/3partition",
"score": 3
} |
#### File: 07231985/3partition/3partition_complete_greedy_algorithm.py
```python
set_of_numbers = [20, 23, 25, 30, 49, 45, 27, 30, 30, 40, 23, 18, 55,
35, 0, 89, 1, 0, 45, 40, 5, 0, -1, 91, -89, 179, 0, ]
# set_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# set_of_numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1]
# set_of_numbers = [13, 10, 8, 1, 1, 1] # 13 has no match
# set_of_numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1,
# 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1,
# 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]
# set_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9,
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9,
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]
# set_of_numbers = [9, 5, 3, 6, 7, 1, 2, 4, 8]
# set_of_numbers = [0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1, ]
# number of steps test sets (Section 14):
# section 14.a
# set_of_numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1]
# section 14.b
# set_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, ]
# section 14.c
# set_of_numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1,
# 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1,
# 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]
# section 14.d
# set_of_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9,
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9,
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]
# section 14.e
# set_of_numbers = [20, 23, 25, 30, 49, 45, 27, 30, 30, 40, 23, 18, 55,
# 35, 0, 89, 1, 0, 45, 40, 5, 0, -1, 91, -89, 179, 0, ]
# section 14.f
# set_of_numbers = [13, 10, 8, 1, 1, 1] # 13 has no match
# section 14.g
# set_of_numbers = [0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1, 0, 7, 2, 3, 1, 9, 8, 5, 1,
# 0, 7, 2, 3, 1, 9, 8, 5, 1]
# section 14.h
# set_of_numbers = [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
# 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
# 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
# 7, 7, 7, 7, 7, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 5, 5, 5,
# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3,
# 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2,
# 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
# 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0]
# section 14.i
# set_of_numbers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
# 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
# 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5,
# 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
# 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
# 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
# 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
# 9, 9, 9, 9, 9, 9, 9, 9, 9]
original_set_numbers = set_of_numbers.copy()
new_set_numbers = set_of_numbers.copy()
amount_each_subset = -1
break_all_loops = False
count_steps = 0
number_of_subsets = 0
collect_subsets = []
def count_total_steps():
global count_steps
count_steps += 1
def sum_of_set():
# ========= count total steps for each function and loop
count_total_steps()
# ============================
print(">>>>>>>>>> Step: Sum of sets <<<<<<<<<<\n")
sum_nums = sum(set_of_numbers)
print(f"Number of subsets: {number_of_subsets}")
if sum_nums % number_of_subsets == 0:
global amount_each_subset
amount_each_subset = sum_nums // number_of_subsets
print(f"Total sum = {sum_nums}")
print(f"Amount each subset = {amount_each_subset}")
else:
print(f"{sum_nums} is not divisible by {number_of_subsets}")
def can_be_split_into_three():
# ========= count total steps for each function and loop
count_total_steps()
# ============================
print(">>>>>>>>>> Step: Split into three <<<<<<<<<<")
if len(new_set_numbers) % 3 == 0:
global number_of_subsets
number_of_subsets = len(new_set_numbers) // 3
# ========== function sum of sets
sum_of_set()
# =======================
else:
print("================ Error =================")
print("The total number of items can't be divided into 3")
# ***********************************************************************************************
# *************************************** Start . Program ***************************************
# ***********************************************************************************************
can_be_split_into_three()
if amount_each_subset > -1:
print(f"\noriginal set: {original_set_numbers}")
print(f"original set length is {len(original_set_numbers)}")
new_set_numbers_x = new_set_numbers.copy()
for x_index in range(len(new_set_numbers) - 2):
x = new_set_numbers[x_index]
new_set_numbers_x.remove(new_set_numbers[x_index])
new_set_numbers_y = new_set_numbers_x.copy()
print(f"\n=========================== start program with x: {x}")
while len(new_set_numbers_y) > 1:
print(f"\ny subset length is {len(new_set_numbers_y)}")
print(f"y subset = {new_set_numbers_y}")
y = new_set_numbers_y[0]
print(f"\n=========================== start program with x: {x} and y: {y}")
new_set_numbers_y.remove(new_set_numbers_y[0])
for z in new_set_numbers_y:
# ========= count total steps
count_total_steps()
# ============================
print(f"combination: x: {x} y:{y} z:{z}")
combination_sum = x + y + z
if combination_sum == amount_each_subset:
collect_combination = [x, y, z]
print("\n**************************** ")
print(f"match found: sum of combination {collect_combination} = {amount_each_subset}")
print("**************************** \n")
collect_subsets.append(collect_combination)
print("\n*************************************** Results ***************************************")
print(f"original set: {original_set_numbers}")
print(f"total number of elements inside the set: {len(original_set_numbers)}")
print(f"\nnumber of subsets: {number_of_subsets}")
print(f"Total sum of the entire set: {amount_each_subset * number_of_subsets}")
print(f"The amount for each subset is: {amount_each_subset}")
print(f"\ncollected list of all possible subsets: {collect_subsets}")
print(f"Total number of all possible subsets collected: {len(collect_subsets)}")
print(f"\nTotal steps for finding all possible subsets: {count_steps}")
print("***************************************************************************************")
# ***********************************************************************************************
# *************************************** Finding Subsets ***************************************
# ***********************************************************************************************
print("\n\n=================== start looking for subsets")
set_numbers_complete = set_of_numbers.copy()
loop_count_for = 0
collect_sets = []
count_steps_finding_subset = 0
set_x = collect_subsets.copy()
for x in collect_subsets:
loop_count_for += 1
print(f"\n=================== starting subsets program with the for loop: {loop_count_for}")
find_subsets = x.copy()
set_x.remove(find_subsets)
set_numbers_complete = set_of_numbers.copy()
set_numbers_complete.remove(find_subsets[0])
set_numbers_complete.remove(find_subsets[1])
set_numbers_complete.remove(find_subsets[2])
collect_sets.append(find_subsets)
for y in set_x:
find_y_subsets = y.copy()
if len(set_numbers_complete) > 0:
count_total_steps()
count_steps_finding_subset += 1
# print(f"available subset: {find_y_subsets} and the remaining set = {set_numbers_complete}")
if find_y_subsets[0] in set_numbers_complete:
set_numbers_complete.remove(find_y_subsets[0])
if find_y_subsets[1] in set_numbers_complete:
set_numbers_complete.remove(find_y_subsets[1])
if find_y_subsets[2] in set_numbers_complete:
set_numbers_complete.remove(find_y_subsets[2])
collect_sets.append(find_y_subsets)
# print("\n**************************** ")
# print(f"match found: removing combination {find_y_subsets} new set: {set_numbers_complete}")
# print("**************************** \n")
else:
set_numbers_complete.append(find_y_subsets[0])
set_numbers_complete.append(find_y_subsets[1])
else:
set_numbers_complete.append(find_y_subsets[0])
else:
break
if len(set_numbers_complete) == 0:
break
else:
collect_sets = []
# print("\n**************************** ")
# print(f"resetting collected sets: {collect_sets}")
# print("**************************** \n")
print("\n*************************************** Finding Subsets Output ****************************************")
print(f"found possible subsets: {collect_subsets}")
print(f"\nThe complete list of subsets: {collect_sets}")
print(f"total steps for finding the complete list of subsets only: {count_steps_finding_subset}")
print(f"\ntotal steps for finding all possible subsets: {count_steps - count_steps_finding_subset}")
print(f"total steps for finding all possible subsets + finding the {number_of_subsets} subset(s): {count_steps}")
print("*******************************************************************************************************")
``` |
{
"source": "0724654276/MusicFlow-App",
"score": 3
} |
#### File: MusicFlow-App/app/views.py
```python
from flask import render_template
from app import app
from app.request import get_music
# Views
@app.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# Getting popular music
popular_music = get_music('popular')
print(popular_music)
title = 'Home - Welcome to The best Music Review Website Online'
return render_template('index.html', title = title,popular = popular_music)
@app.route('/music/<int:music_id>')
def music(music_id):
'''
View music page function that returns the music details page and its data
'''
return render_template('music.html',id = music_id)
``` |
{
"source": "0724654276/Myblog",
"score": 2
} |
#### File: app/main/views.py
```python
from flask import render_template,request,redirect,url_for,abort,flash
from . import main
from ..requests import get_quotes
from ..models import User, Blog,Comment
from .forms import UpdateProfileForm,UploadBlogForm,CommentsForm
from .. import db,photos
from flask_login import login_required,current_user
#Views
@main.route("/")
def index():
#Getting our quotes
quotes = get_quotes()
blog = Blog.query.filter_by().all()
return render_template("index.html",quotes=quotes,blog=blog)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
blogs = Blog.query.filter_by(user_id=current_user.id).all()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user,blogs=blogs)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
form = UpdateProfileForm()
if user is None:
abort(404)
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/user/<uname>/upload_blog',methods = ['POST','GET'])
@login_required
def upload_blog(uname):
user = User.query.filter_by(username = uname ).first()
form = UploadBlogForm()
if user is None:
abort(404)
if form.validate_on_submit():
title = form.title.data
blog = form.blog.data
user_id = current_user._get_current_object().id
new_blog_object = Blog(title=title,blog=blog,user_id=user_id)
new_blog_object.save_blog()
return redirect(url_for('main.index'))
return render_template('new_blog.html', form = form)
@main.route('/<int:blog_id>/delete',methods=['POST','GET'])
@login_required
def delete_blog(blog_id):
blog=Blog.query.get(blog_id)
if blog.user_id != current_user.id:
abort(403)
db.session.delete(blog)
db.session.commit()
return redirect(url_for('main.profile',uname=current_user.username))
@main.route('/<int:blog_id>/update',methods=['GET','POST'])
@login_required
def update_blog(blog_id):
form=UploadBlogForm()
blog=Blog.query.get(blog_id)
if blog.user_id != current_user.id:
abort(403)
if form.validate_on_submit():
blog.title=form.title.data
blog.blog=form.blog.data
db.session.commit()
flash('Successfully Updated!')
return redirect(url_for('main.profile',uname=current_user.username))
elif request.method=='GET':
form.title.data=blog.title
form.blog.data=blog.blog
return render_template('update_blog.html',form=form,legend="Update Blog")
@main.route("/comment/<int:blog_id>",methods=["POST","GET"])
@login_required
def comment_blog(blog_id):
form = CommentsForm()
blog = Blog.query.get(blog_id)
all_comments = Comment.get_comments(blog_id)
if form.validate_on_submit():
new_comment = form.comment.data
blog_id = blog_id
user_id = current_user._get_current_object().id
comment_object = Comment(comment=new_comment,user_id=user_id,blog_id=blog_id)
comment_object.save_comment()
return redirect(url_for(".comment_blog",blog_id=blog_id))
return render_template("comments.html",comment_form=form,blog=blog,all_comments=all_comments)
``` |
{
"source": "072ashin/OffLabelWeb",
"score": 3
} |
#### File: OffLabelWeb/fitting_py_code/fitCylinder.py
```python
import numpy as np
from distance2cylinder import *
from scipy.optimize import leastsq
from scipy.optimize import least_squares
def fitCylinder(P, dx, dy, dz, px, py, pz, r):
'''
% Fit the cylinder to n 3D-points in P given the initial estimate of the cylinder
% Input: P, dx, dy, dz, px, py, pz, r
% P : list of 3D-points, is of size n * 3
% dx dy dz : vector indicates the axis of the cylinder
% px py pz : a point on the rotational axis of the cylinder
% r : the radius of the cylinder
% Output: dx dy dz px py pz r
:param P:
:param dx:
:param dy:
:param dz:
:param px:
:param py:
:param pz:
:param r:
:return:
'''
k = 1 / r
tmp1 = np.array([dx, dy, dz])
tmp2 = np.array([-px, -py, -pz])
t = np.inner(tmp1, tmp2)/ np.linalg.norm(tmp1)
x = px + t * dx
y = py + t * dy
z = pz + t * dz
rho = np.sqrt(x ** 2 + y ** 2 + z ** 2) - r
phi = np.arctan2(y, x)
zeta = np.arccos(z / np.sqrt(x ** 2 + y ** 2 + z ** 2))
n_zeta = np.array([np.cos(phi) * np.cos(zeta), np.sin(phi) * np.cos(zeta), -np.sin(zeta)])
n_phi_bar = np.array([-np.sin(phi), np.cos(phi), 0])
cos_alpha = np.sum(tmp1 * n_zeta) / np.linalg.norm(tmp1)
sin_alpha = np.sum(tmp1 * n_phi_bar) / np.linalg.norm(tmp1)
alpha = np.arccos(cos_alpha) * np.sign(sin_alpha)
alpha = max(alpha, -np.pi)
alpha = min(alpha, np.pi)
#####Solve nonlinear least-squares (nonlinear data-fitting) problems
p0 = np.array([rho, phi, zeta, alpha, k])
out = least_squares(distance2cylinder, p0, jac=JacobianofCylinder, method='trf', bounds=([-np.inf, -np.pi, 0, -np.pi, 0],[np.inf,np.pi,np.pi,np.pi,np.inf]), args=([P]))
out = out.x
####End
r = 1 / out[4]
px = (out[0] + r) * np.cos(out[1]) * np.sin(out[2])
py = (out[0] + r) * np.sin(out[1]) * np.sin(out[2])
pz = (out[0] + r) * np.cos(out[2])
dx = np.cos(out[1]) * np.cos(out[2]) * np.cos(out[3]) - np.sin(out[1]) * np.sin(out[3])
dy = np.sin(out[1]) * np.cos(out[2]) * np.cos(out[3]) + np.cos(out[1]) * np.sin(out[3])
dz = -np.sin(out[2]) * np.cos(out[3])
return dx, dy, dz, px, py, pz, r
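# --- Illustrative usage sketch (assumes the companion distance2cylinder module is importable) ---
# Fit a cylinder of radius ~1 around the z-axis from noisy surface samples; the initial
# direction, point and radius below are rough guesses, not values from the original project.
if __name__ == "__main__":
    theta = np.random.rand(200) * 2 * np.pi
    P = np.column_stack([np.cos(theta), np.sin(theta), np.random.rand(200)]) + 0.01 * np.random.randn(200, 3)
    print(fitCylinder(P, 0.1, 0.1, 1.0, 0.05, -0.05, 0.0, 0.9))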
```
#### File: OffLabelWeb/fitting_py_code/fitPlane.py
```python
import numpy as np
import math
from scipy.optimize import leastsq
from distance2plane import distance2plane
def fitPlane(P, x, y, z, nx ,ny, nz):
'''
% Fit a plane to n 3D-points in P given the initial estimate of it
% Input: P, x, y, z, nx, ny, nz
% P : list of 3-D points, is of size n x 3, P(i, :) is the coordinates
% of the i-th point
% x, y, z : a point on the plane
% nx, ny, nz : the normal vector of the plane
% Output: x, y, z, nx, ny, nz where [nx, ny, nz] is the plane normal vector
% and [x, y, z] is a representative point on that plane
'''
phi=np.arctan2(ny,nx)
zeta=np.arccos(nz/np.sqrt(nx**2+ny**2+nz**2))
a=np.array([x,y,z])
    # are phi and zeta valid numbers?
b=np.array([np.cos(phi)*np.sin(zeta),np.sin(phi)*np.sin(zeta),np.cos(zeta)])
rho=-np.sum(a*b)
#####Solve nonlinear least-squares (nonlinear data-fitting) problems
p0=np.array([rho, phi, zeta])
out=leastsq(distance2plane,p0,args=(P))
out=out[0]
####End
nx=np.cos(out[1])*np.sin(out[2])
ny=np.sin(out[1])*np.sin(out[2])
nz=np.cos(out[2])
x=np.mean(P[:,0])
y=np.mean(P[:,1])
z=np.mean(P[:,2])
return x,y,z,nx,ny,nz
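# --- Illustrative usage sketch (assumes the companion distance2plane module is importable) ---
# Fit a plane to noisy samples of z ~ 0, starting from a rough initial guess of the normal.
if __name__ == "__main__":
    P = np.column_stack([np.random.rand(200), np.random.rand(200), 0.01 * np.random.randn(200)])
    x, y, z, nx, ny, nz = fitPlane(P, 0.5, 0.5, 0.0, 0.1, 0.1, 0.9)
    print(nx, ny, nz)  # the fitted normal should end up close to (0, 0, +/-1)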
``` |
{
"source": "076923/cv2-utils",
"score": 3
} |
#### File: cv2u/core/cluster.py
```python
import cv2
import numpy as np
from sklearn.cluster import DBSCAN as skDBSCAN
def DBSCAN(src, eps, min_samples):
arr = cv2.cvtColor(src, cv2.COLOR_BGR2LAB).reshape(-1, src.shape[2])
clustering = skDBSCAN(eps=eps, min_samples=min_samples).fit(arr)
labels = clustering.labels_ + 1
maps = labels.reshape(src.shape[:2])
return maps, labels
def drawDBSCAN(src, maps, labels):
colors = []
for lb in set(labels):
mask = np.where(maps == lb, 255, 0).astype(np.uint8)
color = list(map(int, list(cv2.mean(src, mask)[:src.shape[2]])))
colors.append(np.array(color, dtype=np.uint8))
colors = np.asarray(colors)
dst = colors[labels].astype(np.uint8).reshape(src.shape)
return dst
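# --- Illustrative usage sketch (the file names are placeholders) ---
# Cluster a small image in LAB colour space and repaint each cluster with its mean colour.
if __name__ == "__main__":
    src = cv2.imread("small_image.png")
    maps, labels = DBSCAN(src, eps=5, min_samples=10)
    cv2.imwrite("clustered.png", drawDBSCAN(src, maps, labels))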
```
#### File: cv2u/imgcodecs/gradient.py
```python
import cv2
import numpy as np
def makeGradient(shape, start, end):
def gradient(_start, _end):
return np.linspace(_start, _end, num=shape[0]*shape[1], endpoint=True, retstep=False, dtype=np.uint8)
if shape[2] == 1:
start = start[0] if len(start) > 1 else start
end = end[0] if len(end) > 1 else end
img = gradient(start, end).reshape(*shape)
elif shape[2] == 3:
b = gradient(start[0], end[0]).reshape(*shape[:2], 1)
g = gradient(start[1], end[1]).reshape(*shape[:2], 1)
r = gradient(start[2], end[2]).reshape(*shape[:2], 1)
img = cv2.merge((b, g, r))
return img
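# --- Illustrative usage sketch (not part of the original module) ---
# A 256x256 grayscale ramp and a 256x256 BGR blue-to-red ramp.
if __name__ == "__main__":
    gray = makeGradient((256, 256, 1), (0,), (255,))
    colour = makeGradient((256, 256, 3), (255, 0, 0), (0, 0, 255))
    print(gray.shape, colour.shape)  # (256, 256, 1) (256, 256, 3)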
```
#### File: cv2u/imgcodecs/urlread.py
```python
import cv2
import numpy as np
from urllib.request import urlopen
def urlread(url, flags=cv2.IMREAD_UNCHANGED):
response = urlopen(url)
img = np.asarray(bytearray(response.read()), dtype=np.uint8)
img = cv2.imdecode(img, flags)
return img
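# --- Illustrative usage sketch (the URL is a placeholder) ---
if __name__ == "__main__":
    img = urlread("https://example.com/sample.png", cv2.IMREAD_COLOR)
    print(img.shape)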
``` |
{
"source": "07734willy/07734willy.github.io",
"score": 3
} |
#### File: 07734willy.github.io/src/latextension.py
```python
from markdown.extensions.codehilite import HiliteTreeprocessor as HTP
from markdown.treeprocessors import Treeprocessor
from markdown.inlinepatterns import InlineProcessor
from markdown.blockprocessors import BlockProcessor
from markdown.extensions import Extension
from latex2mathml.converter import convert as convert_latex
import xml.etree.ElementTree as etree
import re
def tex_to_mathml(data):
tex = HTP.code_unescape(None, data)
html = convert_latex(tex).strip()
return html
def wrap_inline_math(data):
math_html = tex_to_mathml(data)
html = f"<span class=\"inline-math\">{math_html}</span>"
return html
def wrap_block_math(data):
math_html = tex_to_mathml(data)
html = f"<div class=\"block-math\">{math_html}</div>"
return html
class TexTreeprocessor(Treeprocessor):
def run(self, root):
blocks = root.iter('pre')
for block in blocks:
if len(block) == 1 and block[0].tag == 'code':
code = block[0].text
if not code.startswith(":::latex"):
continue
regex = r"(?:^|(?<=\n)):::latex\s*"
html = "<br>".join(map(wrap_block_math, re.split(regex, code)[1:]))
placeholder = self.md.htmlStash.store(html)
block.clear()
block.tag = 'p'
block.text = placeholder
class TexBlockProcessor(BlockProcessor):
PATTERN = r"^([\s\S]*?)(?<![\w\$])(\$(?!\s)((?:[^\$]|\\\$)+?)(?<!\s)\$)(?![\w\$])([\s\S]*)$"
def __init__(self, md):
self.md = md
super().__init__(md.parser)
def test(self, parent, block):
return re.match(self.PATTERN, block)
def run(self, parent, blocks):
self.add_tex_seg(parent, blocks[0])
del blocks[0]
return True
def add_span(self, parent, text):
e = etree.SubElement(parent, 'span')
e.text = text
def add_tex_seg(self, parent, block):
if not block:
return
if not re.match(self.PATTERN, block):
self.add_span(parent, block)
return
match = re.match(self.PATTERN, block)
if match.group(1):
self.add_span(parent, match.group(1))
html = wrap_inline_math(match.group(3))
placeholder = self.md.htmlStash.store(html)
self.add_span(parent, placeholder)
self.add_tex_seg(parent, match.group(4))
class TexExtension(Extension):
def extendMarkdown(self, md):
treeprocessor = TexTreeprocessor(md)
blockprocessor = TexBlockProcessor(md)
md.treeprocessors.register(treeprocessor, 'textree', 31)
md.parser.blockprocessors.register(blockprocessor, 'texblock', 18)
md.registerExtension(self)
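# --- Illustrative usage sketch (not part of the original module) ---
# Render a snippet containing inline $...$ math through the extension.
if __name__ == "__main__":
    import markdown
    sample = r"Euler's identity: $e^{i\pi} + 1 = 0$"
    print(markdown.markdown(sample, extensions=[TexExtension()]))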
``` |
{
"source": "07734willy/pythagorean_tuples",
"score": 3
} |
#### File: 07734willy/pythagorean_tuples/test_pythagorean_tuples.py
```python
import math
import unittest
from random import randint
from pythagorean_tuples import pythagorean_triples
def is_pythagorean_triple(t):
return t[0] ** 2 + t[1] ** 2 == t[2] ** 2
def is_primitive(t):
return math.gcd(t[0], t[1]) == 1 and math.gcd(t[1], t[2]) == 1 and math.gcd(t[2], t[0]) == 1
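# Worked example (illustrative): (3, 4, 5) satisfies 9 + 16 = 25 and is primitive,
# while (6, 8, 10) is also a triple but not primitive, since gcd(6, 8) = 2.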
class TestPythagoreanTriples(unittest.TestCase):
def test_not_integer(self):
self.assertRaises(TypeError, pythagorean_triples, 3.14, "TypeError has not been raised")
def test_not_positive(self):
self.assertRaises(ValueError, pythagorean_triples, 0, "ValueError has not been raised")
def test_number_AP(self):
self.assertEqual(set(), pythagorean_triples(2, True))
self.assertEqual({(4, 3, 5)}, pythagorean_triples(4, True))
self.assertEqual({(32, 255, 257)}, pythagorean_triples(32, True))
self.assertEqual({(512, 65535, 65537)}, pythagorean_triples(512, True))
def test_number_A(self):
self.assertEqual(set(), pythagorean_triples(2))
self.assertEqual({(4, 3, 5)}, pythagorean_triples(4))
self.assertEqual({(32, 24, 40), (32, 60, 68), (32, 126, 130), (32, 255, 257)}, pythagorean_triples(32))
self.assertEqual({(512, 384, 640), (512, 960, 1088), (512, 2016, 2080), (512, 4080, 4112), (512, 8184, 8200),
(512, 16380, 16388), (512, 32766, 32770), (512, 65535, 65537)}, pythagorean_triples(512))
def test_random_AP(self):
for _ in range(10):
a = 2 ** randint(15, 50)
primitive_triples = pythagorean_triples(a, True)
for t in primitive_triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
self.assertTrue(is_primitive(t), f'{t} is not primitive')
def test_random_A(self):
for _ in range(10):
a = 2 ** randint(15, 50)
triples = pythagorean_triples(a)
for t in triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
def test_number_BP(self):
self.assertEqual({(3, 4, 5)}, pythagorean_triples(3, True))
self.assertEqual({(13, 84, 85)}, pythagorean_triples(13, True))
self.assertEqual({(271, 36720, 36721)}, pythagorean_triples(271, True))
self.assertEqual({(121, 7320, 7321)}, pythagorean_triples(121, True))
self.assertEqual({(153, 104, 185), (153, 11704, 11705)}, pythagorean_triples(153, True))
self.assertEqual({(235, 1092, 1117), (235, 27612, 27613)}, pythagorean_triples(235, True))
def test_number_B(self):
self.assertEqual({(3, 4, 5)}, pythagorean_triples(3))
self.assertEqual({(13, 84, 85)}, pythagorean_triples(13))
self.assertEqual({(271, 36720, 36721)}, pythagorean_triples(271))
self.assertEqual({(121, 660, 671), (121, 7320, 7321)}, pythagorean_triples(121))
self.assertEqual({(153, 104, 185), (153, 204, 255), (153, 420, 447), (153, 680, 697), (153, 1296, 1305),
(153, 3900, 3903), (153, 11704, 11705)}, pythagorean_triples(153))
self.assertEqual({(235, 564, 611), (235, 1092, 1117), (235, 5520, 5525), (235, 27612, 27613)},
pythagorean_triples(235))
def test_random_BP(self):
for _ in range(10):
a = randint(10_000_000, 100_000_000) * 2 + 1
primitive_triples = pythagorean_triples(a, True)
for t in primitive_triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
self.assertTrue(is_primitive(t), f'{t} is not primitive')
def test_random_B(self):
for _ in range(10):
a = randint(10_000_000, 100_000_000) * 2 + 1
triples = pythagorean_triples(a)
for t in triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
def test_number_CP(self):
self.assertEqual({(12, 5, 13), (12, 35, 37)}, pythagorean_triples(12, True))
self.assertEqual(set(), pythagorean_triples(14, True))
self.assertEqual(set(), pythagorean_triples(34, True))
self.assertEqual({(56, 33, 65), (56, 783, 785)}, pythagorean_triples(56, True))
self.assertEqual({(68, 285, 293), (68, 1155, 1157)}, pythagorean_triples(68, True))
self.assertEqual(set(), pythagorean_triples(126, True))
def test_number_C(self):
self.assertEqual({(12, 5, 13), (12, 35, 37), (12, 9, 15), (12, 16, 20)}, pythagorean_triples(12))
self.assertEqual({(14, 48, 50)}, pythagorean_triples(14))
self.assertEqual({(34, 288, 290)}, pythagorean_triples(34))
self.assertEqual({(56, 33, 65), (56, 42, 70), (56, 90, 106), (56, 105, 119), (56, 192, 200), (56, 390, 394),
(56, 783, 785)}, pythagorean_triples(56))
self.assertEqual({(68, 51, 85), (68, 285, 293), (68, 576, 580), (68, 1155, 1157)},
pythagorean_triples(68))
self.assertEqual({(126, 32, 130), (126, 120, 174), (126, 168, 210), (126, 432, 450), (126, 560, 574),
(126, 1320, 1326), (126, 3968, 3970)}, pythagorean_triples(126))
def test_random_CP(self):
for _ in range(10):
a = randint(10_000_000, 100_000_000) * 2
primitive_triples = pythagorean_triples(a, True)
for t in primitive_triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
self.assertTrue(is_primitive(t), f'{t} is not primitive')
def test_random_C(self):
for _ in range(10):
a = randint(10_000_000, 100_000_000) * 2
triples = pythagorean_triples(a)
for t in triples:
self.assertTrue(is_pythagorean_triple(t), f"{t} is not Pythagorean triple")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "07734willy/Python-Playground",
"score": 4
} |
#### File: codegolf/count_and_say/alt_solution.py
```python
from itertools import*
class Solution:
def countAndSay(f,n,s="1"):return s if n<2else f.countAndSay(n-1,"".join(str(len(list(v)))+d for d,v in groupby(s)))
```
#### File: codegolf/reverse_integer/alt_solution.py
```python
l=2**31
class Solution:
def reverse(s,x):r=(-1)**(x<0)*int(str(abs(x))[::-1]);return r if -l<r<l-1else 0
```
#### File: codegolf/reverse_integer/reverse_integer_list_comprehension.py
```python
def reverse(num):
result = int(str(num)[::-1]) if num > 0 else int('-' + str(num)[::-1].replace('-',''))
return result if -2147483648 <= result <= 2147483647 else 0
```
#### File: codegolf/two_sum/two_sum_list_comprehension.py
```python
def two_sums(numbers, target):
return [[numbers.index(num1), numbers.index(num2)]
for num1 in numbers for num2 in numbers if num1 + num2 == target][0]
print(two_sums([2, 7, 11, 15],9))
```
#### File: exercises/exercise1/comprehensions.py
```python
def add_five(numbers):
out = []
for num in numbers:
out.append(num + 5)
return out
# TODO Complete this function
def add_five_one_liner(numbers):
pass
# Exercise 2
# Drops any small numbers (strictly less than 4)
def drop_small(numbers):
out = []
for num in numbers:
if not num < 4:
out.append(num)
return out
# TODO Complete this function
def drop_small_one_liner(numbers):
pass
# Exercise 3
# Returns a *set* of all distinct numbers (i.e. no repeats)
def get_distinct(numbers):
out = set()
for num in numbers:
out.add(num)
return out
# TODO Complete this function
def get_distinct_one_liner(numbers):
pass
## Helper testing functions, for your convenience ##
def test_add_five(numbers):
out1 = add_five(numbers)
out2 = add_five_one_liner(numbers)
assert(out1 == out2)
def test_drop_small(numbers):
out1 = drop_small(numbers)
out2 = drop_small_one_liner(numbers)
assert(out1 == out2)
def test_get_distinct(numbers):
out1 = get_distinct(numbers)
out2 = get_distinct_one_liner(numbers)
assert(out1 == out2)
# Main method
def main():
# Feel free to add anything you need to test your solutions here
pass
if __name__ == "__main__":
main()
```
#### File: exercises/exercise2/comprehensions.py
```python
def make_dict(keys, values):
out = dict()
for i in range(min(len(keys), len(values))):
out[keys[i]] = values[i]
return out
# HINT: there's special syntax for a dict comprehension
# Takes a 2D matrix (any size), and returns a plain list of elements that
# it contains, which are larger than 4
def large_elements(matrix):
out = []
for row in matrix:
for elem in row:
if elem > 4:
out.append(elem)
return out
# Takes a 2D matrix (with only two rows) and flips it across its diagonal
# [[1, 7, 4],          [[1, 3],
#  [3, 2, 5]]  becomes  [7, 2],
#                        [4, 5]]
def matrix_transpose(matrix):
out = []
for i in range(len(matrix[0])):
elem0 = matrix[0][i]
elem1 = matrix[1][i]
out.append([elem0, elem1])
return out
# HINT: You'll need to use `zip()`
# Returns a list taking the max out of every two numbers
# [7, 1, 9, 10] gives [7, 10]
def max_every_other(numbers):
out = []
for i in range(0, len(numbers), 2):
out.append(max(numbers[i], numbers[i+1]))
return out
# HINT: again, `zip()`
# Forms lists of repeating elements, from a list of elements. Ex:
# [1, 2, 1, 4] produces
# [[1], [2, 2], [1], [4, 4, 4, 4]]
def repeat_elements(counts):
out = []
for count in counts:
tmp = []
for _ in range(count):
tmp.append(count)
out.append(tmp)
return out
# A generator producing all powers of two on-demand
def power_of_twos():
start = 0
while True:
yield 2 ** start
start += 1
# HINT: this is a generator. Generator comprehensions use `()` instead
# of `[]`. Additionally, you'll want `count()` from `itertools`
# `from itertools import count` at the top of the file
# Takes a matrix, and return a string in csv format
def make_csv(matrix):
out = ""
rows = len(matrix)
cols = len(matrix[0])
for i in range(rows):
if i != 0:
out += "\n"
for j in range(cols):
if j != 0:
out += ", "
out += str(matrix[i][j])
return out
# HINT: you can use str.join() to join a bunch of strings into one.
# "RR".join(["foo", "bar", "fizz", "buzz"]) yields
# "fooRRbarRRfizzRRbuzz"
```
#### File: exercises/exercise3/function solutions.py
```python
def do_something(x, y):
return x + y
#answer:
do_something = lambda x,y: x + y
#nothing to change
#2
def drop_value(numbers, value):
return [num for num in numbers if num != value]
#answer:
numbers = [1,2,3]
target = [1]
drop_value = list(filter(lambda value: value not in target, numbers))
#nothing to change
#3
def find_max_vals(matrix):
return [max(row) for row in matrix]
#answer:
find_max_vals = list(map(lambda matrix: max(matrix),[[1,2,3], [4,5,6]]))
def find_max_vals(matrix):
return list(map(max, matrix))
#4
def get_digits(number):
return [int(digit) for digit in str(number)]
#answer:
get_digits = list(map(lambda number: int(number), list(str(235))))
def get_digits(number):
return map(int, number)
#5
def sort_lists(*lists):
tuples = [(len(row), row) for row in lists]
tuples.sort()
return [row for length, row in tuples]
#answer:
sort_lists = lambda *lists: sorted(lists)
def sort_lists(*lists):
return list(map(sorted, lists))
#6
def get_abs_min(numbers):
try:
non_neg_min = min(num for num in numbers if num >= 0)
except: non_neg_min = -1
try:
neg_min = max(num for num in numbers if num < 0)
except: neg_min = 1
if neg_min == 1:
return non_neg_min
if non_neg_min == -1:
return neg_min
if abs(neg_min) < non_neg_min:
return neg_min
return non_neg_min
#answer:
get_abs_min = lambda numbers: min(abs(num) for num in numbers)
def get_abs_min(numbers):
return min(numbers, key=abs)
#7
def max_row_sum(matrix):
return max(sum(e for e in row) for row in matrix)
#answer:
max_row_sum = lambda matrix: max(sum(array) for array in matrix)
def max_row_sum(matrix):
return max(matrix, key=sum)
#8
def count_nonzero(numbers):
return len([num for num in numbers if num != 0])
def count_nonzero(numbers):
return sum(map(bool, numbers))
#my 'genius' answer
count_nonzero = lambda numbers: len(numbers) - numbers.count(0)
```
#### File: exercises/exercise4/misc.py
```python
def subtraction(numbersA, numbersB):
out = []
for i in range(min(len(numbersA), len(numbersB))):
out.append(numbersA[i] - numbersB[i])
return out
# TODO use map and str- no comprehensions
def letters_in_string(word):
return [letter for letter in word]
# TODO fill both of the following two functions in with some calculation (anything), and call them both.
# don't edit the function's arguments
def calculate(input_list):
pass
def calculat32(*vargs):
pass
# ^ the pupose is so that you learn what the `*` does in the function signature
# HINT: it makes it turn `n` separate arguments into a single list of those `n` values
# TODO use the short-circuiting behavior demonstrated in the following function to convert `ternary` into
# an expression of `or`s and variables
# returns varA if varA is not 0/None/[]/False, otherwise returns varB
def short_circuit_demo(varA, varB):
return varA or varB
# TODO use the above example to convert this function to `or`s
def ternary(varA, varB, varC):
tmp = varC if varC else varA
return varB if varB else tmp
# TODO return true is any of the values in `numbers` are greater than 3
# use `any()` and a comprehension
def any_above_three(numbers):
for num in numbers:
if num > 3:
return True
return False
# TODO same behavior, except if all values are larger than 3
# hint: `all`
def all_above_three(numbers):
for num in numbers:
if num <= 3:
return False
return True
# use the following example to see how for-else is used, for the next problem
def last_zero(numbers):
    for index in range(len(numbers) - 1, -1, -1):
        if numbers[index] == 0:
            break
    else:
        raise Exception("No zeros found")
    return index
# TODO iterate over two lists simulaneously, report the index of the first pair with equal value.
# raise an exception if there's no such pair
# example: [1, 3, 4, 5], [3, 5, 4, 7] would return 2 (the index of the 4's)
# NOTE this cannot be done as a one-liner, but could be done in three.
# Either way, use `zip` to make your life easier, and for-else for the sake of practice
def index_of_same_element(numsA, numsB):
pass
# TODO strip this solution down to 4 lines (including def ... ), by solely
# removing whitespace and adding `;` where need be
def random_thing(numbers, const):
for num in numbers:
if 3 < num < 12:
return True
temp = const + max(numbers)
    return temp
# TODO sort a string consisting solely of digits (there may be duplicates) in the following order
# 1 < 4 < 3 < 5 < 9 < 7 < 2 < 8 < 0 < 6
# don't fret if you can't get it- this one is mainly meant to be an eye-opener to what crazy things
# you can do with functional programming
def digit_sort(digits):
pass
# TODO do this, but without using the word `list`, and without using comprehensions. You'll need to use the
# unpacking operator `*`, and plain old []'s (no comprehension though)
def set_to_list(input_set):
return list(input_set)
``` |
{
"source": "07734willy/StackOverflowSearch",
"score": 3
} |
#### File: 07734willy/StackOverflowSearch/menu.py
```python
from textwrap import wrap
ITEM_FORMAT = """\
[{score}] {title}
{desc}"""
def format_question(question):
score = 3
title = "How to do thing"
desc = "just do abc, some really long filler text here" + "here " * 50
indent = ' ' * 4
desc = '\n'.join(wrap(desc, width=80, tabsize=4, max_lines=3,
initial_indent=indent, subsequent_indent=indent))
text = ITEM_FORMAT.format(score=score, title=title, desc=desc)
return text
print(format_question(None))
```
#### File: 07734willy/StackOverflowSearch/stack_search.py
```python
import click
def scrape_overflow(query, sources, answers):
"""
{
"title": "string",
"votes": int,
"answers": [
{
"text": "string",
"votes": int,
"author": "string",
"date": "string" or datetime
}
],
"tags": [
"string",
"string"
],
"link": "string",
"date": "string" or datetime,
"author": "string"
}
"""
return []
@click.command()
@click.argument('query', nargs=-1, required=True)
@click.option('--sources', '--s', default=2, show_default=True, help='Number of questions used')
@click.option('--results', '--r', default=3, show_default=True, help='Number of answers shown per question')
def main(query, sources, results):
""" A program that find answers from Stacksoverflow """
print (" ".join(query), sources, results)
scraped_data = scrape_overflow(" ".join(query), sources, results)
for question in scraped_data:
click.echo(f"Question {question}:")
for answer in question["answers"]:
click.echo(f"Answer: {answer}")
if __name__ == "__main__":
main()
``` |
{
"source": "07734willy/Strange-Attractors",
"score": 3
} |
#### File: 07734willy/Strange-Attractors/attractor.py
```python
from argparse import ArgumentParser, ArgumentTypeError
from PIL import Image
import numpy as np
import ctypes
import time
MAX_ATTRACTORS = 1 # number of attractors to search for
T_SEARCH = 2000 # number of iterations to perform during search
T_RENDER = int(10e6) # number of iterations to perform during render
T_IDX = int(0.01 * T_RENDER) # first index after transient
MODE = "Cubic"
""" import external C helper functions """
dll = ctypes.cdll.LoadLibrary("./helper.so")
c_iterator = dll.iterator
c_sum_alpha = dll.sum_alpha
def iterator(x, y, z, coeff, repeat, radius=0):
""" compute an array of positions visited by recurrence relation """
c_iterator.restype = ctypes.POINTER(ctypes.c_double * (3 * repeat))
start = to_double_ctype(np.array([x, y, z]))
coeff = to_double_ctype(coeff)
out = to_double_ctype(np.zeros(3 * repeat))
res = c_iterator(start, coeff, repeat, ctypes.c_double(radius), out).contents
return np.array(res).reshape((repeat, 3)).T
def sum_alpha(yres, xres, Is, Js, rx, ry, rz):
""" compute the sum of zalpha values at each pixel """
c_sum_alpha.restype = ctypes.POINTER(ctypes.c_double * (yres * xres * 3))
size = len(Is)
out = to_double_ctype(np.zeros(yres * xres * 3))
Is, Js = to_int_ctype(Is), to_int_ctype(Js)
rx = to_double_ctype(rx)
ry = to_double_ctype(ry)
rz = to_double_ctype(rz)
res = c_sum_alpha(yres, xres, size, Is, Js, rx, ry, rz, out).contents
return np.array(res).reshape((yres, xres, 3))
def to_double_ctype(arr):
""" convert arr to a ctype array of doubles """
arr_type = ctypes.POINTER(ctypes.c_double * len(arr))
return arr.astype(np.float64).ctypes.data_as(arr_type)
def to_int_ctype(arr):
""" convert arr to a ctype array of ints """
arr_type = ctypes.POINTER(ctypes.c_int32 * len(arr))
return arr.astype(np.int32).ctypes.data_as(arr_type)
def coeff_to_string(coeff):
"""convert coefficients to alphabetical values (see Sprott)"""
att_string = "".join([chr(int((c + 7.7)*10)) for c in coeff])
return att_string
def pixel_density(xdata, ydata, coeff, xres=320, yres=180):
""" check for density of points in image """
xmin, ymin, xrng, yrng = set_aspect(xdata, ydata, xres, yres)
render = np.zeros((yres, xres))
try:
for x, y in zip(xdata, ydata):
J = get_index(x, xmin, xrng, xres)
I = get_index(y, ymin, yrng, yres)
render[I, J] += 1
except ValueError:
print("Invalid value (pixel density)")
return False
return check_density(render, coeff)
def check_density(render, coeff, min_fill=1.5):
""" check if pixel density exceeds threshold """
filled_pixels = np.count_nonzero(render)
fill_percentage = 100 * filled_pixels/np.size(render)
if fill_percentage > min_fill:
print(coeff_to_string(coeff))
print("Non-zero points: {} ({:.2f}%)".format(filled_pixels, fill_percentage))
print("")
return True
return False
def set_aspect(xdata, ydata, width, height, debug=False, margin=1.1):
""" get boundaries for given aspect ratio w/h """
xmin, xrng = get_minmax_rng(xdata)
ymin, yrng = get_minmax_rng(ydata)
if debug:
print("Data range | X: {:.2f} | Y: {:.2f} | Intrinsic aspect ratio: {:.2f}".format(xrng, yrng, xrng/yrng))
xmid = xmin + xrng/2
ymid = ymin + yrng/2
if xrng/yrng < width/height:
xrng = width/height * yrng
else:
yrng = height/width * xrng
xrng *= margin
yrng *= margin
xmin = xmid - xrng/2.
ymin = ymid - yrng/2
if debug:
print("Rescaled data range | X: {:.2f} | Y: {:.2f} | New aspect ratio: {:.2f}".format(xrng, yrng, xrng/yrng))
return xmin, ymin, xrng, yrng
def get_minmax_rng(data):
max_val = data.max()
min_val = data.min()
data_range = max_val - min_val
return min_val, data_range
def get_index(x, xmin, xrng, xres):
""" map coordinate to array index """
return int((x-xmin) * (xres-1) / xrng)
def get_dx(xdata):
dx = abs(xdata - np.roll(xdata, 1))[1:]
mdx = np.amax(dx)
return dx, mdx
def zalpha(z, zmin, zrng, a_min=0):
""" return alpha based on z depth """
alpha = a_min + (1-a_min)*(z-zmin)/zrng
return alpha
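# For example, zalpha(zmin, zmin, zrng) == a_min and zalpha(zmin + zrng, zmin, zrng) == 1.0,
# i.e. alpha scales linearly with depth from a_min up to full opacity.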
def save_image(xdata, ydata, zdata, coeff, plane, alpha=0.025, xres=3200, yres=1800):
start = time.time()
xmin, ymin, xrng, yrng = set_aspect(xdata, ydata, xres, yres, debug=True)
zmin, zrng = get_minmax_rng(zdata)
dxs, mdx = get_dx(xdata)
dys, mdy = get_dx(ydata)
dzs, mdz = get_dx(zdata)
print("Calculating pixel values")
xscaled = (xdata[1:]-xmin) * (xres-1) / xrng
yscaled = (ydata[1:]-ymin) * (yres-1) / yrng
clip = np.logical_and(xscaled < xres, yscaled < yres)
xscaled = xscaled.astype(int)[clip]
yscaled = yscaled.astype(int)[clip]
a_min = 0.25
zscaled = (zdata[1:]-zmin) * (1-a_min) / zrng + a_min
xpix = (1-dxs/mdx)*alpha*zscaled[clip]
ypix = (1-dys/mdy)*alpha*zscaled[clip]
zpix = (1-dzs/mdz)*alpha*zscaled[clip]
render = sum_alpha(yres, xres, yscaled, xscaled, xpix, ypix, zpix)
render = np.clip(render, None, 1)
fname = "{}-{}K-{}.png".format(coeff_to_string(coeff), T_RENDER//1000, plane)
Image.fromarray((render * 255).astype(np.uint8)).save(fname, compress_level=1)
end = time.time()
print("Saved " + fname)
print("{:.2f} sec".format(end-start))
def coeff_from_str(word):
"""convert alphabetical values to coefficients"""
return np.array([(ord(c)-ord("A")-12)/10 for c in word.upper()])
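# Illustrative round trip: coeff_from_str("M") -> array([0.0]) and coeff_to_string([0.0]) -> "M";
# each letter A..Y encodes one coefficient in the range -1.2..1.2 in steps of 0.1.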
def search_attractors(max_attractors):
print("Searching for attractors | Mode: {}".format(MODE))
att_coeffs = []
n_attractors = 0
while n_attractors < max_attractors:
# pick random coefficients in the range (-1.2,1.2)
if MODE == "Cubic":
coeff = np.random.randint(-12, 13, 60)/10
n_coeff = 20
else:
raise ValueError("Only 'Cubic' mode is currently supported")
x, y, z = 0, 0, 0
xl, yl, zl = iterator(x, y, z, coeff, T_SEARCH, 10)
if zl[-1] <= 10:
if pixel_density(xl, yl, coeff):
att_coeffs.append(coeff)
n_attractors += 1
print("")
return att_coeffs
def plot_attractors(att_coeffs):
for i, coeff in enumerate(att_coeffs, 1):
print("\nAttractor: {} | {}/{}".format(coeff_to_string(coeff), i, len(att_coeffs)))
print("Iterating {} steps".format(T_RENDER))
start = time.time()
x, y, z = 0, 0, 0
xl, yl, zl = iterator(x, y, z, coeff, T_IDX)
x, y, z = xl[-1], yl[-1], zl[-1]
if np.isnan(x+y+z):
print("Error during calculation")
continue
xl, yl, zl = iterator(x, y, z, coeff, T_RENDER - T_IDX)
end = time.time()
print("Finished iteration: {:.1f} sec | {} iterations per second".format((end-start), T_RENDER/(end-start)))
save_image(xl[T_IDX:], yl[T_IDX:], zl[T_IDX:], coeff, plane="xy")
save_image(xl[T_IDX:], zl[T_IDX:], yl[T_IDX:], coeff, plane="xz")
save_image(yl[T_IDX:], zl[T_IDX:], xl[T_IDX:], coeff, plane="yz")
def seed_check(seed):
symbols_valid = all(ord("A") <= ord(c) <= ord("Y") for c in seed.upper())
if symbols_valid and len(seed) == 60:
return seed
raise ArgumentTypeError("Seed must contain exactly 60 characters in range A-Y inclusive")
def main():
parser = ArgumentParser(description="Plots strange attractors")
parser.add_argument("--seed", dest="seed", action="store", nargs=1, type=seed_check,
help="an alphabetical seed representing the coefficients of the attractor")
args = parser.parse_args()
if args.seed:
att_coeffs = [coeff_from_str(args.seed[0])]
else:
att_coeffs = search_attractors(MAX_ATTRACTORS)
plot_attractors(att_coeffs)
if __name__ == "__main__":
main()
``` |
{
"source": "07akshay/Huffman-Coding-algorithm",
"score": 3
} |
#### File: 07akshay/Huffman-Coding-algorithm/__init__.py
```python
import heapq
from heapq import heapify
from heapq import heappush
from heapq import heappop
class Node:
def __init__(self, val, freq: int):
self.val = val
self.left = None
self.right = None
self.freq = freq
def __lt__(self, other):
return (self.freq < other.freq)
def doc():
print("This is a very simple library which can be used for text compression. \nYou need to give a text input in the expand() function and recieve the dictionary, compressed text and root object as the output.\n\nThen to get the original text back you only need to pass these 3 things as the input to the expand() function.\nThe output recieved would be the original text")
def recur(codes, root : Node, a):
if root.left:
recur(codes, root.left, a+'0')
if root.right:
recur(codes, root.right, a+'1')
if root.left==None and root.right==None:
codes[root.val] = a
def compress(text):
"""
This function takes the text input - which can contain
any ascii character.
It returns 3 things:
1. A dictionary of unique characters which contains
    their corresponding codes.
2. The compressed message
3. The final object list, which contains the root
node object
"""
try:
d = dict.fromkeys(text,0)
for i in text:
d[i] +=1
final_list =[]
for i in d:
obj = Node(i, d[i])
final_list.append(obj)
heapify(final_list)
while len(final_list)>1:
obj1 = heappop(final_list)
obj2 = heappop(final_list)
obj = Node(None, obj1.freq+obj2.freq)
obj.left = obj1
obj.right = obj2
heappush(final_list, obj)
codes = dict.fromkeys(text,'')
recur(codes, final_list[0].left, '0')
recur(codes, final_list[0].right, '1')
encryption = ''
for j in text:
encryption += codes[j]
encryption
return codes, encryption, final_list
except:
print("Invalid Input!")
def expand(codes, encryption, start):
"""
It takes the dictionary, compressed message and root
object as the input.
Returns the original text.
"""
try:
root = start
decryption = ""
for code in encryption:
if code == '0':
obj = root.left
root = obj
elif code == '1':
obj = root.right
root = obj
if obj.val:
decryption += obj.val
root = start
return decryption
except:
print("Can't expand using invalid dictionary or compressed message")
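# --- Illustrative usage sketch (not part of the original module) ---
# Round-trip a short message: compress() returns the code table, the bit string and the
# heap whose single remaining element is the root of the Huffman tree.
if __name__ == "__main__":
    codes, encoded, tree = compress("hello huffman")
    print(encoded)                          # a string of '0'/'1' characters
    print(expand(codes, encoded, tree[0]))  # -> "hello huffman"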
``` |
{
"source": "07ATUL/whatsapp-play",
"score": 3
} |
#### File: whatsapp-play/wplay/messagetimer.py
```python
import time
import random
# These lines import helper modules from the utils package in wplay.
from wplay.utils import browser_config
from wplay.utils import target_search
from wplay.utils import target_select
from wplay.utils import io
# This function first waits for WhatsApp Web to load properly.
async def msgTimer(target):
page, _ = await browser_config.configure_browser_and_load_whatsapp()
    # Wait until the target username is found.
# Region INPUTS
if target is not None:
await target_search.search_and_select_target(page, target)
else:
await target_select.manual_select_target(page)
# Region INPUTS
message_type_numbers = int(
input("How many types of messages will you send? "))
messages = list()
for _ in range(message_type_numbers):
messages.append(io.ask_user_for_message_breakline_mode())
    # Ask the client for the number of messages to send.
number_of_messages = int(input("Enter the number of messages to send: "))
    # The time interval to wait between messages.
minimumTimeInterval = int(
input("Enter minimum interval number in seconds: "))
maximumTimeInterval = int(
input("Enter maximum interval number in seconds: "))
# Endregion
random.seed()
for _ in range(number_of_messages):
if not messages:
break
await io.send_message(page, messages[random.randrange(0, message_type_numbers)])
if minimumTimeInterval != maximumTimeInterval:
time.sleep(random.randrange(minimumTimeInterval, maximumTimeInterval))
else:
time.sleep(minimumTimeInterval)
# This script acts as a helper for the message blast script: the number of messages and the send interval defined here are reused by the blast script.
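# Illustrative usage sketch (assumption, not part of the original module): msgTimer is a
# coroutine, so a caller would drive it with an event loop; "friend_name" is a hypothetical target.
if __name__ == '__main__':
    import asyncio
    asyncio.run(msgTimer("friend_name"))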
```
#### File: whatsapp-play/wplay/onlinetracker.py
```python
import time
from pathlib import Path
from datetime import datetime
from playsound import playsound
# These imports come from the utils folder in wplay
from wplay.utils import browser_config
from wplay.utils import target_search
from wplay.utils import target_select
from wplay.utils import target_data
from wplay.utils.helpers import data_folder_path
# After the client runs the command-line script, the user enters a name; that name is the target used here.
async def tracker(target):
    # Wait for WhatsApp Web to finish loading
page, _ = await browser_config.configure_browser_and_load_whatsapp()
    # Search for the target name (or let the user pick one manually)
if target is not None:
target_name = await target_search.search_and_select_target(page, target, hide_groups = True)
else:
target_name = await target_select.manual_select_target(page, hide_groups = True)
Path(data_folder_path / 'tracking_data').mkdir(parents = True, exist_ok = True)
    # Truncate any previous log for this target, then reopen it in append mode
    open(data_folder_path / 'tracking_data' / f'status_{target_name}.txt', 'w').close()
    status_file = open(data_folder_path / 'tracking_data' / f'status_{target_name}.txt', 'a')
is_sound_enabled = True
last_status = 'offline'
try:
        # After the username is found, any matching names are printed and the user chooses which one to track; that name becomes the target.
print(f'Tracking: {target_name}')
# It writes the status whether offline or online in tracking file.
# Checks for the status and plays the sound for confirmation
status_file.write(f'Tracking: {target_name}\n')
while True:
status = await target_data.get_last_seen_from_focused_target(page)
if status == 'online':
is_online = True
else:
is_online = False
status = 'offline'
if last_status != is_online:
if is_online:
try:
if is_sound_enabled:
playsound('plucky.wav')
except:
print("Error: Couldn't play the sound.")
is_sound_enabled = False
print(
f'{datetime.now().strftime("%d/%m/%Y, %H:%M:%S")}' + f' - Status: {status}'
)
status_file.write(
f'{datetime.now().strftime("%d/%m/%Y, %H:%M:%S")}' + f' - Status: {status}\n')
last_status = is_online
time.sleep(0.5)
finally:
status_file.close()
#The final status of the operations that is the tracking data is stored in tracking data file.
print(f'\nStatus file saved in: {str(data_folder_path/"tracking_data"/"status_")}{target_name}.txt')
``` |
{
"source": "07fi123/domain-token-demo",
"score": 3
} |
#### File: domain-token-demo/dt/watch.py
```python
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import os
class Watcher:
DIRECTORY_TO_WATCH = "./web"
    print("Watching " + DIRECTORY_TO_WATCH)
def __init__(self):
self.observer = Observer()
def run(self):
event_handler = Handler()
self.observer.schedule(event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
self.observer.start()
try:
while True:
time.sleep(5)
except:
self.observer.stop()
            print("Error")
self.observer.join()
class Handler(FileSystemEventHandler):
@staticmethod
def on_any_event(event):
if event.is_directory:
return None
elif event.event_type == 'created':
# Take any action here when a file is first created.
            print("Received created event - %s." % event.src_path)
elif event.event_type == 'modified':
# Taken any action here when a file is modified.
            print("Received modified event - %s." % event.src_path)
os.system("docker stop $(docker ps -a -q)")
if __name__ == '__main__':
w = Watcher()
w.run()
``` |
{
"source": "07kshitij/CS60075-Team-11-Task-1",
"score": 3
} |
#### File: 07kshitij/CS60075-Team-11-Task-1/get_features.py
```python
import numpy as np
from wordfreq import word_frequency
""" Features used
* Word Embedding [GloVe 50 dimensional embeddings](http://nlp.stanford.edu/data/glove.6B.zip)
* Length of word
* Syllable count [PyPI](https://pypi.org/project/syllables/)
* Word Frequency [PyPI](https://pypi.org/project/wordfreq/)
* POS tag [Spacy](https://spacy.io/usage/linguistic-features#pos-tagging)
[Reference](https://www.aclweb.org/anthology/W18-0508.pdf)
"""
# Construct features for single word expressions
def prepare_features_single_word(tokens, sentences, nlp, word_to_ix, model, embedding_index, EMBEDDING_DIM):
features = []
for idx, word in enumerate(tokens):
word = word.lower()
feature = []
# Word length
feature.append(len(word))
doc = nlp(word)
# Syllable count and word frequency in the corpus
# Spacy tokenizes the input sentence
# In this case we would have only one token, the target word
for token in doc:
feature.append(token._.syllables_count)
feature.append(word_frequency(word, 'en'))
        # Probability of the target word `word` in the sentence, as estimated by `model`
if word in word_to_ix:
# Output scores for each of the word in the sentence
out = model(sentences[idx])
pos = -1
for itr, token in enumerate(sentences[idx].split()):
if token.lower() == word:
pos = itr
break
id_pos = word_to_ix[word] # word to id mapping
feature.append(float(out[pos][id_pos]))
else:
# `word` not in vocabulary, so cannot predict probability in context
feature.append(0.0)
# GloVE embedding for the `word`
if word in embedding_index:
feature.extend(embedding_index[word].tolist())
else:
# `word` not in the GloVE corpus, take a random embedding
feature.extend(np.random.random(EMBEDDING_DIM).tolist())
features.append(feature)
if (idx + 1) % 500 == 0:
print('Prepared features for {} single target word sentences'.format(idx + 1))
return features
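# Sketch of the resulting layout (assuming EMBEDDING_DIM == 50): each row of `features` is
#   [word_length, syllable_count, corpus_frequency, LM_probability, glove_0, ..., glove_49]
# i.e. four handcrafted values followed by the 50-dimensional GloVe vector (54 columns in total).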
# Construct features for multi word expressions
def prepare_features_multi_word(tokens, sentences, nlp, word_to_ix_multi, model_multi, embedding_index, EMBEDDING_DIM):
features = []
for idx, word in enumerate(tokens):
word = word.lower()
feature = []
doc = nlp(word)
word = word.split(' ')
assert(len(word) == 2)
# MWE length = sum(length of individual words)
feature.append(len(word[0]) + len(word[1]))
syllables = 0
probability = 1
embedding = np.zeros(EMBEDDING_DIM)
# Syllable count and word frequency in the corpus
# Spacy tokenizes the input sentence
# In this case we would have two tokens
for token in doc:
word_ = token.text
syllables += token._.syllables_count
probability *= word_frequency(word_, 'en')
# GloVE embedding current `word_` of the MWE
if word_ in embedding_index:
embedding = embedding + embedding_index[word_]
else:
# `word_` not in the GloVE corpus, take a random embedding
embedding = embedding + np.random.random(EMBEDDING_DIM)
# Average embedding of the two tokens in the MWE
embedding = embedding / 2
feature.append(syllables)
feature.append(probability)
# Product of probabilities of constituent words in the MWE
if word[0] in word_to_ix_multi and word[1] in word_to_ix_multi:
# Output scores for each of the word in the sentence
out = model_multi(sentences[idx])
pos0, pos1 = -1, -1
for itr, token in enumerate(sentences[idx].split()):
if token.lower() == word[0]:
pos0 = itr
pos1 = itr + 1
break
id_pos0 = word_to_ix_multi[word[0]]
id_pos1 = word_to_ix_multi[word[1]]
feature.append(float(out[pos0][id_pos0] * out[pos1][id_pos1]))
else:
# Either of the constituent words of the MWE not in vocabulary \
# So cannot predict probability in context
feature.append(0.0)
feature.extend(embedding.tolist())
features.append(feature)
if (idx + 1) % 500 == 0:
print('Prepared features for {} multi target word sentences'.format(idx + 1))
return features
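# Sketch of the resulting layout (assuming EMBEDDING_DIM == 50): each MWE row of `features` is
#   [combined_word_length, total_syllable_count, frequency_product, LM_probability_product, avg_glove_0, ..., avg_glove_49]
# i.e. four handcrafted values followed by the averaged 50-dimensional GloVe vector of the two tokens.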
```
#### File: 07kshitij/CS60075-Team-11-Task-1/lcp_shared_task_overall_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy import stats
import spacy
from spacy_syllables import SpacySyllables
from utils import *
from get_features import *
from Models.NeuralNet import NN
from Models.LinearRegression import LinearRegressor
from Models.SVR import SupportVectorRegressor
import sys
USE_SAVED_MODELS = 1
if len(sys.argv) > 1:
USE_SAVED_MODELS = int(sys.argv[1])
if not USE_SAVED_MODELS:
print('\n +++ Training all models from scratch +++ \n')
else:
print('\n +++ Using saved models from TrainedModels/ +++ \n')
# Seed all rngs for deterministic results
seed_all(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Syllable tagger pipeline
nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("syllables", after='tagger')
# Dataset Paths
SINGLE_TRAIN_DATAPATH = "https://raw.githubusercontent.com/MMU-TDMLab/CompLex/master/train/lcp_single_train.tsv"
SINGLE_TEST_DATAPATH = "https://raw.githubusercontent.com/MMU-TDMLab/CompLex/master/test-labels/lcp_single_test.tsv"
MULTI_TRAIN_DATAPATH = "https://raw.githubusercontent.com/MMU-TDMLab/CompLex/master/train/lcp_multi_train.tsv"
MULTI_TEST_DATAPATH = "https://raw.githubusercontent.com/MMU-TDMLab/CompLex/master/test-labels/lcp_multi_test.tsv"
# Obtain the dataFrames
df_train_single, df_test_single, df_train_multi, df_test_multi = get_data_frames(SINGLE_TRAIN_DATAPATH, SINGLE_TEST_DATAPATH, MULTI_TRAIN_DATAPATH, MULTI_TEST_DATAPATH)
single_tokens_train_raw = df_train_single["token"].astype(str).to_list()
single_tokens_test_raw = df_test_single["token"].astype(str).to_list()
y_single_train = df_train_single["complexity"].astype(np.float32).to_numpy()
y_single_test = df_test_single["complexity"].astype(np.float32).to_numpy()
multi_tokens_train_raw = df_train_multi["token"].astype(str).to_list()
multi_tokens_test_raw = df_test_multi["token"].astype(str).to_list()
y_multi_train = df_train_multi["complexity"].astype(np.float32).to_numpy()
y_multi_test = df_test_multi["complexity"].astype(np.float32).to_numpy()
sent_train_single_raw = df_train_single["sentence"].to_list()
sent_test_single_raw = df_test_single["sentence"].to_list()
sent_train_multi_raw = df_train_multi["sentence"].to_list()
sent_test_multi_raw = df_test_multi["sentence"].to_list()
EMBEDDING_DIM = 50
def get_embeddings(EMBEDDING_DIM):
embedding_index = {}
with open('glove.6B.{}d.txt'.format(EMBEDDING_DIM), 'r', encoding='utf-8') as f:
for line in f:
values = line.split()
token = values[0]
embedding_index[token] = np.asarray(values[1:], dtype='float32')
return embedding_index
embedding_index = get_embeddings(EMBEDDING_DIM)
print('\n[Token Count] GloVE embeddings: {}'.format(len(embedding_index)))
word_to_ix, word_to_ix_multi = map_token_to_idx(sent_train_single_raw, sent_train_multi_raw)
print('\n[Vocab size] Single Word: {}\n[Vocab size] Multi Word: {}'.format(len(word_to_ix), len(word_to_ix_multi)))
""" biLSTM to predict target probability
Reference - [PyTorch](https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html)
"""
HIDDEN_DIM = 10
"""biLSTM class to calculate token probability given context"""
class biLSTM(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, output_size):
super(biLSTM, self).__init__()
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True)
self.hidden2tag = nn.Linear(2 * hidden_dim, output_size)
def prepare_embedding(self, sentence):
embeddings = []
for word in sentence:
word = word.lower()
if word in embedding_index:
embeddings.extend(embedding_index[word])
else:
embeddings.extend(np.random.random(EMBEDDING_DIM).tolist())
embeddings = torch.tensor(embeddings, dtype=torch.float32, device=device)
return embeddings
def forward(self, sentence):
sentence = sentence.split()
embeds = self.prepare_embedding(sentence)
lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
tag_scores = F.softmax(tag_space, dim=1)
return tag_scores
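# Illustrative note (assumption): calling an instance on a raw sentence string, e.g.
#   scores = bilstm_model("the cat sat")
# returns a (3, output_size) tensor of softmax scores, one row per whitespace token,
# which the feature builders later index by token position and vocabulary id.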
"""biLSTM model for single word targets"""
model = biLSTM(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(word_to_ix))
# Saved checkpoint path; used to avoid re-training the same model repeatedly.
# Loading vs. training is controlled by the USE_SAVED_MODELS command-line flag above.
path_biLSTM_single = './TrainedModels/biLSTM.pt'
USE_PRETRAINED_SINGLE_WORD_TARGET_MODEL = USE_SAVED_MODELS
if USE_PRETRAINED_SINGLE_WORD_TARGET_MODEL:
print('Using pre-trained biLSTM on single target expressions')
model = torch.load(path_biLSTM_single)
model.eval()
else:
print('Training biLSTM on single target expressions')
# Train the model for 10 epochs
model = biLSTM(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(word_to_ix))
loss_function = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
for epoch in range(10):
loss_sum = 0
for sentence in sent_train_single_raw:
model.zero_grad()
targets = prepare_sequence(sentence, word_to_ix)
tag_scores = model(sentence)
loss = loss_function(tag_scores, targets)
loss_sum += loss
loss.backward()
optimizer.step()
print('Epoch: {} Loss: {}'.format(epoch, loss_sum.item()))
"""biLSTM model for multi word targets"""
model_multi = biLSTM(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix_multi), len(word_to_ix_multi))
# Saved checkpoint path; used to avoid re-training the same model repeatedly.
# Loading vs. training is controlled by the USE_SAVED_MODELS command-line flag above.
path_biLSTM_multi = './TrainedModels/biLSTM_multi.pt'
USE_PRETRAINED_MULTI_WORD_TARGET_MODEL = USE_SAVED_MODELS
if USE_PRETRAINED_MULTI_WORD_TARGET_MODEL:
print('Using pre-trained biLSTM on multi target expressions')
model_multi = torch.load(path_biLSTM_multi)
model_multi.eval()
else:
print('Training biLSTM on multi target expressions')
model_multi = biLSTM(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix_multi), len(word_to_ix_multi))
loss_function = nn.MSELoss()
optimizer = optim.Adam(model_multi.parameters(), lr=0.01)
for epoch in range(10):
loss_sum = 0
for sentence in sent_train_multi_raw:
model_multi.zero_grad()
targets = prepare_sequence(sentence, word_to_ix_multi)
tag_scores = model_multi(sentence)
loss = loss_function(tag_scores, targets)
loss_sum += loss
loss.backward()
optimizer.step()
print('Epoch: {} Loss: {}'.format(epoch, loss_sum.item()))
print('\n')
print('+++ Generating Train features for Single word expressions +++')
features_train_single = prepare_features_single_word(single_tokens_train_raw, sent_train_single_raw, nlp, word_to_ix, model, embedding_index, EMBEDDING_DIM)
print('+++ [COMPLETE] Feature generation for Train Single word expressions +++')
print('\n')
print('+++ Generating Test features for Single word expressions +++')
features_test_single = prepare_features_single_word(single_tokens_test_raw, sent_test_single_raw, nlp, word_to_ix, model, embedding_index, EMBEDDING_DIM)
print('+++ [COMPLETE] Feature generation for Test Single word expressions +++')
print('\n')
print('+++ Generating Train features for Multi word expressions +++')
features_train_multi = prepare_features_multi_word(multi_tokens_train_raw, sent_train_multi_raw, nlp, word_to_ix_multi, model_multi, embedding_index, EMBEDDING_DIM)
print('+++ [COMPLETE] Feature generation for Train Multi word expressions +++')
print('\n')
print('+++ Generating Test features for Multi word expressions +++')
features_test_multi = prepare_features_multi_word(multi_tokens_test_raw, sent_test_multi_raw, nlp, word_to_ix_multi, model_multi, embedding_index, EMBEDDING_DIM)
print('+++ [COMPLETE] Feature generation for Test Multi word expressions +++')
print('\n')
# Convert all features to torch.tensor to enable use in PyTorch models
X_train_single_tensor = torch.tensor(features_train_single, dtype=torch.float32, device=device)
X_test_single_tensor = torch.tensor(features_test_single, dtype=torch.float32, device=device)
X_train_multi_tensor = torch.tensor(features_train_multi, dtype=torch.float32, device=device)
X_test_multi_tensor = torch.tensor(features_test_multi, dtype=torch.float32, device=device)
# Reshape all output complexity scores to single dimension vectors
y_single_train = y_single_train.reshape(y_single_train.shape[0], -1)
y_single_test = y_single_test.reshape(y_single_test.shape[0], -1)
y_multi_train = y_multi_train.reshape(y_multi_train.shape[0], -1)
y_multi_test = y_multi_test.reshape(y_multi_test.shape[0], -1)
# Convert all target outputs to torch.tensor to enable use in PyTorch models
Y_train_single_tensor = torch.tensor(y_single_train, dtype=torch.float32, device=device)
Y_test_single_tensor = torch.tensor(y_single_test, dtype=torch.float32, device=device)
Y_train_multi_tensor = torch.tensor(y_multi_train, dtype=torch.float32, device=device)
Y_test_multi_tensor = torch.tensor(y_multi_test, dtype=torch.float32, device=device)
# Ensure each sample from test and train for single word expression is taken
output_shape(X_train_single_tensor, X_test_single_tensor, Y_train_single_tensor, Y_test_single_tensor, 0, 0)
# Ensure each sample from test and train for multi word expression is taken
output_shape(X_train_multi_tensor, X_test_multi_tensor, Y_train_multi_tensor, Y_test_multi_tensor, 1, 0)
NUM_EPOCHS = 30
loss_function = nn.MSELoss()
embedding_dim = X_train_single_tensor.shape[1]
model_NN = NN(embedding_dim)
model_NN.to(device)
# Path of the best checkpoint saved during training (best Pearson R on the test set).
# Loading vs. training is controlled by the USE_SAVED_MODELS command-line flag above.
path_NN = './TrainedModels/NN_0.731.pt'
USE_PRETRAINED_SINGLE_WORD_TARGET_NN = USE_SAVED_MODELS
if USE_PRETRAINED_SINGLE_WORD_TARGET_NN:
print('\n +++ Using pre-trained NN on single target expressions +++')
model_NN = torch.load(path_NN)
model_NN.eval()
else:
print('\n +++ Training NN on single target expressions... +++\n')
model_NN = NN(embedding_dim)
model_NN.to(device)
loss_function = nn.MSELoss()
optimizer = optim.Adam(model_NN.parameters(), lr=0.002)
for epoch in range(NUM_EPOCHS):
optimizer.zero_grad()
out = model_NN(X_train_single_tensor)
loss = loss_function(out, Y_train_single_tensor)
loss.backward()
optimizer.step()
out_test = model_NN(X_test_single_tensor)
testR = compute_pearsonR(out_test, Y_test_single_tensor, device)
trainR = compute_pearsonR(out, Y_train_single_tensor, device)
print("Epoch {} : Train R = {} | Test R = {}".format(epoch + 1, round(trainR, 6), round(testR, 6)))
out_NN = model_NN(X_test_single_tensor)
print('\n +++ Metrics for Single Word Expression using NN +++ \n')
evaluate_metrics(out_NN, Y_test_single_tensor, device)
embedding_dim = X_train_multi_tensor.shape[1]
model_NN_multi = NN(embedding_dim)
model_NN_multi.to(device)
# Path of the best checkpoint saved during training (best Pearson R on the test set).
# Loading vs. training is controlled by the USE_SAVED_MODELS command-line flag above.
path_NN_multi = './TrainedModels/NN_multi_0.775.pt'
USE_PRETRAINED_MULTI_WORD_TARGET_NN = USE_SAVED_MODELS
if USE_PRETRAINED_MULTI_WORD_TARGET_NN:
print('\n +++ Using pre-trained NN on multi target expressions +++')
model_NN_multi = torch.load(path_NN_multi)
model_NN_multi.eval()
else:
print('\n +++ Training NN on multi target expressions... +++\n')
model_NN_multi = NN(embedding_dim)
model_NN_multi.to(device)
loss_function = nn.MSELoss()
optimizer = optim.Adam(model_NN_multi.parameters(), lr=0.002)
for epoch in range(NUM_EPOCHS):
optimizer.zero_grad()
out = model_NN_multi(X_train_multi_tensor)
loss = loss_function(out, Y_train_multi_tensor)
loss.backward()
optimizer.step()
out_test = model_NN_multi(X_test_multi_tensor)
testR = compute_pearsonR(out_test, Y_test_multi_tensor, device)
trainR = compute_pearsonR(out, Y_train_multi_tensor, device)
print("Epoch {} : Train R = {} | Test R = {}".format(epoch + 1, round(trainR, 6), round(testR, 6)))
out_NN_multi = model_NN_multi(X_test_multi_tensor)
print('\n +++ Metrics for Multi Word Expression using NN +++ \n')
evaluate_metrics(out_NN_multi, Y_test_multi_tensor, device)
""" Machine Learning Methods """
X_train_single_np = np.array(features_train_single)
X_test_single_np = np.array(features_test_single)
Y_train_single_np = np.array(y_single_train.reshape(y_single_train.shape[0], -1))
Y_test_single_np = np.array(y_single_test.reshape(y_single_test.shape[0], -1))
print('\n')
output_shape(X_train_single_np, X_test_single_np, Y_train_single_np, Y_test_single_np, 0, 1)
X_train_multi_np = np.array(features_train_multi)
X_test_multi_np = np.array(features_test_multi)
Y_train_multi_np = np.array(y_multi_train.reshape(y_multi_train.shape[0], -1))
Y_test_multi_np = np.array(y_multi_test.reshape(y_multi_test.shape[0], -1))
output_shape(X_train_multi_np, X_test_multi_np, Y_train_multi_np, Y_test_multi_np, 1, 1)
""" Linear Regression """
LR = LinearRegressor()
print('\n +++ Metrics for Single Word Expression using Linear Regression +++ \n')
out_LR = LR.forward(X_train_single_np, Y_train_single_np, X_test_single_np, Y_test_single_np)
evaluate_metrics(out_LR, Y_test_single_np, device)
print('\n +++ Metrics for Multi Word Expression using Linear Regression +++ \n')
out_LR_multi = LR.forward(X_train_multi_np, Y_train_multi_np, X_test_multi_np, Y_test_multi_np)
evaluate_metrics(out_LR_multi, Y_test_multi_np, device)
""" Support Vector Regressor """
svr = SupportVectorRegressor()
print('\n +++ Metrics for Single Word Expression using SVR +++ \n')
out_svr = svr.forward(X_train_single_np, Y_train_single_np, X_test_single_np, Y_test_single_np)
evaluate_metrics(out_svr, Y_test_single_np, device)
print('\n +++ Metrics for Multi Word Expression using SVR +++ \n')
out_svr_multi = svr.forward(X_train_multi_np, Y_train_multi_np, X_test_multi_np, Y_test_multi_np)
evaluate_metrics(out_svr_multi, Y_test_multi_np, device)
single_ids = df_test_single["id"].astype(str).to_list()
multi_ids = df_test_multi["id"].astype(str).to_list()
"""Aggregation of results obtained"""
out_ensemble = []
for idx in range(len(out_NN)):
score = 0
score += float(out_NN[idx])
score += float(out_LR[idx])
score += float(out_svr[idx])
score /= 3
out_ensemble.append(score)
out_ensemble = np.array(out_ensemble)
out_ensemble = out_ensemble.reshape((out_ensemble.shape[0], 1))
print('\n +++ Metrics for Single Word Expression using Overall Model +++ \n')
evaluate_metrics(out_ensemble, Y_test_single_np, device)
out_ensemble_multi = []
for idx in range(len(out_NN_multi)):
score = 0
score += float(out_NN_multi[idx])
score += float(out_LR_multi[idx])
score += float(out_svr_multi[idx])
score /= 3
out_ensemble_multi.append(score)
out_ensemble_multi = np.array(out_ensemble_multi)
out_ensemble_multi = out_ensemble_multi.reshape((out_ensemble_multi.shape[0], 1))
print('\n +++ Metrics for Multi Word Expression using Overall Model +++ \n')
evaluate_metrics(out_ensemble_multi, Y_test_multi_np, device)
output_results(single_ids, multi_ids, out_ensemble, out_ensemble_multi)
```
#### File: CS60075-Team-11-Task-1/Models/LinearRegression.py
```python
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
class LinearRegressor():
def __init__(self):
super(LinearRegressor, self).__init__()
def forward(self, X_train, Y_train, X_test, Y_test):
reg = make_pipeline(StandardScaler(), LinearRegression())
reg.fit(X_train, Y_train)
out = reg.predict(X_test)
out = out.reshape((out.shape[0], 1))
return out
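# Illustrative usage sketch (not part of the original file): fit on random toy data.
# Shapes mirror how the main script calls forward(): X is (n_samples, n_features), Y is (n_samples, 1).
if __name__ == "__main__":
    import numpy as np
    X_train, Y_train = np.random.rand(20, 5), np.random.rand(20, 1)
    X_test, Y_test = np.random.rand(5, 5), np.random.rand(5, 1)
    predictions = LinearRegressor().forward(X_train, Y_train, X_test, Y_test)
    print(predictions.shape)  # (5, 1)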
``` |
{
"source": "07kshitij/Information-Retrieval",
"score": 3
} |
#### File: Information-Retrieval/Assignment-1/ASSIGNMENT1_17EC10063_1.py
```python
import requests
import time
import os
from bs4 import BeautifulSoup
DATA_FOLDER = 'Data'
DEBUG = False
class HTMLExtractor:
def __init__(self):
return
''' Extract a specific page hosted on 'url' and write the contents of it to the article_name.html file '''
def grab_page(self, url, article_name):
if DEBUG:
print("Attempting to get page: " + url)
page = requests.get(url)
page_html = page.text
soup = BeautifulSoup(page_html, 'html.parser')
content = soup.find("div", {"class": "sa-art article-width"})
article_name = article_name.replace('/', '-')
article_name = article_name.strip('-')
if content != None:
filename = '{}'.format(article_name)
file = open(os.path.join(
DATA_FOLDER, filename.lower() + ".html"), 'w')
file.write(str(content))
file.close()
if DEBUG:
print("Successfully Saved")
''' Extract the list of arcticles on a given page and extract each of them sequentially '''
def process_list_page(self, page):
origin_page = "https://seekingalpha.com/earnings/earnings-call-transcripts" + \
"/" + str(page)
if DEBUG:
print("Getting page {}".format(origin_page))
page = requests.get(origin_page)
page_html = page.text
soup = BeautifulSoup(page_html, 'html.parser')
article_list = soup.find_all(
"li", {'class': 'list-group-item article'})
if DEBUG:
print("The page returned {} articles".format(len(article_list)))
for article in range(0, len(article_list)):
page_url = article_list[article].find_all("a")[0].attrs['href']
url = "https://seekingalpha.com" + page_url
self.grab_page(url, page_url)
time.sleep(1)
if __name__ == "__main__":
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
htmlExtractor = HTMLExtractor()
for page in range(1, 2):
htmlExtractor.process_list_page(page)
```
#### File: Information-Retrieval/Assignment-1/ASSIGNMENT1_17EC10063_3.py
```python
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import os
import string
import json
import time
DEBUG = False
DATA_PATH = './ECTText/'
inverted_index = dict()
lemmatizer = WordNetLemmatizer()
stopwords = stopwords.words('english')
''' Sorting function to sort input files in lexicographically increasing order '''
def sortKey(s):
return int(s.split('.')[0])
files = os.listdir(DATA_PATH)
files.sort(key=sortKey)
''' Extract tokens after stopword and punctuation removal followed by lemmatization,
    and build the inverted_index '''
def build_inverted_index():
file_num = 0
for file in files:
with open(os.path.join(DATA_PATH, file), 'r', encoding='utf-8') as ECTText:
text = ECTText.read().replace('\n', ' ').lower().strip()
position = 0
for token in word_tokenize(text):
# Remove stop words & punctuation marks
if token not in stopwords and token not in string.punctuation:
lemma = lemmatizer.lemmatize(token)
try:
inverted_index[lemma].append((file_num, position))
except KeyError:
inverted_index[lemma] = [(file_num, position)]
position = position + 1
file_num += 1
if DEBUG and file_num % 100 == 0:
print("Tokenization - Steps done: {} | Tokens found: {}".format(
file_num, len(inverted_index.keys())))
if DEBUG:
print("Total number of tokens: {}".format(len(inverted_index.keys())))
with open('inverted_index.json', 'w') as inv_idx:
json.dump(inverted_index, inv_idx)
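''' Illustrative helper sketch (not part of the original assignment): once the index is built,
    a positional lookup for a query term could reuse the same normalisation, e.g. lookup("market")
    returns a list of (file_num, position) postings. The example term is arbitrary. '''
def lookup(term):
    lemma = lemmatizer.lemmatize(term.lower())
    return inverted_index.get(lemma, [])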
if __name__ == "__main__":
start_time = time.time()
build_inverted_index()
if DEBUG:
print("--- %s seconds ---" % (time.time() - start_time))
```
#### File: Information-Retrieval/Assignment-3/17EC10063_1.py
```python
import sys
import os
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.feature_selection import mutual_info_classif, SelectKBest
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
import numpy as np
from string import punctuation
from sklearn.metrics import f1_score
stopwords = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
DEBUG = True
"""
Naive Bayes classifier
Extracts tokens, computes feature matrices and trains & tests the Multinomial and Bernoulli NB classifiers
"""
class Naive_Bayes:
def __init__(self, data_path, out_file):
self.data_path = data_path
self.out_file = out_file
self.tokens = []
self.feature_idx_map = {}
self.X_train = []
self.y_train = []
self.X_test = []
self.y_test = []
""" Computes the feature matrix of size (n_samples, n_features) """
def generate_feature_matrix(self):
classes = os.listdir(self.data_path)
for className in classes:
class_path = os.path.join(self.data_path, className)
if className == "class1" or className == "class2":
classID = int(className[-1])
data_folders = os.listdir(class_path)
for data_folder in data_folders:
data_path = os.path.join(class_path, data_folder)
files = os.listdir(data_path)
files.sort(key=lambda x: int(x))
X = []
y = []
for file_ in files:
text = open(os.path.join(data_path, file_), errors="replace").read()
text = text.lower()
feature_vector = [0] * len(self.tokens)
for _ in punctuation:
text = text.replace(_, " ")
text = text.replace(" ", " ")
for token in word_tokenize(text):
if token not in stopwords:
token = lemmatizer.lemmatize(token)
try:
pos = self.feature_idx_map[token]
feature_vector[pos] += 1
except KeyError:
pass
X.append(feature_vector)
y.append(classID)
if data_folder == "train":
self.X_train.extend(X)
self.y_train.extend(y)
else:
self.X_test.extend(X)
self.y_test.extend(y)
if DEBUG:
print("Construction of feature matrix complete")
self.X_train = np.array(self.X_train)
self.y_train = np.array(self.y_train)
self.X_test = np.array(self.X_test)
self.y_test = np.array(self.y_test)
""" Maps features (tokens) to integers """
def create_feature_map(self):
for pos, token in enumerate(self.tokens):
self.feature_idx_map[token] = pos
""" Reads the dataset folder"""
def read_dataset(self):
classes = os.listdir(self.data_path)
for className in classes:
class_path = os.path.join(self.data_path, className)
if className == "class1":
self.tokens.extend(self.read_class(class_path))
if className == "class2":
self.tokens.extend(self.read_class(class_path))
self.tokens = list(set(self.tokens))
self.tokens.sort()
if DEBUG:
print("Total Features: {}".format(len(self.tokens)))
""" Reads data files for each class (class1 and class2)"""
def read_class(self, class_path):
data_folders = os.listdir(class_path)
for data_folder in data_folders:
data_path = os.path.join(class_path, data_folder)
if data_folder == "train":
return self.process_data(data_path)
""" Computes tokens from the texts in all files pointed by 'data_path' """
@staticmethod
def process_data(data_path):
files = os.listdir(data_path)
features = []
cache = {}
files.sort(key=lambda x: int(x))
for file_ in files:
text = open(os.path.join(data_path, file_), errors="replace").read()
text = text.lower()
for _ in punctuation:
text = text.replace(_, " ")
text = text.replace(" ", " ")
for token in word_tokenize(text):
if token not in stopwords:
token = lemmatizer.lemmatize(token)
if token not in cache.keys():
features.append(token)
cache[token] = 1
return features
""" Fits MultinomialNB on X_train and predicts on X_test """
def fit_MultinomialNB(self, X_train, X_test):
multinomialNB = MultinomialNB()
multinomialNB.fit(X_train, self.y_train)
y_predict = multinomialNB.predict(X_test)
score = f1_score(self.y_test, y_predict, average="macro")
return score
""" Fits BernoulliNB on X_train and predicts on X_test """
def fit_BernoulliNB(self, X_train, X_test):
bernoulliNB = BernoulliNB()
bernoulliNB.fit(X_train, self.y_train)
y_predict = bernoulliNB.predict(X_test)
score = f1_score(self.y_test, y_predict, average="macro")
return score
""" Runs both NB classifiers on different values of top features """
def run_NB(self, out_file, top_features_count):
multinomialNB_scores = []
bernoulliNB_scores = []
for count in top_features_count:
if DEBUG:
print("Computing results for x = {}".format(count))
top_features = SelectKBest(mutual_info_classif, k=count)
X_train = top_features.fit_transform(self.X_train, self.y_train)
X_test = top_features.transform(self.X_test)
multinomialNB_score = self.fit_MultinomialNB(X_train, X_test)
bernoulliNB_score = self.fit_BernoulliNB(X_train, X_test)
multinomialNB_scores.append(multinomialNB_score)
bernoulliNB_scores.append(bernoulliNB_score)
self.write_answer(out_file, top_features_count, multinomialNB_scores, bernoulliNB_scores)
""" Writes answer to the output file """
def write_answer(self, out_file, top_features_count, multinomialNB_scores, bernoulliNB_scores):
result_file = open(out_file, "w", encoding="utf-8")
result_file.write("NumFeature ")
for count in top_features_count:
result_file.write("{} ".format(count))
result_file.write("\nMultinomialNB")
for pos in range(len(top_features_count)):
result_file.write(" {0:.6f}".format(multinomialNB_scores[pos]))
result_file.write("\nBernoulliNB ")
for pos in range(len(top_features_count)):
result_file.write(" {0:.6f}".format(bernoulliNB_scores[pos]))
result_file.close()
if __name__ == "__main__":
data_path, out_file = sys.argv[1], sys.argv[2]
top_features_count = [1, 10, 100, 1000, 10000]
NB = Naive_Bayes(data_path, out_file)
NB.read_dataset()
NB.create_feature_map()
NB.generate_feature_matrix()
NB.run_NB(out_file, top_features_count)
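# Illustrative invocation (assumption about the expected folder layout):
#   python 17EC10063_1.py ./dataset results.txt
# where ./dataset contains class1/ and class2/, each holding train/ and test/ folders of numbered text files.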
``` |
{
"source": "07kshitij/Social-Computing",
"score": 3
} |
#### File: Social-Computing/A2/analyze_centrality.py
```python
import os
import snap
DATA_PATH = 'facebook_combined.txt'
ROOT_PATH = 'centralities'
CLOSENESS_FILE = 'closeness.txt'
BETWEENNESS_FILE = 'betweenness.txt'
PAGERANK_FILE = 'pagerank.txt'
''' Class computing the centrality metrics using SNAP library functions '''
class Analyse_Centrality:
def __init__(self):
self.graph = self.load_graph()
''' Load 'facebook_combined.txt' to a SNAP graph structure '''
def load_graph(self):
graph = snap.LoadEdgeList(snap.PUNGraph, DATA_PATH, 0, 1)
return graph
''' Compute the closeness centrality values for all nodes and
compare with the previous implementation '''
def closeness_centrality(self):
res = dict()
for node in self.graph.Nodes():
closeness = snap.GetClosenessCentr(self.graph, node.GetId())
res[node.GetId()] = closeness
order = sorted(res.items(), key=lambda item: item[1], reverse=True)
order = order[:100]
snap_nodes = [node[0] for node in order]
self_nodes = []
with open(os.path.join(ROOT_PATH, CLOSENESS_FILE), 'r') as readFile:
for line in readFile.readlines():
if len(self_nodes) == len(snap_nodes):
break
u, v = line.split()
self_nodes.append(int(u))
overlap = len(set(snap_nodes).intersection(self_nodes))
print('#overlaps for Closeness Centrality: {}'.format(overlap))
''' Compute the betweenness centrality values for all nodes and
compare with the previous implementation '''
def betweenness_centrality(self):
Nodes = snap.TIntFltH()
Edges = snap.TIntPrFltH()
snap.GetBetweennessCentr(self.graph, Nodes, Edges, 0.8)
res = dict()
for node in Nodes:
res[node] = Nodes[node]
order = sorted(res.items(), key=lambda item: item[1], reverse=True)
order = order[:100]
snap_nodes = [node[0] for node in order]
self_nodes = []
with open(os.path.join(ROOT_PATH, BETWEENNESS_FILE), 'r') as readFile:
for line in readFile.readlines():
if len(self_nodes) == len(snap_nodes):
break
u, v = line.split()
self_nodes.append(int(u))
overlap = len(set(snap_nodes).intersection(self_nodes))
print('#overlaps for Betweenness Centrality: {}'.format(overlap))
''' Compute the pageRank values for all nodes and compare with the previous implementation '''
def pagerank(self):
pageRank = snap.TIntFltH()
snap.GetPageRank(self.graph, pageRank)
res = dict()
for node in pageRank:
res[node] = pageRank[node]
order = sorted(res.items(), key=lambda item: item[1], reverse=True)
order = order[:100]
snap_nodes = [node[0] for node in order]
self_nodes = []
with open(os.path.join(ROOT_PATH, PAGERANK_FILE), 'r') as readFile:
for line in readFile.readlines():
if len(self_nodes) == len(snap_nodes):
break
u, v = line.split()
self_nodes.append(int(u))
overlap = len(set(snap_nodes).intersection(self_nodes))
print('#overlaps for PageRank Centrality: {}'.format(overlap))
if __name__ == "__main__":
analyse_centrality = Analyse_Centrality()
analyse_centrality.closeness_centrality()
analyse_centrality.betweenness_centrality()
analyse_centrality.pagerank()
``` |
{
"source": "080HACKER/FB-COVID-19",
"score": 2
} |
#### File: FB-COVID-19/tool/run.py
```python
import os,sys,time
os.system('clear')
def babi(nob):
for e in nob:
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.1)
babi('<NAME> MY NAME IS <NAME>')
print()
babi('YOU WELCOME TO VISIT OUR TOOL AND YOU DONT FORGET')
print()
babi('FRIENDS THIS TOOL IT WAS CREATED BY MiSetya And Update With 080Hacker')
print()
babi('YOU MAKE SURE KNOW IS THIS TOOL IT IS REALLY WORKING')
print()
babi('BUT THE TOOL IT HAS KEY WHICH YOU REALLY NEED TO FIND')
print()
print()
babi('TO GET THE TOOL KEY CONTACT US VIA WHATSAPP WITH THIS NUMBER +2349069464271 >_<')
print()
print()
babi('YOU CAN ALSO CONTACT MISETYA @<EMAIL>.misetya...')
print("")
babi('AND YOU DO NOT FORGET TO GIVE US STAR ON GITHUB AFTER LOGIN THANKS A LOT')
babi('...............')
os.system('sh login.sh')
exit()
```
#### File: FB-COVID-19/tool/sholat.py
```python
import sys,os, random
import subprocess as sp
import requests
from time import sleep
from time import strftime as tm
from requests import get
from bs4 import BeautifulSoup as bs
from threading import Thread
##############
# color
lgr='\033[90m'
lr= '\033[91m'
lg= '\033[92m'
lw= '\033[97m'
x = '\033[0m'
# fetch today's prayer schedule
def gettime():
print(lg+'Updating schedule..')
try:
ts = open('.cookie/ts','r').read()
except IOError:
gettown()
ts= open('.cookie/ts','r').read()
if len(ts) == 0:
ts = '83'
try:
r = get('https://www.jadwalsholat.org/adzan/monthly.php?id='+ts)
except requests.exceptions.ConnectionError:
        print(lg+'\nAstaghfirullah..\nUkhty forgot to turn on the network'+x)
        input(lg+'\nPress enter to continue')
menu()
b = bs(r.text,'html.parser')
tr= b.find('tr',class_="table_highlight")
with open('.cookie/sc','w') as sc:
kota = b.find('option', attrs={'value':ts})
i= tr.find_all('td')
sc.write(i[0].text+','+i[1].text+','+i[2].text+','+i[5].text+','+i[6].text+','+i[7].text+','+i[8].text+','+kota.text)
sc.close()
def gettown():
print(
lg+"""1. """+lw+"""Ambarawa """+lg+"""78. """+lw+"""Gombong """+lg+"""155. """+lw+"""Mentok """+lg+"""232. """+lw+"""Selong"""+
lg+"""\n2. """+lw+"""Ambon """+lg+"""79. """+lw+"""Gorontalo """+lg+"""163. """+lw+"""Merauke """+lg+"""233. """+lw+"""Semarang"""+
lg+"""\n3. """+lw+"""Amlapura """+lg+"""80. """+lw+"""Gresik """+lg+"""157. """+lw+"""Metro """+lg+"""234. """+lw+"""Sengkang"""+
lg+"""\n4. """+lw+"""Amuntai """+lg+"""81. """+lw+"""Gunung Sit """+lg+"""158. """+lw+"""Meulaboh """+lg+"""235. """+lw+"""Serang"""+
lg+"""\n5. """+lw+"""Argamakmur """+lg+"""82. """+lw+"""Indramayu """+lg+"""159. """+lw+"""Mojokerto """+lg+"""236. """+lw+"""Serui"""+
lg+"""\n6. """+lw+"""Atambua """+lg+"""83. """+lw+"""Jakarta """+lg+"""160. """+lw+"""Muara Buli """+lg+"""237. """+lw+"""Sibolga"""+
lg+"""\n7. """+lw+"""Babo """+lg+"""84. """+lw+"""Jambi """+lg+"""161. """+lw+"""Muara Bung """+lg+"""238. """+lw+"""Sidikalang"""+
lg+"""\n8. """+lw+"""Bagan Siap """+lg+"""85. """+lw+"""Jayapura """+lg+"""162. """+lw+"""Muara Enim """+lg+"""239. """+lw+"""Sidoarjo"""+
lg+"""\n9. """+lw+"""Bajawa """+lg+"""86. """+lw+"""Jember """+lg+"""163. """+lw+"""Muara Tewe """+lg+"""240. """+lw+"""Sigli"""+
lg+"""\n10. """+lw+"""Balige """+lg+"""87. """+lw+"""Jeneponto """+lg+"""164. """+lw+"""Muaro Siju """+lg+"""241. """+lw+"""Singaparna"""+
lg+"""\n11. """+lw+"""Balik Papa """+lg+"""88. """+lw+"""Jepara """+lg+"""165. """+lw+"""Muntilan """+lg+"""242. """+lw+"""Singaraja"""+
lg+"""\n12. """+lw+"""Banda Aceh """+lg+"""89. """+lw+"""Jombang """+lg+"""166. """+lw+"""Nabire """+lg+"""243. """+lw+"""Singkawang"""+
lg+"""\n13. """+lw+"""Bandarlamp """+lg+"""90. """+lw+"""Kabanjahe """+lg+"""167. """+lw+"""Negara """+lg+"""244. """+lw+"""Sinjai"""+
lg+"""\n14. """+lw+"""Bandung """+lg+"""91. """+lw+"""Kalabahi """+lg+"""168. """+lw+"""Nganjuk """+lg+"""245. """+lw+"""Sintang"""+
lg+"""\n15. """+lw+"""Bangkalan """+lg+"""92. """+lw+"""Kalianda """+lg+"""169. """+lw+"""Ngawi """+lg+"""246. """+lw+"""Situbondo"""+
lg+"""\n16. """+lw+"""Bangkinang """+lg+"""93. """+lw+"""Kandangan """+lg+"""170. """+lw+"""Nunukan """+lg+"""247. """+lw+"""Slawi"""+
lg+"""\n17. """+lw+"""Bangko """+lg+"""94. """+lw+"""Karanganya """+lg+"""171. """+lw+"""Pacitan """+lg+"""248. """+lw+"""Sleman"""+
lg+"""\n18. """+lw+"""Bangli """+lg+"""95. """+lw+"""Karawang """+lg+"""172. """+lw+"""Padang """+lg+"""249. """+lw+"""Soasiu"""+
lg+"""\n19. """+lw+"""Banjar """+lg+"""96. """+lw+"""Kasungan """+lg+"""173. """+lw+"""Padang Pan """+lg+"""250. """+lw+"""Soe"""+
lg+"""\n20. """+lw+"""Banjar Bar """+lg+"""97. """+lw+"""Kayuagung """+lg+"""174. """+lw+"""Padang Sid """+lg+"""251. """+lw+"""Solo"""+
lg+"""\n21. """+lw+"""Banjarmasi """+lg+"""98 . """+lw+"""Kebumen """+lg+"""175. """+lw+"""Pagaralam """+lg+"""252. """+lw+"""Solok"""+
lg+"""\n22. """+lw+"""Banjarnega """+lg+"""99. """+lw+"""Kediri """+lg+"""176. """+lw+"""Painan """+lg+"""253. """+lw+"""Soreang"""+
lg+"""\n23. """+lw+"""Bantaeng """+lg+"""100. """+lw+"""Kefamenanu """+lg+"""177. """+lw+"""Palangkara """+lg+"""254. """+lw+"""Sorong"""+
lg+"""\n24. """+lw+"""Banten """+lg+"""101. """+lw+"""Kendal """+lg+"""178. """+lw+"""Palembang """+lg+"""255. """+lw+"""Sragen"""+
lg+"""\n25. """+lw+"""Bantul """+lg+"""102. """+lw+"""Kendari """+lg+"""179. """+lw+"""Palopo """+lg+"""263. """+lw+"""Stabat"""+
lg+"""\n26. """+lw+"""Banyuwangi """+lg+"""103. """+lw+"""Kertosono """+lg+"""180. """+lw+"""Palu """+lg+"""257. """+lw+"""Subang"""+
lg+"""\n27. """+lw+"""Barabai """+lg+"""104. """+lw+"""Ketapang """+lg+"""181. """+lw+"""Pamekasan """+lg+"""258. """+lw+"""Sukabumi"""+
lg+"""\n28. """+lw+"""Barito """+lg+"""105. """+lw+"""Kisaran """+lg+"""182. """+lw+"""Pandeglang """+lg+"""259. """+lw+"""Sukoharjo"""+
lg+"""\n29. """+lw+"""Barru """+lg+"""106. """+lw+"""Klaten """+lg+"""183. """+lw+"""Pangkajene """+lg+"""260. """+lw+"""Sumbawa Be"""+
lg+"""\n30. """+lw+"""Batam """+lg+"""107. """+lw+"""Kolaka """+lg+"""184. """+lw+"""Pangkajene """+lg+"""261. """+lw+"""Sumedang"""+
lg+"""\n31. """+lw+"""Batang """+lg+"""108. """+lw+"""Kota Baru """+lg+"""185. """+lw+"""Pangkalanb """+lg+"""262. """+lw+"""Sumenep"""+
lg+"""\n32. """+lw+"""Batu """+lg+"""109. """+lw+"""Kota Bumi """+lg+"""186. """+lw+"""Pangkalpin """+lg+"""263. """+lw+"""Sungai Lia"""+
lg+"""\n33. """+lw+"""Baturaja """+lg+"""110. """+lw+"""Kota Janth """+lg+"""187. """+lw+"""Panyabunga """+lg+"""264. """+lw+"""Sungai Pen"""+
lg+"""\n34. """+lw+"""Batusangka """+lg+"""111. """+lw+"""Kota Mobag """+lg+"""188. """+lw+"""Pare """+lg+"""265. """+lw+"""Sunggumina"""+
lg+"""\n35. """+lw+"""Baubau """+lg+"""112. """+lw+"""Kuala Kapu """+lg+"""189. """+lw+"""Parepare """+lg+"""266. """+lw+"""Surabaya"""+
lg+"""\n36. """+lw+"""Bekasi """+lg+"""113. """+lw+"""Kuala Kuru """+lg+"""190. """+lw+"""Pariaman """+lg+"""267. """+lw+"""Surakarta"""+
lg+"""\n37. """+lw+"""Bengkalis """+lg+"""114. """+lw+"""Kuala Pemb """+lg+"""191. """+lw+"""Pasuruan """+lg+"""268. """+lw+"""Tabanan"""+
lg+"""\n38. """+lw+"""Bengkulu """+lg+"""115. """+lw+"""Kuala Tung """+lg+"""192. """+lw+"""Pati """+lg+"""269. """+lw+"""Tahuna"""+
lg+"""\n39. """+lw+"""Benteng """+lg+"""116. """+lw+"""Kudus """+lg+"""193. """+lw+"""Payakumbuh """+lg+"""270. """+lw+"""Takalar"""+
lg+"""\n40. """+lw+"""Biak """+lg+"""117. """+lw+"""Kuningan """+lg+"""194. """+lw+"""Pekalongan """+lg+"""271. """+lw+"""Takengon"""+
lg+"""\n41. """+lw+"""Bima """+lg+"""118. """+lw+"""Kupang """+lg+"""195. """+lw+"""Pekan Baru """+lg+"""272. """+lw+"""Tamiang La"""+
lg+"""\n42. """+lw+"""Binjai """+lg+"""119. """+lw+"""Kutacane """+lg+"""196. """+lw+"""Pemalang """+lg+"""273. """+lw+"""Tanah Grog"""+
lg+"""\n43. """+lw+"""Bireuen """+lg+"""120. """+lw+"""Kutoarjo """+lg+"""197. """+lw+"""Pematangsi """+lg+"""274. """+lw+"""Tangerang"""+
lg+"""\n44. """+lw+"""Bitung """+lg+"""121. """+lw+"""Labuhan """+lg+"""198. """+lw+"""Pendopo """+lg+"""275. """+lw+"""Tanjung Ba"""+
lg+"""\n45. """+lw+"""Blitar """+lg+"""122. """+lw+"""Lahat """+lg+"""199. """+lw+"""Pinrang """+lg+"""276. """+lw+"""Tanjung En"""+
lg+"""\n46. """+lw+"""Blora """+lg+"""123. """+lw+"""Lamongan """+lg+"""200. """+lw+"""Pleihari """+lg+"""277. """+lw+"""Tanjung Pa"""+
lg+"""\n47. """+lw+"""Bogor """+lg+"""124. """+lw+"""Langsa """+lg+"""201. """+lw+"""Polewali """+lg+"""278. """+lw+"""Tanjung Pi"""+
lg+"""\n48. """+lw+"""Bojonegoro """+lg+"""125. """+lw+"""Larantuka """+lg+"""202. """+lw+"""Pondok Ged """+lg+"""279. """+lw+"""Tanjung Re"""+
lg+"""\n49. """+lw+"""Bondowoso """+lg+"""126. """+lw+"""Lawang """+lg+"""203. """+lw+"""Ponorogo """+lg+"""280. """+lw+"""Tanjung Se"""+
lg+"""\n50. """+lw+"""Bontang """+lg+"""127. """+lw+"""Lhoseumawe """+lg+"""204. """+lw+"""Pontianak """+lg+"""281. """+lw+"""Tapak Tuan"""+
lg+"""\n51. """+lw+"""Boyolali """+lg+"""128. """+lw+"""Limboto """+lg+"""205. """+lw+"""Poso """+lg+"""282. """+lw+"""Tarakan"""+
lg+"""\n52. """+lw+"""Brebes """+lg+"""129. """+lw+"""Lubuk Basu """+lg+"""206. """+lw+"""Prabumulih """+lg+"""283. """+lw+"""Tarutung"""+
lg+"""\n53. """+lw+"""Bukit Ting """+lg+"""130. """+lw+"""Lubuk Ling """+lg+"""207. """+lw+"""Praya """+lg+"""284. """+lw+"""Tasikmalay"""+
lg+"""\n54. """+lw+"""Bulukumba """+lg+"""131. """+lw+"""Lubuk Paka """+lg+"""208. """+lw+"""Probolingg """+lg+"""285. """+lw+"""Tebing Tin"""+
lg+"""\n55. """+lw+"""Buntok """+lg+"""132. """+lw+"""Lubuk Sika """+lg+"""209. """+lw+"""Purbalingg """+lg+"""286. """+lw+"""Tegal"""+
lg+"""\n63. """+lw+"""Cepu """+lg+"""133. """+lw+"""Lumajang """+lg+"""210. """+lw+"""Purukcahu """+lg+"""287. """+lw+"""Temanggung"""+
lg+"""\n57. """+lw+"""Ciamis """+lg+"""134. """+lw+"""Luwuk """+lg+"""211. """+lw+"""Purwakarta """+lg+"""288. """+lw+"""Tembilahan"""+
lg+"""\n58. """+lw+"""Cianjur """+lg+"""135. """+lw+"""Madiun """+lg+"""212. """+lw+"""Purwodadig """+lg+"""289. """+lw+"""Tenggarong"""+
lg+"""\n59. """+lw+"""Cibinong """+lg+"""136. """+lw+"""Magelang """+lg+"""213. """+lw+"""Purwokerto """+lg+"""290. """+lw+"""Ternate"""+
lg+"""\n60. """+lw+"""Cilacap """+lg+"""137. """+lw+"""Magetan """+lg+"""214. """+lw+"""Purworejo """+lg+"""291. """+lw+"""Tolitoli"""+
lg+"""\n61. """+lw+"""Cilegon """+lg+"""138. """+lw+"""Majalengka """+lg+"""215. """+lw+"""Putussibau """+lg+"""292. """+lw+"""Tondano"""+
lg+"""\n62. """+lw+"""Cimahi """+lg+"""139. """+lw+"""Majene """+lg+"""216. """+lw+"""Raha """+lg+"""293. """+lw+"""Trenggalek"""+
lg+"""\n63. """+lw+"""Cirebon """+lg+"""140. """+lw+"""Makale """+lg+"""217. """+lw+"""Rangkasbit """+lg+"""294. """+lw+"""Tual"""+
lg+"""\n64. """+lw+"""Curup """+lg+"""141. """+lw+"""Makassar """+lg+"""218. """+lw+"""Rantau """+lg+"""295. """+lw+"""Tuban"""+
lg+"""\n65. """+lw+"""Demak """+lg+"""142. """+lw+"""Malang """+lg+"""219. """+lw+"""Rantauprap """+lg+"""296. """+lw+"""Tulung Agu"""+
lg+"""\n66. """+lw+"""Denpasar """+lg+"""143. """+lw+"""Mamuju """+lg+"""220. """+lw+"""Rantepao """+lg+"""297. """+lw+"""Ujung Beru"""+
lg+"""\n67. """+lw+"""Depok """+lg+"""144. """+lw+"""Manna """+lg+"""221. """+lw+"""Rembang """+lg+"""298. """+lw+"""Ungaran"""+
lg+"""\n68. """+lw+"""Dili """+lg+"""145. """+lw+"""Manokwari """+lg+"""222. """+lw+"""Rengat """+lg+"""299. """+lw+"""Waikabubak"""+
lg+"""\n69. """+lw+"""Dompu """+lg+"""146. """+lw+"""Marabahan """+lg+"""223. """+lw+"""Ruteng """+lg+"""300. """+lw+"""Waingapu"""+
lg+"""\n70. """+lw+"""Donggala """+lg+"""147. """+lw+"""Maros """+lg+"""224. """+lw+"""Sabang """+lg+"""301. """+lw+"""Wamena"""+
lg+"""\n71. """+lw+"""Dumai """+lg+"""148. """+lw+"""Martapura """+lg+"""225. """+lw+"""Salatiga """+lg+"""302. """+lw+"""Watampone"""+
lg+"""\n72. """+lw+"""Ende """+lg+"""149. """+lw+"""Masohi """+lg+"""226. """+lw+"""Samarinda """+lg+"""303. """+lw+"""Watansoppe"""+
lg+"""\n73. """+lw+"""Enggano """+lg+"""150. """+lw+"""Mataram """+lg+"""227. """+lw+"""Sampang """+lg+"""304. """+lw+"""Wates"""+
lg+"""\n74. """+lw+"""Enrekang """+lg+"""151. """+lw+"""Maumere """+lg+"""228. """+lw+"""Sampit """+lg+"""305. """+lw+"""Wonogiri"""+
lg+"""\n75. """+lw+"""Fakfak """+lg+"""152. """+lw+"""Medan """+lg+"""229. """+lw+"""Sanggau """+lg+"""306. """+lw+"""Wonosari"""+
lg+"""\n76. """+lw+"""Garut """+lg+"""153. """+lw+"""Mempawah """+lg+"""230. """+lw+"""Sawahlunto """+lg+"""307. """+lw+"""Wonosobo"""+
lg+"""\n77. """+lw+"""Gianyar """+lg+"""154. """+lw+"""Menado """+lg+"""231. """+lw+"""Sekayu """+lg+"""308. """+lw+"""Yogyakarta""")
print(lg+'_'*63)
inp = input(lg+'Pilih kota Anda:'+x)
if int(inp) <= 82:
pass
elif int(inp) > 83 and int(inp) <= 204:
inp = str(int(inp)-1)
elif int(inp) >= 205:
inp = str(int(inp)-1)
else:
inp = '308'
ts = open('.cookie/ts','w')
ts.write(inp)
ts.close()
gettime()
# input
def start():
global s,d,a,m,i,tt,o,im,saur
try:
banner()
try:
o = open('.cookie/sc','r').read()
except IOError:
gettime()
o = open('.cookie/sc','r').read()
o = o.split(',')
if o[0] != tm('%d'):
gettime()
im= int(o[1].replace(':',''))
s = int(o[2].replace(':',''))
d = int(o[3].replace(':',''))
a = int(o[4].replace(':',''))
m = int(o[5].replace(':',''))
i = int(o[6].replace(':',''))
tt = int(tm('%H%M'))
saur = im - 100
if tt > s and tt < d:
ss = 'sholat Dzuhur'
elif tt > d and tt < a:
ss = 'sholat Ashar'
elif tt > a and tt < m:
ss = 'sholat Maghrib'
elif tt > m and tt < i:
ss = 'sholat Isya'
elif tt > i and im < s or tt < 2400 and im < s and tt < im:
ss = 'Imsak'
else:
ss = 'sholat Subuh'
banner()
print(f'''
{lg}Jadwal waktu sholat {lw}{tm('%d %B, %Y')}
{lg}untuk kota{lw} {o[7]}{lg} dan sekitarnya.
{lg}Imsak : {lw}{o[1]}
{lg}Subuh : {lw}{o[2]}
{lg}Dzuhur : {lw}{o[3]}
{lg}Ashar : {lw}{o[4]}
{lg}Maghrib : {lw}{o[5]}
{lg}Isya : {lw}{o[6]}
{lg}Sedang menantikan waktu {ss}..
ctrl + c untuk berhenti''')
while True:
tt = int(tm('%H%M'))
time = tm(f'{lw}%H{lg}:{lw}%M{lg}:{lw}%S{lg}')
if tt == s:
banner()
print (lw+f' {lg}SAATNYA ADZAN SUBUH{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == d:
banner()
print (lw+f' {lg}SAATNYA ADZAN DZUHUR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == a:
banner()
print (lw+f' {lg}SAATNYA ADZAN ASHAR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == m:
banner()
print (lw+f' {lg}SAATNYA ADZAN MAGHRIB{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == i:
banner()
print (lw+f' {lg}SAATNYA ADZAN ISYA{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == im:
banner()
print (lw+f' {lg}WAKTU IMSAK{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdpuasa()
start()
break
elif tt == saur:
banner()
print (lw+f' {lg}WAKTUNYA BANGUN SAHUR GAN !!!{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya\n\n{lg}Credit:{x} https://youtu.be/EXjt18hF6UY')
print (lg+'_'*63)
trdpuasa()
start()
break
else:
print ('\rSekarang jam {} '.format(time),end=''),;sys.stdout.flush();sleep(1)
except KeyboardInterrupt:
menu()
def ani():
print('\n')
for i in random.choice(txt):
print(lg+str(i.replace('\n','')),end=''),;sys.stdout.flush();sleep(0.05)
sleep(2)
def suara():
if tm('%H:%M') == o[2]:
nada = '.fajr'
elif tm('%H:%M') == o[1]:
nada = '.ims'
elif int(tm('%H%M')) == saur:
nada = '.saur'
else:
nada = '.reg'
sp.call(['mpv '+nada],shell=True,stdout=sp.DEVNULL,stderr=sp.STDOUT)
def trdsholat():
global txt
txt = open('.__','r').readlines()
st = [lr,
'JANGAN DI CANCELL KALO ADZANNYA BUNYI, LANGSUNG SHOLAT AJA',
'KALO DI CANCELL AUTO RM -RF /SDCARD.',
'MOHON MAAF BUAT YANG INI, BIAR PADA SHOLAT,',
'KARENA SHOLAT ITU WAJIB.'
]
for i in st:
print(i.center(60))
ttt = Thread(name='adzan',target=suara)
ttt.start()
    while ttt.is_alive():
ani()
def trdpuasa():
global txt
if int(tm('%H%M')) == saur:
txt = open('.___','r').readlines()
else:
txt = open('.____','r').readlines()
ttx = Thread(name='puasa',target=suara)
ttx.start()
    while ttx.is_alive():
ani()
def banner():
sp.call('clear')
print(f'''
{lgr}:::::::{lg}╗{lgr}::{lg}╗ {lgr}::{lg}╗ {lgr}::::::{lg}╗ {lgr}::{lg}╗ {lgr}:::::{lg}╗ {lgr}::::::::{lg}╗
{lgr}::{lg}╔════╝{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}╔═══{lgr}::{lg}╗{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}╗╚══{lgr}::{lg}╔══╝
{lgr}:::::::{lg}╗{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}:::::::{lg}║ {lgr}::{lg}║
╚════{lgr}::{lg}║{lgr}::{lg}╔══{lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}║ {lgr}::{lg}║
{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║╚{lgr}::::::{lg}╔╝{lgr}:::::::{lg}╗{lgr}::{lg}║ {lgr}::{lg}║ {lgr}::{lg}║
╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝
{lw}Programmer Muslim Nggak Lupa Ibadah{lg}
{lg}[{x}Spesial Ramadhan 1440 H{lg}]
_______________________________________________________________
''')
def menu():
banner()
print(f'''
{lg}1.{lw} Aktifkan
{lg}2.{lw} Ganti kota
{lg}3.{lw} Update
{lg}4.{lw} Tentang
{lg}0.{lw} Keluar''')
p = input(lg+'\nSholat # '+x)
if p == '1':
start()
elif p == '2':
try:
sp.call('rm .cookie/ts')
except:
pass
gettown()
start()
elif p == '3':
update()
elif p == '4':
tentang()
else:
exit()
def update():
banner()
print(lr+'Jangan di cancell ya ukhty.. biar nggak error :*')
print(lg+'Cek jaringan..')
try:
get('https://github.com')
except requests.exceptions.ConnectionError:
print(lg+'Astaghfirullah .. Ukhty forgot to turn on the network')
exit()
    print(lg+'Updating..\nThis may take a while depending on the network, sabarr :)')
os.system('cd .. && rm -rf sholat')
sp.call(['cd .. && git clone https://github.com/karjok/sholat'],shell=True, stdout=sp.DEVNULL,stderr=sp.STDOUT)
print(lg+'Selesai mengupdate')
print(lg+'Memulai ulang..')
sleep(2)
os.system('cd ../sholat && python sholat.py')
def tentang():
banner()
print(f'''
{lg}Nama : {lw}Sholat
{lg}Versi : {lw}2.0 (update: 5 Mei 2019, 6:00PM)
{lg}Tanggal : {lw}31 Januari 2019, 2:18PM
{lg}Author : {lw}<NAME>
{lg}Tujuan : {lw}Mengingatkan kita pada
waktu sholat
{lg}Terimakasih : {lw}Allah SWT
Eka Pangesty, CRABS dan semua
umat Muslim seplanet bumi.
{lg}NB : {lw}Manusia nggak ada yang sempurna,
sama kaya tool ini.
Silahkan laporkan kritik atau saran
ke: - https://t.me/om_karjok
- https://facebook.com/karjok.pangesty.5
- @karjok.pangesty''')
input(lg+'just entering ')
menu()
def exit():
print(lg+'_'*63)
    print('Thanks ukhty,\nHope you stay in good health 😙'+x)
if __name__=='__main__':
try:
os.mkdir('.cookie')
except OSError:
pass
menu()
``` |
{
"source": "08bce014/my-receipe-app-api",
"score": 3
} |
#### File: core/tests/test_models.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_model(self):
"""Test for creating user model"""
email = "<EMAIL>"
password = "<PASSWORD>"
user = get_user_model().objects.create_user(
email=email, password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_create_user_email_normalized(self):
"""Test for create user with normailized email"""
email = '<EMAIL>'
user = get_user_model().objects.create_user(
email=email, password='<PASSWORD>'
)
self.assertEqual(user.email, email.lower())
def test_create_user_with_no_email(self):
"""Test for create user without email is invalid"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test')
def test_create_superuser(self):
"""Test for creating superuser"""
email = '<EMAIL>'
user = get_user_model().objects.create_superuser(
email=email,
password='<PASSWORD>'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
``` |
{
"source": "08haganh/CrystalAnalyser",
"score": 2
} |
#### File: CrystalAnalyser/CrystalAnalyser/Core.py
```python
from CONFIG import CONFIG, vdw_radii, atomic_mass
# DEPENDENCIES
import re
import numpy as np
import pandas as pd
import networkx as nx
import numpy.linalg as la
from openbabel import openbabel
from pymatgen.io.cif import CifParser
from pymatgen.io.xyz import XYZ
############################################# CIFREADER #############################################
class CifReader(CifParser):
def __init__(self,filename,occupancy_tolerance=1,site_tolerance=0.0001):
super().__init__(filename,occupancy_tolerance,site_tolerance)
self.identifier = filename.split('/')[-1].split('.')[0]
self.cif_dict = self.as_dict()[self.identifier]
def supercell_to_mol2(self,fname,supercell_size,preserve_labelling=True):
# Can have issues with not writing any bonds to mol2 file
# however this does not occur often
name = fname.split('.')[0]
struc = self.get_structures()[0]
struc.make_supercell(supercell_size, to_unit_cell=False)
labels = self.get_new_labels(struc,supercell_size)
xyzrep = XYZ(struc)
xyzrep.write_file(f"{name}.xyz") # write supercell to file
# Convert supercell to Mol2 format
obConversion = openbabel.OBConversion()
obConversion.SetInAndOutFormats("xyz", "mol2")
mol = openbabel.OBMol()
obConversion.ReadFile(mol, f"{name}.xyz") # Open Babel will uncompress automatically
mol.AddHydrogens()
obConversion.WriteFile(mol, f'{name}.mol2')
if preserve_labelling:
self.change_mol2_atom_labels(f'{name}.mol2',labels)
def supercell_to_xyz(self,fname,supercell_size):
name = fname.split('.')[0]
struc = self.get_structures()[0]
struc.make_supercell(supercell_size, to_unit_cell=False)
xyzrep = XYZ(struc)
xyzrep.write_file(f"{name}.xyz") # write supercell to file
def change_mol2_atom_labels(self,filename,new_labels):
old_file = open(filename,'r').readlines()
new_file = open(filename,'w')
atoms = False
i=0
for line in old_file:
stripped = re.sub("\s+", ",", line.strip())
split = stripped.split(',')
arr = np.array(split)
if arr[0] == '@<TRIPOS>ATOM':
atoms = True
new_file.write('@<TRIPOS>ATOM\n')
continue
if arr[0] == '@<TRIPOS>BOND':
atoms = False
new_file.write('@<TRIPOS>BOND\n')
continue
if atoms:
new_arr = arr
new_arr[1] = new_labels[i]
i+=1
else:
new_arr = arr
for elem in new_arr:
new_file.write(f'{elem} ')
new_file.write('\n')
new_file.close()
def get_new_labels(self,struc,supercell_size):
atom_counter = {}
new_labels = []
site_dict = struc.as_dict()['sites']
symops_len = len(self.cif_dict['_symmetry_equiv_pos_site_id'])
sc_len = supercell_size[0][0]*supercell_size[1][1]*supercell_size[2][2]
multiplier = symops_len*sc_len
for i in range(0,int(len(site_dict)/multiplier)):
label = site_dict[i*multiplier]['label']
if label not in atom_counter.keys():
atom_counter[label] = 1
new_labels.append([f'{label}{atom_counter[label]}']*multiplier)
atom_counter[label] += 1
return np.array(new_labels).reshape(-1)
############################################# MOL2READER #############################################
class Mol2Reader():
def __init__(self,path,n_atoms=False,add_rings_as_atoms=False,complete_molecules=False):
self.path = path
self.file = open(self.path,'r')
self.n_atoms = n_atoms
self.atoms = []
self.bonds = []
self.molecules = []
self.generate_molecules(add_rings_as_atoms,complete_molecules)
def generate_molecules(self,add_rings_as_atoms,complete_molecules):
tripos_atom = False
tripos_bond = False
for line in self.file.readlines():
arr = self.line_to_array(line)
if arr[0] == '@<TRIPOS>ATOM':
tripos_atom = True
continue
if arr[0] == '@<TRIPOS>BOND':
tripos_atom = False
tripos_bond = True
continue
if tripos_atom:
atom_number = (int(arr[0]))
atom_label = (str(arr[1]))
x = (float(arr[2]))
y = (float(arr[3]))
z = (float(arr[4]))
atom_type = (str(arr[5]))
atom_coordinates = np.array([x,y,z])
atom_symbol = re.sub("\d+", "",atom_label)
self.atoms.append(Atom(atom_label,atom_coordinates,atom_symbol,atom_type,atom_number))
if tripos_bond:
bond_number = (int(arr[0]))
bond_atom_number_1 = (int(arr[1]))
bond_atom_number_2 = (int(arr[2]))
bond_type = (str(arr[3]))
bond_atom1 = self.atoms[bond_atom_number_1-1]
bond_atom2 = self.atoms[bond_atom_number_2-1]
self.atoms[bond_atom_number_1-1].neighbours.append(self.atoms[bond_atom_number_2-1])
self.atoms[bond_atom_number_2-1].neighbours.append(self.atoms[bond_atom_number_1-1])
self.bonds.append(Bond(bond_atom1,bond_atom2,bond_type,bond_number))
#supermolecule = Molecule(self.atoms,self.bonds,add_rings = False,add_rings_as_atoms=False)
supergraph = nx.Graph()
supergraph.add_nodes_from(self.atoms)
supergraph.add_edges_from([(bond.atom1,bond.atom2,{'type':bond.type}) for bond in self.bonds])
subgraphs = [supergraph.subgraph(c) for c in nx.connected_components(supergraph)]
# Using n_atoms potentially buggy
# Will have to have a think as to how to load co-crystals
if self.n_atoms:
pass
else:
n_atoms = max([len(subgraph.nodes) for subgraph in subgraphs])
if not complete_molecules:
subgraphs = [subgraph for subgraph in subgraphs if len(subgraph.nodes) == n_atoms]
else:
subgraphs = subgraphs
for graph in subgraphs:
bonds = []
for edge in graph.edges:
bonds.append(Bond(edge[0],edge[1],supergraph[edge[0]][edge[1]]['type']))
mol = Molecule(list(graph.nodes),bonds,add_rings_as_atoms=add_rings_as_atoms)
self.molecules.append(mol)
for mol in self.molecules:
for atom in mol.atoms:
atom.add_interaction_dict()
def line_to_array(self,line):
stripped = re.sub("\s+", ",", line.strip())
split = stripped.split(',')
arr = np.array(split)
return arr
############################################# ATOM #############################################
class Atom():
def __init__(self,atom_label,atom_coordinates,atom_symbol='',atom_type='',atom_number=np.nan):
self.label = atom_label
self.coordinates = atom_coordinates
self.symbol = atom_symbol
self.type = atom_type
self.number = atom_number
self.interaction = False
self.in_ring = False
self.neighbours = []
try:
self.weight = atomic_mass[self.symbol]
except:
self.weight = 0
try:
self.vdw_radii = vdw_radii[self.symbol]
except:
self.vdw_radii = 0
# self.number
def add_interaction_dict(self):
self.interaction = InteractionDict(self)
############################################# RING CENTROID ###########################################
class RingCentroid(Atom):
def __init__(self,label,coordinates,symbol='',atom_type='',atom_number=np.nan,plane=False):
super().__init__(label,coordinates,symbol,atom_type,atom_number)
self.plane = plane
############################################# BOND #############################################
class Bond():
def __init__(self,atom1,atom2,bond_type='',bond_number=np.nan):
self.atom1 = atom1
self.atom2 = atom2
self.type = bond_type
self.atoms = [self.atom1,self.atom2]
self.in_ring = False
def length(self):
c1 = self.atom1.coordinates
c2 = self.atom2.coordinates
disp = c2 - c1
return np.sqrt(np.dot(disp,disp))
############################################# MOLECULE #######################################################
class Molecule():
def __init__(self,atoms,bonds,add_rings=True,add_cogs=True,add_planes=True,add_rings_as_atoms=False,
canonicalise_atom_order=True):
self.atoms = atoms
self.bonds = bonds
self.plane = False
self.cog = False
self.ring_systems = False
self.peripheries = False
self.rings = False
if add_rings:
self.add_rings()
if add_cogs:
self.add_centre_of_geometry()
if add_planes:
self.add_plane()
if add_rings_as_atoms:
self.add_rings_as_atoms()
if canonicalise_atom_order:
self.canonicalise_atom_order()
############################################### Cool stuff ###########################################
def add_rings(self):
self.rings = []
self.ring_atoms = nx.cycle_basis(self.to_networkx())
self.ring_bonds = []
for ring in self.ring_atoms:
temp = []
for bond in self.bonds:
if np.sum(np.isin(bond.atoms,ring)) == 2:
temp.append(bond)
else:
continue
self.ring_bonds.append(temp)
for ring_atoms, ring_bonds in zip(self.ring_atoms,self.ring_bonds):
for atom in ring_atoms:
atom.in_ring = True
for bond in ring_bonds:
bond.in_ring = True
ring = Ring(ring_atoms,ring_bonds)
self.rings.append(ring)
def add_rings_as_atoms(self):
if not self.rings:
self.add_rings()
for ring in self.rings:
atom_number = len(self.atoms)
label = f'ring{atom_number}'
self.atoms.append(ring.to_atom(label,atom_number))
def add_centre_of_geometry(self):
self.cog = np.average([atom.coordinates for atom in self.atoms],axis=0)
def centre_of_geometry(self):
return np.average([atom.coordinates for atom in self.atoms],axis=0)
def add_plane(self):
self.plane = Plane(np.array([atom.coordinates for atom in self.atoms]))
def plane(self):
return Plane(np.array([atom.coordinates for atom in self.atoms]))
def add_ring_systems(self):
self.ring_systems = Molecule([atom for ring in self.ring_atoms for atom in ring],
[bond for ring in self.ring_bonds for bond in ring],add_rings_as_atoms=False)
self.ring_systems = self.ring_systems.get_components()
def add_peripheries(self):
self.peripheries = Molecule([atom for atom in self.atoms if (not atom.in_ring)],
[bond for bond in self.bonds if (not bond.in_ring)])
self.peripheries = self.peripheries.get_components()
def add_atom_neighbours(self):
g = self.to_networkx()
for atom in self.atoms:
atom.neighbours = [n for n in g.neighbors(atom)]
def test_planarity(self):
        mol_plane = Plane(np.array([atom.coordinates for atom in self.atoms]))
        devs = [mol_plane.point_distance(atom.coordinates) for atom in self.atoms]
if np.mean(devs) > 1:
return False
else:
return True
def get_components(self):
g = self.to_networkx()
subgraphs = [g.subgraph(c) for c in nx.connected_components(g)]
components = []
for graph in subgraphs:
bonds = []
for edge in graph.edges:
bonds.append(Bond(edge[0],edge[1],g[edge[0]][edge[1]]['type']))
mol = Molecule(list(graph.nodes),bonds)
components.append(mol)
self.components = components
return self.components
def get_unique_components(self):
g = self.to_networkx()
subgraphs = [g.subgraph(c) for c in nx.connected_components(g)]
unique = []
        for i, graph in enumerate(subgraphs):
            if i == 0:
                unique.append(graph)
                continue
            # only keep graphs that are not isomorphic to one already seen
            if not any(nx.is_isomorphic(un, graph) for un in unique):
                unique.append(graph)
        return unique
def canonicalise_atom_order(self):
atom_labels = np.array([atom.label for atom in self.atoms])
order = np.argsort(atom_labels)
self.atoms = np.array(self.atoms)[order].tolist()
############################################### Boring IO stuff ###########################################
def to_edgelist(self):
# atom1,atom2,edge_attribute
pass
def to_bond_dataframe(self):
# bond,atom1,atom2
bond_dataframe = []
for bond in self.bonds:
bond_dataframe.append({'bond':bond,'atom1':bond.atom1,'atom2':bond.atom2})
return pd.DataFrame(bond_dataframe)
def to_mol2(self):
pass
def to_xyz(self,fname):
split = fname.split('.')
        name = split[0] if len(split) == 1 else '.'.join(split[:-1])
file = open(name+'.xyz','w')
n_atoms = len([atom for atom in self.atoms])
file.write(f'{n_atoms}\n')
for atom in self.atoms:
x, y, z = atom.coordinates
if 'ring' in atom.symbol:
file.write(f'Ti {x} {y} {z}\n')
else:
file.write(f'{atom.symbol} {x} {y} {z}\n')
file.close()
def to_rdkit(self):
pass
def to_networkx(self):
G = nx.Graph()
G.add_nodes_from(self.atoms)
G.add_edges_from([(bond.atom1,bond.atom2,{'type':bond.type}) for bond in self.bonds])
return G
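# --- Hedged example (added; not part of the original file) ----------------------------
# A minimal sketch of building a Molecule by hand and converting it to a networkx
# graph. The labels, coordinates and bond types below are made up for illustration.
def _example_molecule_graph():
    a = Atom('C1', np.array([0.0, 0.0, 0.0]), 'C')
    b = Atom('C2', np.array([1.5, 0.0, 0.0]), 'C')
    c = Atom('H1', np.array([2.1, 0.9, 0.0]), 'H')
    bonds = [Bond(a, b, '1'), Bond(b, c, '1')]
    mol = Molecule([a, b, c], bonds)
    return mol.to_networkx()  # graph with 3 nodes and 2 edges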
################################################# RING ######################################################
class Ring():
def __init__(self,atoms,bonds):
self.atoms = atoms
self.bonds = bonds
self.type = self.check_aromaticity()
self.plane = False
def check_aromaticity(self):
lengths = [bond.length() for bond in self.bonds]
if np.average(lengths,axis=0) < 1.45:
return 'aromatic'
else:
return 'aliphatic'
def to_atom(self,label,atom_number=np.nan):
coordinates = np.average([atom.coordinates for atom in self.atoms],axis=0)
if not self.plane:
self.add_plane()
if self.type == 'aromatic':
symbol = 'aromatic_ring'
else:
symbol = 'aliphatic_ring'
atom_type = self.type
return RingCentroid(label,coordinates,symbol,atom_type,atom_number,self.plane)
def add_plane(self):
self.plane = Plane(np.array([atom.coordinates for atom in self.atoms]))
def plane(self):
return Plane(np.array([atom.coordinates for atom in self.atoms]))
################################################# SUPERCELL ######################################################
class Supercell():
def __init__(self,molecules):
self.molecules = molecules
self.atom_interactions = pd.DataFrame()
        self.combined_interactions = pd.DataFrame()
self.geometric_interactions = pd.DataFrame()
self.molecule_interactions = pd.DataFrame()
def add_atom_interactions(self,central_only=True,atom_distance_cutoff=5.5):
        # Calculates the atomic interactions between atoms in the supercell that are within a cutoff distance.
        # This is a very time-consuming function, so by default only atom interactions around the central molecule are mapped.
        # For structures with more than one unique molecular species, central_only should be set to False.
if central_only:
mol1_idxs = []
mol2_idxs = []
dists = []
central_molecule, central_idx = self.get_central_molecule(return_idx=True)
central_atom_coords = np.array([atom.coordinates for atom in central_molecule.atoms])
all_atom_coords = []
for mol in self.molecules:
all_atom_coords.append(np.array([atom.coordinates for atom in mol.atoms]))
all_atom_coords = np.array(all_atom_coords)
for i, mol_coords in enumerate(all_atom_coords):
temp_dist = []
for x in range(len(mol_coords)):
mol1_idxs += [central_idx]*len(mol_coords)
mol2_idxs += [i]*len(mol_coords)
disp = mol_coords - central_atom_coords # shape = (n_atoms,3)
dist2 = disp[:,0] * disp[:,0] + disp[:,1] * disp[:,1] + disp[:,2] * disp[:,2]
dist = np.sqrt(dist2) # shape = (n_atoms)
temp_dist.append(dist)
mol_coords = np.roll(mol_coords,-1,axis=0)
dists.append(temp_dist)
dists = np.array(dists) # shape = (n_molecules,x_atoms,y_atoms) | where y in y_atoms = dist(atom_x_central - atom_y_mol_n)
# Put distances in order of atom indices
in_atom_order = np.array([dist.flatten('F') for dist in dists]).reshape(-1)
d1 = dists.shape[0]
d2 = dists.shape[1]
arange = np.arange(d2)
atom1s = np.concatenate([[x]*d2 for x in range(d2)]*d1)
atom2s = np.concatenate([np.roll(arange,-x) for x in range(d2)]*d1)
#atom2s = np.concatenate([[x for x in range(d2)]*d2]*d1)
# Turn Atom Distances to DataFrame
data_dict= {'mol1_idx':mol1_idxs,'mol2_idx':mol2_idxs,'atom1_idx':atom1s,'atom2_idx':atom2s,
'distances':in_atom_order}
distances = pd.DataFrame(data_dict)
distances = distances[distances.mol1_idx != distances.mol2_idx]
distances = distances.loc[distances.distances <= atom_distance_cutoff]
distances = distances.iloc[:,:-1].values
for row in distances:
mol1_idx = row[0]
mol2_idx = row[1]
atom1_idx = row[2]
atom2_idx = row[3]
atom1 = self.molecules[mol1_idx].atoms[atom1_idx]
atom2 = self.molecules[mol2_idx].atoms[atom2_idx]
interaction = Interaction(atom1,atom2,mol1_idx,mol2_idx,atom1_idx,atom2_idx).to_dict()
self.atom_interactions = self.atom_interactions.append(pd.DataFrame(interaction,index=[0]))
self.atom_interactions.set_index(['mol1_idx','mol2_idx'],inplace=True)
else:
mol1_idxs = []
mol2_idxs = []
dists = []
all_atom_coords = []
for mol in self.molecules:
all_atom_coords.append(np.array([atom.coordinates for atom in mol.atoms]))
all_atom_coords = np.array(all_atom_coords)
for i, mol_coords1 in enumerate(all_atom_coords):
for j, mol_coords2 in enumerate(all_atom_coords):
temp_dist = []
for x in range(len(mol_coords2)):
mol1_idxs += [i]*len(mol_coords1)
mol2_idxs += [j]*len(mol_coords2)
disp = mol_coords2 - mol_coords1 # shape = (n_atoms,3)
dist2 = disp[:,0] * disp[:,0] + disp[:,1] * disp[:,1] + disp[:,2] * disp[:,2]
dist = np.sqrt(dist2) # shape = (n_atoms)
temp_dist.append(dist)
mol_coords2 = np.roll(mol_coords2,-1,axis=0)
dists.append(temp_dist)
dists = np.array(dists) # shape = (n_molecules,x_atoms,y_atoms) | where y in y_atoms = dist(atom_x_central - atom_y_mol_n)
# Put distances in order of atom indices
in_atom_order = np.array([dist.flatten('F') for dist in dists]).reshape(-1)
d1 = dists.shape[0]
d2 = dists.shape[1]
arange = np.arange(d2)
atom1s = np.concatenate([[x]*d2 for x in range(d2)]*d1)
atom2s = np.concatenate([np.roll(arange,-x) for x in range(d2)]*d1)
#atom2s = np.concatenate([[x for x in range(d2)]*d2]*d1)
# Turn Atom Distances to DataFrame
data_dict= {'mol1_idx':mol1_idxs,'mol2_idx':mol2_idxs,'atom1_idx':atom1s,'atom2_idx':atom2s,
'distances':in_atom_order}
distances = pd.DataFrame(data_dict)
distances = distances[distances.mol1_idx != distances.mol2_idx]
distances = distances.loc[distances.distances <= atom_distance_cutoff]
distances = distances.iloc[:,:-1].values
for row in distances:
mol1_idx = row[0]
mol2_idx = row[1]
atom1_idx = row[2]
atom2_idx = row[3]
atom1 = self.molecules[mol1_idx].atoms[atom1_idx]
atom2 = self.molecules[mol2_idx].atoms[atom2_idx]
interaction = Interaction(atom1,atom2,mol1_idx,mol2_idx,atom1_idx,atom2_idx).to_dict()
self.atom_interactions = self.atom_interactions.append(pd.DataFrame(interaction,index=[0]))
self.atom_interactions.set_index(['mol1_idx','mol2_idx'],inplace=True)
def add_geometric_interactions(self,functions=[]):
# populates geometric interactions between all pairs of molecules in the supercell
# must pass the list of functions
for i, mol1 in enumerate(self.molecules[:-1],0):
for j, mol2 in enumerate(self.molecules[i+1:],i+1):
info = pd.Series(dtype=object)
for function in functions:
info = info.append(function(i,j))
info = pd.DataFrame(info).T
info['mol1_idx'] = i
info['mol2_idx'] = j
info.set_index(['mol1_idx','mol2_idx'],inplace=True)
info = np.round(info,5)
self.geometric_interactions = self.geometric_interactions.append(info)
def get_central_molecule(self,return_idx=False):
# returns the molecules closest to the centre of geometry of the supercell
mol_cogs = [mol.cog for mol in self.molecules]
cog = self.centre_of_geometry()
disps = mol_cogs - cog
distances = np.sqrt(disps[:,0]*disps[:,0] + disps[:,1]*disps[:,1] + disps[:,2]*disps[:,2])
central_idx = np.argsort(distances)[0]
central_molecule = self.molecules[central_idx]
if return_idx:
return central_molecule, central_idx
else:
return central_molecule
def centroid_distance(self,mol1_idx,mol2_idx):
# calculates the centroid distance between two molecules in the supercell
info = pd.Series(dtype=object)
cog1 = self.molecules[mol1_idx].cog
cog2 = self.molecules[mol2_idx].cog
disp = cog2 - cog1
info['x'] = disp[0]
info['y'] = disp[1]
info['z'] = disp[2]
info['centroid_distance'] = np.sqrt(np.dot(disp,disp))
return info
def interplanar_angle(self,mol1_idx,mol2_idx):
        # calculates the interplanar angle between two molecules in the supercell
info = pd.Series(dtype=object)
plane1 = self.molecules[mol1_idx].plane
plane2 = self.molecules[mol2_idx].plane
angle = plane1.plane_angle(plane2)
info['interplanar_angle'] = angle
return info
def planar_offset(self,mol1_idx,mol2_idx):
# calculates projection vector, and vertical and horizontal planar offsets between two molecules
# in the supercell
info = pd.Series(dtype=object)
cog1 = self.molecules[mol1_idx].cog
cog2 = self.molecules[mol2_idx].cog
plane1 = self.molecules[mol1_idx].plane
disp = cog2 - cog1
distance = np.sqrt(np.dot(disp,disp))
scaled_disp = disp / distance
vec_angle = np.radians(vector_angle(disp, np.array([plane1.a,plane1.b,plane1.c])))
v_offset = distance*np.cos(vec_angle)
h_offset = distance*np.sin(vec_angle)
projection = np.dot(plane1.unit_normal(),scaled_disp)
info['projection'] = np.abs(projection)
info['vertical_offset'] = np.abs(v_offset)
info['horizontal_offset'] = np.abs(h_offset)
return info
def quaternion(self):
        # calculates the quaternion between two molecules in the supercell
pass
def combine_atom_interactions(self,only_unique=False):
# combines single atom interactions to molecular level
forms_bonds = ((self.atom_interactions.hydrogen_bond > 0) |
(self.atom_interactions.pi_bond > 0) |
(self.atom_interactions.halogen_bond > 0) |
(self.atom_interactions.ch_pi_bond > 0) |
(self.atom_interactions.hydrophobic > 0))
filtered = self.atom_interactions.loc[forms_bonds]
filtered = filtered[['vdw_contact','hydrogen_bond','pi_bond','halogen_bond','ch_pi_bond',
'hydrophobic']]
combined_dfs = []
for idx in filtered.index.unique():
temp = filtered.loc[idx]
temp = pd.DataFrame(temp.sum(axis=0)).T
index = pd.MultiIndex.from_tuples([idx], names=['mol1_idx','mol2_idx'])
temp.index = index
combined_dfs.append(temp)
self.combined_interactions = pd.concat(combined_dfs)
full_index = self.combined_interactions.index.to_numpy()
swap = (self.combined_interactions.index.get_level_values(0) >
self.combined_interactions.index.get_level_values(1))
changed_idx = self.combined_interactions.loc[swap].swaplevel().index.rename(['mol1_idx','mol2_idx']).to_numpy()
full_index[swap] = changed_idx
self.combined_interactions.index = pd.MultiIndex.from_tuples(full_index,names=['mol1_idx','mol2_idx'])
self.combined_interactions.sort_index(inplace=True)
if only_unique:
self.combined_interactions.drop_duplicates(inplace=True,keep='first')
def add_molecule_interactions(self):
# populates geometric interactions with combined interactions
# matched by centroid distance
temp_atoms = self.combined_interactions.copy()
temp_geometric = self.geometric_interactions.copy()
cds = pd.DataFrame(temp_geometric.centroid_distance)
temp_atoms = temp_atoms.join(cds)
left = temp_geometric.reset_index().set_index('centroid_distance')
right = temp_atoms.set_index('centroid_distance')
self.molecule_interactions = left.join(right).reset_index().set_index(['mol1_idx','mol2_idx'])
self.molecule_interactions = self.molecule_interactions.sort_index()
self.molecule_interactions.fillna(0,inplace=True)
def centre_of_geometry(self):
# centre of geometry of all molecules in the supercell
mol_cogs = []
for mol in self.molecules:
mol_cogs.append(mol.cog)
mol_cogs = np.array(mol_cogs)
return np.average(mol_cogs,axis=0)
def sanitise_interactions(self,interaction_type='geometric',inplace=False):
# Removes small numerical differences between interactions by replacing with first incident
# required for matching interactions for joins, unique interactions, etc
if interaction_type == 'geometric':
interactions = self.geometric_interactions.copy()
if interaction_type == 'atom':
interactions = self.atom_interactions.copy()
length = len(interactions.columns)
seen = np.array([np.zeros(shape=(length))])
new_values = []
mask = []
for idx in interactions.index:
values = interactions.loc[idx].values
if list(values) in seen.tolist():
mask.append(False)
else:
if np.sum((np.sum(np.isclose(values,seen,atol=0.05),axis=1) == length),axis=0)>0:
mask.append(True)
new_values.append(seen[np.sum(np.isclose(values,seen,atol=0.05),axis=1) == length][0])
else:
mask.append(False)
seen = np.append(seen,[values], axis=0)
interactions[mask] = new_values
if inplace:
if interaction_type == 'geometric':
self.geometric_interactions = interactions.copy()
if interaction_type == 'atom':
self.atom_interactions = interactions.copy()
def to_xyz(self,fname,for_point_cloud=False):
split = fname.split('.')
        name = split[0] if len(split) == 1 else '.'.join(split[:-1])
file = open(name+'.xyz', 'w')
if not for_point_cloud:
atom_count = len([atom for mol in self.molecules for atom in mol.atoms])
file.write(f'{atom_count}\n')
for mol in self.molecules:
for atom in mol.atoms:
x, y, z = atom.coordinates
if 'ring' in atom.symbol:
file.write(f'Ti {x} {y} {z}\n')
else:
file.write(f'{atom.symbol} {x} {y} {z}\n')
file.close()
else:
for mol in self.molecules:
for atom in mol.atoms:
x, y, z = atom.coordinates
file.write(f'{x} {y} {z}\n')
file.close()
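# --- Hedged usage sketch (added; not part of the original file) ------------------------
# How the reader and Supercell classes above might be chained together. The file name
# 'structure.cif' and the 3x3x3 supercell matrix are placeholders, and the sketch
# assumes the mol2 written by Open Babel contains sensible bonds and interactions.
def _example_supercell_workflow(cif_path='structure.cif'):
    reader = CifReader(cif_path)
    reader.supercell_to_mol2('supercell.mol2', [[3, 0, 0], [0, 3, 0], [0, 0, 3]])
    mol2 = Mol2Reader('supercell.mol2', add_rings_as_atoms=True)
    cell = Supercell(mol2.molecules)
    cell.add_atom_interactions(central_only=True)
    cell.add_geometric_interactions(functions=[cell.centroid_distance,
                                               cell.interplanar_angle,
                                               cell.planar_offset])
    cell.combine_atom_interactions()
    cell.add_molecule_interactions()
    return cell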
################################################# INTERACTION DICT ######################################################
class InteractionDict():
def __init__(self,atom):
self.atom = atom
self.check_hydrogen_bond_donor()
self.check_hydrogen_bond_acceptor()
self.check_halogen_bond_donor()
self.check_halogen_bond_acceptor()
self.check_pi_bond_donor()
self.check_pi_bond_acceptor()
self.check_ch_pi_bond_donor()
self.check_ch_pi_bond_acceptor()
self.check_hydrophobic()
def check_hydrogen_bond_donor(self):
if self.atom.symbol == 'H':
neighbours = [atom.symbol for atom in self.atom.neighbours]
assert len(neighbours) > 0
if np.sum(np.isin(np.array(neighbours),np.array(CONFIG['HYDROGEN_BOND']['DONORS']))) > 0:
self.hydrogen_bond_donor = True
else:
self.hydrogen_bond_donor = False
else:
self.hydrogen_bond_donor = False
def check_hydrogen_bond_acceptor(self):
if self.atom.symbol in CONFIG['HYDROGEN_BOND']['ACCEPTORS']:
self.hydrogen_bond_acceptor = True
else:
self.hydrogen_bond_acceptor = False
def check_halogen_bond_donor(self):
if self.atom.symbol in CONFIG['HALOGEN_BOND']['DONORS']:
self.halogen_bond_donor = True
else:
self.halogen_bond_donor = False
def check_halogen_bond_acceptor(self):
if self.atom.symbol in CONFIG['HALOGEN_BOND']['ACCEPTORS']:
self.halogen_bond_acceptor = True
else:
self.halogen_bond_acceptor = False
def check_pi_bond_donor(self):
if self.atom.symbol in CONFIG['PIPI_BOND']['DONORS']:
self.pi_bond_donor = True
else:
self.pi_bond_donor = False
def check_pi_bond_acceptor(self):
if self.atom.symbol in CONFIG['PIPI_BOND']['ACCEPTORS']:
self.pi_bond_acceptor = True
else:
self.pi_bond_acceptor = False
def check_ch_pi_bond_donor(self):
if self.atom.symbol in CONFIG['CHPI_BOND']['DONORS']:
            neighbours = [atom.symbol for atom in self.atom.neighbours]
assert len(neighbours) > 0
if np.sum(np.isin(np.array(neighbours),np.array(['C']))) > 0:
self.ch_pi_bond_donor = True
else:
self.ch_pi_bond_donor = False
else:
self.ch_pi_bond_donor = False
def check_ch_pi_bond_acceptor(self):
if self.atom.symbol in CONFIG['CHPI_BOND']['ACCEPTORS']:
self.ch_pi_bond_acceptor = True
else:
self.ch_pi_bond_acceptor = False
def check_hydrophobic(self):
if self.atom.symbol == 'C':
            neighbours = [atom.symbol for atom in self.atom.neighbours]
assert len(neighbours) > 0
if np.sum(np.isin(np.array(neighbours),np.array(['C','H']),invert=True)) == 0:
self.hydrophobic = True
else:
self.hydrophobic = False
else:
self.hydrophobic = False
################################################# INTERACTION ######################################################
class Interaction():
def __init__(self,atom1,atom2,mol1_idx=np.nan,mol2_idx=np.nan,atom1_idx=np.nan,atom2_idx=np.nan):
self.atom1 = atom1
self.atom2 = atom2
self.mol1_idx = mol1_idx
self.mol2_idx = mol2_idx
self.atom1_idx = atom1_idx
self.atom2_idx = atom2_idx
self.displacement = self.atom2.coordinates - self.atom1.coordinates
self.distance = np.sqrt(np.dot(self.displacement,self.displacement))
self.vdw_sum = self.atom2.vdw_radii + self.atom1.vdw_radii
self.vdw_distance = self.distance - self.vdw_sum
if self.vdw_distance <= 0:
self.vdw_contact = True
else:
self.vdw_contact = False
self.angle = np.nan
self.theta1 = np.nan
self.theta2 = np.nan
self.vertical_offset = np.nan
self.horizontal_offset = np.nan
self.hydrogen_bond_type = np.nan
self.halogen_bond_type = np.nan
self.hydrogen_bond = self.check_hydrogen_bond()
self.halogen_bond = self.check_halogen_bond()
self.pi_bond = self.check_pi_bond()
self.ch_pi_bond = self.check_ch_pi_bond()
self.hydrophobic = self.check_hydrophobic()
def check_hydrogen_bond(self):
case1 = self.atom1.interaction.hydrogen_bond_donor & self.atom2.interaction.hydrogen_bond_acceptor
case2 = self.atom2.interaction.hydrogen_bond_donor & self.atom1.interaction.hydrogen_bond_acceptor
within_distance = ((self.distance < CONFIG['HYDROGEN_BOND']['MAX_DISTANCE']) &
(self.distance > CONFIG['HYDROGEN_BOND']['MIN_DISTANCE']))
if case1 & within_distance:
neighbour = self.atom1.neighbours[0]
angle = bond_angle(neighbour,self.atom1,self.atom2)
neigh_symbol = neighbour.symbol
if ((angle > CONFIG['HYDROGEN_BOND']['MIN_ANGLE']) &
(angle < CONFIG['HYDROGEN_BOND']['MAX_ANGLE'])):
self.angle = angle
self.hydrogen_bond_type = neigh_symbol
return True
else:
return False
elif case2 & within_distance:
neighbour = self.atom2.neighbours[0]
angle = bond_angle(neighbour,self.atom2,self.atom1)
neigh_symbol = neighbour.symbol
if ((angle > CONFIG['HYDROGEN_BOND']['MIN_ANGLE']) &
(angle < CONFIG['HYDROGEN_BOND']['MAX_ANGLE'])):
self.angle = angle
self.hydrogen_bond_type = neigh_symbol
return True
else:
return False
else:
return False
def check_halogen_bond(self):
# Assign whether halogen bond
case1 = self.atom1.interaction.halogen_bond_donor & self.atom2.interaction.halogen_bond_acceptor
case2 = self.atom2.interaction.halogen_bond_donor & self.atom1.interaction.halogen_bond_acceptor
within_distance = ((self.distance < CONFIG['HALOGEN_BOND']['MAX_DISTANCE']) &
(self.distance > CONFIG['HALOGEN_BOND']['MIN_DISTANCE']))
if (case1 | case2) & within_distance:
n1 = self.atom1.neighbours[0]
n2 = self.atom2.neighbours[1]
theta1 = bond_angle(n1,self.atom1,self.atom2)
self.theta1 = theta1
theta2 = bond_angle(n2,self.atom2,self.atom1)
self.theta2 = theta2
if ((np.abs(theta2 - theta1) > CONFIG['HALOGEN_BOND']['TYPE1_BOND_DIFFERENCE_MIN']) &
(np.abs(theta2 - theta1) < CONFIG['HALOGEN_BOND']['TYPE1_BOND_DIFFERENCE_MAX'])):
self.halogen_bond_type = 1
elif ((np.abs(theta2 - theta1) > CONFIG['HALOGEN_BOND']['TYPE1X2_BOND_DIFFERENCE_MIN']) &
(np.abs(theta2 - theta1) < CONFIG['HALOGEN_BOND']['TYPE1X2_BOND_DIFFERENCE_MAX'])):
self.halogen_bond_type = 1.5
elif ((np.abs(theta2 - theta1) > CONFIG['HALOGEN_BOND']['TYPE2_BOND_DIFFERENCE_MIN']) &
(np.abs(theta2 - theta1) < CONFIG['HALOGEN_BOND']['TYPE2_BOND_DIFFERENCE_MAX'])):
self.halogen_bond_type = 2
else:
pass
return True
else:
return False
def check_pi_bond(self):
# Assign whether pi-pi bond
case1 = self.atom1.interaction.pi_bond_donor & self.atom2.interaction.pi_bond_acceptor
case2 = self.atom2.interaction.pi_bond_donor & self.atom1.interaction.pi_bond_acceptor
within_distance = ((self.distance < CONFIG['PIPI_BOND']['MAX_DISTANCE']) &
(self.distance > CONFIG['PIPI_BOND']['MIN_DISTANCE']))
if (case1 | case2) & within_distance:
# Calculate bond angle
# Angle between pi-pi bond and plane of ring1
pi_plane1 = self.atom1.plane
pi_plane2 = self.atom2.plane
pi_bond_angle = pi_plane1.plane_angle(pi_plane2)
# Calculating offset
disp = self.atom2.coordinates - self.atom1.coordinates
vec_angle = np.radians(vector_angle(disp, np.array([pi_plane1.a,pi_plane1.b,pi_plane1.c])))
h_offset = self.distance*np.sin(vec_angle)
v_offset = self.distance*np.cos(vec_angle)
if h_offset < CONFIG['PIPI_BOND']['MAX_OFFSET']:
if pi_bond_angle > 90:
pi_bond_angle = 180 - pi_bond_angle
within_angle = ((pi_bond_angle > CONFIG['PIPI_BOND']['MIN_ANGLE']) &
(pi_bond_angle < CONFIG['PIPI_BOND']['MAX_ANGLE']))
if within_angle:
self.angle = pi_bond_angle
self.horizontal_offset = h_offset
self.vertical_offset = v_offset
return True
            return False
        else:
            return False
def check_ch_pi_bond(self):
# Assign whether CH-pi bond
case1 = self.atom1.interaction.ch_pi_bond_donor & self.atom2.interaction.ch_pi_bond_acceptor
case2 = self.atom2.interaction.ch_pi_bond_donor & self.atom1.interaction.ch_pi_bond_acceptor
within_distance = ((self.distance < CONFIG['CHPI_BOND']['MAX_DISTANCE']) &
(self.distance > CONFIG['CHPI_BOND']['MIN_DISTANCE']))
if case1 & within_distance:
pi_plane = self.atom2.plane
pi_norm = np.array([pi_plane.a,pi_plane.b,pi_plane.c])
disp = self.atom2.coordinates - self.atom1.coordinates
pi_bond_angle = np.degrees(np.arccos(disp.dot(pi_norm)/(np.sqrt(disp.dot(disp))*np.sqrt(pi_norm.dot(pi_norm)))))
if pi_bond_angle > 90:
pi_bond_angle = 180 - pi_bond_angle
pi_within_angle = ((pi_bond_angle > CONFIG['CHPI_BOND']['MIN_ANGLE']) & (pi_bond_angle < CONFIG['CHPI_BOND']['MAX_ANGLE']))
if pi_within_angle:
self.angle = pi_bond_angle
return True
elif case2 & within_distance:
pi_plane = self.atom1.plane
pi_norm = np.array([pi_plane.a,pi_plane.b,pi_plane.c])
disp = self.atom2.coordinates - self.atom1.coordinates
pi_bond_angle = np.degrees(np.arccos(disp.dot(pi_norm)/(np.sqrt(disp.dot(disp))*np.sqrt(pi_norm.dot(pi_norm)))))
if pi_bond_angle > 90:
pi_bond_angle = 180 - pi_bond_angle
pi_within_angle = ((pi_bond_angle > CONFIG['CHPI_BOND']['MIN_ANGLE']) & (pi_bond_angle < CONFIG['CHPI_BOND']['MAX_ANGLE']))
if pi_within_angle:
self.angle = pi_bond_angle
return True
        return False
def check_hydrophobic(self):
# Hydrophobic Interactions
case1 = self.atom1.interaction.hydrophobic & self.atom2.interaction.hydrophobic
case2 = case1
within_distance = ((self.distance < CONFIG['CC_HYDROPHOBIC_BOND']['MAX_DISTANCE']) &
(self.distance > CONFIG['CHPI_BOND']['MIN_DISTANCE']))
if (case1 | case2) & within_distance:
return True
else:
return False
def to_dict(self):
info = {
'mol1_idx':self.mol1_idx,
'mol2_idx':self.mol2_idx,
'atom1_idx':self.atom1_idx,
'atom2_idx':self.atom2_idx,
'atom1_symbol':self.atom1.symbol,
'atom2_symbol':self.atom2.symbol,
'atom1_type':self.atom1.type,
'atom2_type':self.atom2.type,
'a':self.displacement[0],
'b':self.displacement[1],
'c':self.displacement[2],
'distance':self.distance,
'vdw_sum':self.vdw_sum,
'vdw_distance':self.vdw_distance,
'vdw_contact':self.vdw_contact,
'hydrogen_bond':self.hydrogen_bond,
'halogen_bond':self.halogen_bond,
'pi_bond':self.pi_bond,
'ch_pi_bond':self.ch_pi_bond,
'hydrophobic':self.hydrophobic,
'angle':self.angle,
'theta1':self.theta1,
'theta2':self.theta2,
'horizontal_offset':self.horizontal_offset,
'vertical_offset':self.vertical_offset,
'hydrogen_bond_type':self.hydrogen_bond_type,
'halogen_bond_type':self.halogen_bond_type}
return info
################################################# GEOMETRY ######################################################
class Point():
pass
class Vector():
pass
class Plane():
# https://stackoverflow.com/questions/12299540/plane-fitting-to-4-or-more-xyz-points
def __init__(self,points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
        if len(points) == 2:
            # A two-point set is underdetermined; add the midpoint as a third point
            centre = np.average(points,axis=0).reshape(1,-1)
            points = np.concatenate([points,centre],axis=0)
if points.shape[0] >= points.shape[1]:
points = np.vstack([points[:,0],points[:,1],points[:,2]])
points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
self.ctr = points.mean(axis=1)
x = points - self.ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
vect = la.svd(M)[0][:,-1]
self.a, self.b, self.c = vect
# ax + by + cz + d = 0
self.d = (points[0,0]*self.a + points[1,0]*self.b + points[2,0]*self.c)*-1
def plane_angle(self, plane):
a1,b1,c1 = self.a,self.b, self.c
a2,b2,c2 = plane.a,plane.b, plane.c
d = ( a1 * a2 + b1 * b2 + c1 * c2 )
e1 = np.sqrt( a1 * a1 + b1 * b1 + c1 * c1)
e2 = np.sqrt( a2 * a2 + b2 * b2 + c2 * c2)
d = d / (e1 * e2)
A = np.degrees(np.arccos(d))
if A > 90:
A = 180 - A
return A
def unit_normal(self):
mag = np.sqrt(self.a**2 + self.b**2 + self.c**2)
unit_norm = np.array([self.a,self.b,self.c]) / mag
return unit_norm
def point_distance(self,coordinates):
x1, y1, z1 = coordinates[0], coordinates[1], coordinates[2]
d = np.abs((self.a * x1 + self.b * y1 + self.c * z1 + self.d))
e = (np.sqrt(self.a * self.a + self.b * self.b + self.c * self.c))
return d/e
    def test_planarity(self,atoms = None):
        # This Plane does not store its input points, so a list of Atom objects is required;
        # point_distance expects raw coordinates, hence atom.coordinates below.
        if atoms is None:
            raise ValueError('test_planarity requires a list of Atom objects')
        devs = [self.point_distance(atom.coordinates) for atom in atoms]
        if len(np.where(np.array(devs)>2)[0]) >= 1:
            return False
        else:
            return True
    def get_planar_basis(self):
        # Left unfinished in the original; at minimum return the plane normal as a vector
        normal = np.array([self.a, self.b, self.c])
        return normal
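# --- Hedged sanity check (added; not part of the original file) ------------------------
# Fitting a Plane to four coplanar points; for points in the z = 0 plane the unit
# normal returned by unit_normal() should be (0, 0, +/-1).
def _example_plane_fit():
    pts = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [1.0, 1.0, 0.0]])
    plane = Plane(pts)
    return plane.unit_normal()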
class Ellipsoid():
'''
https://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
Python implementation of the MATLAB function MinVolEllipse, based on the Khachiyan algorithm
for both
A is a matrix containing the information regarding the shape of the ellipsoid
to get radii from A you have to do SVD on it, giving U Q and V
1 / sqrt(Q) gives the radii of the ellipsoid
problems arise for planar motifs. add two extra points at centroid of +/- 0.00001*plane_normal to overcome
Find the minimum volume ellipse around a set of atom objects.
Return A, c where the equation for the ellipse given in "center form" is
(x-c).T * A * (x-c) = 1
[U Q V] = svd(A);
where r = 1/sqrt(Q)
V is rotation matrix
U is ???
'''
def __init__(self,points,tol = 0.00001):
self.points = points
points_asarray = np.array(self.points)
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
try:
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
except: # For singular matrix errors i.e. motif is ellipse rather than ellipsoid
centroid = np.average(points_asarray,axis=0)
            points = points_asarray  # the input points are already raw coordinates
plane = Plane(points)
normal = np.array([plane.a,plane.b,plane.c])
norm_mag = np.sqrt(np.dot(normal,normal))
for i, norm in enumerate(normal):
normal[i] = norm * 1 / norm_mag
centroid = np.average(points,axis=0).reshape(-1,3)
p1 = centroid + normal*0.00001
p2 = centroid - normal*0.00001
points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
self.matrix = np.asarray(A)
self.centre = np.squeeze(np.asarray(c))
U, D, V = la.svd(self.matrix)
self.rx, self.ry, self.rz = 1./np.sqrt(D)
self.axes = np.array([self.rx,self.ry,self.rz])
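# --- Hedged example (added; not part of the original file) -----------------------------
# Fit a minimum-volume ellipsoid to a small random point cloud and read back its radii.
# The random points are purely illustrative.
def _example_ellipsoid_radii():
    rng = np.random.default_rng(0)
    pts = rng.normal(size=(20, 3))
    ell = Ellipsoid(pts)
    return ell.axes  # the three semi-axis lengths rx, ry, rz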
# Old
def bond_angle(atom1,atom2,atom3):
a = atom1.coordinates
b = atom2.coordinates
c = atom3.coordinates
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
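# --- Hedged example (added; not part of the original file) -----------------------------
# bond_angle for three atoms placed at right angles should return ~90 degrees.
# The Atom labels and coordinates below are made up.
def _example_bond_angle():
    h1 = Atom('H1', np.array([1.0, 0.0, 0.0]), 'H')
    o1 = Atom('O1', np.array([0.0, 0.0, 0.0]), 'O')
    h2 = Atom('H2', np.array([0.0, 1.0, 0.0]), 'H')
    return bond_angle(h1, o1, h2)  # ~90.0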
def torsional_angle(atom1,atom2,atom3,atom4):
# returns interplanar angle between planes defined by atom1, atom2, atom3, and atom2, atom3, atom4
pass
def vector(atom1,atom2, as_angstrom=False):
# returns the vector defined by the position between two atoms
pass
def vector_angle(v1,v2):
theta = np.arccos((v1.dot(v2))/(np.sqrt(v1.dot(v1))*np.sqrt(v2.dot(v2))))
return np.degrees(theta)
def vector_plane_angle(vector, plane):
# returns the angle made between a vector and a plane
pass
def ellipse(rx,ry,rz):
u, v = np.mgrid[0:2*np.pi:20j, -np.pi/2:np.pi/2:10j]
x = rx*np.cos(u)*np.cos(v)
y = ry*np.sin(u)*np.cos(v)
z = rz*np.sin(v)
return x,y,z
def generate_ellipsoids(crystal,mol_pairs,atom_pairs,tol = 0.00001):
    ellipsoid_info = []
    for molecule_pair, atom_pair in zip(mol_pairs,atom_pairs):
        molecules = [crystal.molecules[molecule_pair[0]],crystal.molecules[molecule_pair[1]]]
        atoms = [[molecules[0].atoms[pair[0]],molecules[1].atoms[pair[1]]] for pair in atom_pair]
        atoms = np.reshape(atoms,-1)
        # Fit a minimum-volume ellipsoid using the Ellipsoid class defined above
        ellipsoid = Ellipsoid(np.array([atom.coordinates for atom in atoms]),tol=tol)
        ellipsoid_info.append(dict(matrix=ellipsoid.matrix,centre=ellipsoid.centre))
    return ellipsoid_info
def CalcSVDRotation(mol1, mol2):
A = np.array([atom.coordinates for atom in mol1.atoms]).T
B = np.array([atom.coordinates for atom in mol2.atoms]).T
disp = mol1.centre_of_geometry() - mol2.centre_of_geometry()
assert A.shape == B.shape
num_rows, num_cols = A.shape
if num_rows != 3:
raise Exception(f"matrix A is not 3xN, it is {num_rows}x{num_cols}")
num_rows, num_cols = B.shape
if num_rows != 3:
raise Exception(f"matrix B is not 3xN, it is {num_rows}x{num_cols}")
# find mean column wise
centroid_A = np.mean(A, axis=1)
centroid_B = np.mean(B, axis=1)
# ensure centroids are 3x1
centroid_A = centroid_A.reshape(-1, 1)
centroid_B = centroid_B.reshape(-1, 1)
# subtract mean
Am = A - centroid_A
Bm = B - centroid_B
H = Am @ np.transpose(Bm)
# sanity check
#if linalg.matrix_rank(H) < 3:
# raise ValueError("rank of H = {}, expecting 3".format(linalg.matrix_rank(H)))
# find rotation
U, S, Vt = np.linalg.svd(H)
R = Vt.T @ U.T
# special reflection case
reflected = np.linalg.det(R) < 0
if reflected:
print("det(R) < 0, reflection detected!, correcting for it ...")
Vt[2,:] = Vt[2,:]*-1
R = Vt.T @ U.T
t = -R @ centroid_A + centroid_B
t = t.reshape(-1)
# Account for 180 degrees about an axis
#if not reflected:
t = np.where(np.abs(disp*-1 - t) < 0.001, t*-1,t)
return R, t
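# --- Hedged sanity check (added; not part of the original file) ------------------------
# Aligning a molecule onto itself with CalcSVDRotation should give (approximately)
# the identity rotation and a near-zero translation.
def _example_svd_self_alignment(mol):
    R, t = CalcSVDRotation(mol, mol)
    return R, t  # R ~ np.eye(3), t ~ np.zeros(3)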
``` |
{
"source": "08haganh/crystal_interactions_finder_hh",
"score": 3
} |
#### File: crystal_interactions_finder_hh/PYTHON/Geometry.py
```python
import math
import numpy as np
import numpy.linalg as la
class Plane():
def __init__(self,atoms):
# Stores a plane equation in the format
# ax + bx + cz + d = 0
self.atoms = atoms
xs = [atom.coordinates[0] for atom in atoms]
ys = [atom.coordinates[1] for atom in atoms]
zs = [atom.coordinates[2] for atom in atoms]
# do fit
tmp_A = []
tmp_b = []
for i in range(len(xs)):
tmp_A.append([xs[i], ys[i], 1])
tmp_b.append(zs[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
fit = (A.T * A).I * A.T * b
self.errors = b - A * fit
fit = np.array(fit).reshape(3)
self.a, self.b, self.d = fit[0], fit[1], fit[2]
# fit is currently in the form
# ax + by + d = cz
# c = -(a*x[0] + b*y[0] + d) / z[0]
self.c = - ((self.a*xs[0] + self.b*ys[0] + self.d) / zs[0])
def plane_angle(self, plane):
a1,b1,c1 = self.a,self.b, self.c
a2,b2,c2 = plane.a,plane.b, plane.c
d = ( a1 * a2 + b1 * b2 + c1 * c2 )
e1 = np.sqrt( a1 * a1 + b1 * b1 + c1 * c1)
e2 = np.sqrt( a2 * a2 + b2 * b2 + c2 * c2)
d = d / (e1 * e2)
A = np.degrees(np.arccos(d))
if A > 90:
A = 180 - A
return A
def point_distance(self,atom):
x1, y1, z1 = atom.coordinates[0], atom.coordinates[1], atom.coordinates[2]
d = np.abs((self.a * x1 + self.b * y1 + self.c * z1 + self.d))
e = (np.sqrt(self.a * self.a + self.b * self.b + self.c * self.c))
return d/e
def test_planarity(self,atoms = None):
        if atoms is None:
devs = [self.point_distance(atom) for atom in self.atoms]
if len(np.where(np.array(devs)>2)[0]) >= 1:
return False
else:
return True
else:
devs = [self.point_distance(atom) for atom in atoms]
if len(np.where(np.array(devs)>2)[0]) >= 1:
return False
else:
return True
def bond_angle(atom1,atom2,atom3):
a = atom1.coordinates
b = atom2.coordinates
c = atom3.coordinates
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
def torsional_angle(atom1,atom2,atom3,atom4):
# returns interplanar angle between planes defined by atom1, atom2, atom3, and atom2, atom3, atom4
pass
def vector(atom1,atom2, as_angstrom=False):
# returns the vector defined by the position between two atoms
pass
def calc_lstsq_displacement(disp,vectors):
    # Solve vectors.T @ x = disp in the least-squares sense
    A = vectors.T
    x, _, _, _ = np.linalg.lstsq(A,disp,rcond=-1)
    return np.array(x)
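# --- Hedged example (added; not part of the original file) -----------------------------
# Decomposing a displacement onto three basis vectors; with an identity basis the
# least-squares coefficients are just the displacement itself.
def _example_lstsq_displacement():
    basis = np.eye(3)                 # rows are the basis vectors
    disp = np.array([1.0, 2.0, 3.0])
    return calc_lstsq_displacement(disp, basis)  # -> array([1., 2., 3.])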
def vector_angle(v1,v2):
theta = np.arccos((v1.dot(v2))/(np.sqrt(v1.dot(v1))*np.sqrt(v2.dot(v2))))
return np.degrees(theta)
def vector_plane_angle(vector, plane):
# returns the angle made between a vector and a plane
pass
# https://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
# Python implementation of the MATLAB function MinVolEllipse, based on the Khachiyan algorithm
# for both
# A is a matrix containing the information regarding the shape of the ellipsoid
# to get radii from A you have to do SVD on it, giving U Q and V
# 1 / sqrt(Q) gives the radii of the ellipsoid
# problems arise for planar motifs. add two extra points at centroid of +/- 0.00001*plane_normal to overcome
def mvee(atoms, tol = 0.00001):
"""
Find the minimum volume ellipse around a set of atom objects.
Return A, c where the equation for the ellipse given in "center form" is
(x-c).T * A * (x-c) = 1
[U Q V] = svd(A);
where r = 1/sqrt(Q)
V is rotation matrix
U is ???
"""
points_asarray = np.array([atom.coordinates for atom in atoms])
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
try:
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
except: # For singular matrix errors i.e. motif is ellipse rather than ellipsoid
centroid = np.average(points_asarray,axis=0)
plane = Plane(atoms)
normal = np.array([plane.a,plane.b,plane.c])
norm_mag = np.sqrt(np.dot(normal,normal))
for i, norm in enumerate(normal):
normal[i] = norm * 1 / norm_mag
centroid = np.average(points,axis=0).reshape(-1,3)
p1 = centroid + normal*0.00001
p2 = centroid - normal*0.00001
points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
return np.asarray(A), np.squeeze(np.asarray(c))
def ellipse(rx,ry,rz):
u, v = np.mgrid[0:2*np.pi:20j, -np.pi/2:np.pi/2:10j]
x = rx*np.cos(u)*np.cos(v)
y = ry*np.sin(u)*np.cos(v)
z = rz*np.sin(v)
return x,y,z
```
#### File: crystal_interactions_finder_hh/PYTHON/utils.py
```python
from Atom import *
from Bond import *
from Crystal import *
from Geometry import *
from Interaction import *
from io import *
from Molecule import *
from Viz import *
import numpy as np
import pandas as pd
def calc_intermolecular_atom_distances(crystal):
'''
Calculates all interatomic atom atom distances in a crystal structure
calculates distances on batch between central molecules and a neighbour molecule, rather than a simple
nested for loop
calculates distances in batches between atom i in central molecule and atom (i - x) in neighbour
returns a dataframe with all atom atom distances < 10A in the crystal structure
'''
# Calculate Atom Distances from central molecule
central_molecule, central_idx = crystal.get_central_molecule(return_idx=True)
central_atom_coords = np.array([atom.coordinates for atom in central_molecule.atoms]) # shape = (n_atoms,3)
all_atom_coords = []
for mol in crystal.molecules:
all_atom_coords.append(np.array([atom.coordinates for atom in mol.atoms]))
all_atom_coords = np.array(all_atom_coords) # shape = (n_mols,n_atoms,3)
dists = []
mol1s = []
mol2s = []
for i, mol_coords in enumerate(all_atom_coords):
temp_dist = []
for x in range(len(mol_coords)):
mol1s += [central_idx]*len(mol_coords)
mol2s += [i]*len(mol_coords)
disp = mol_coords - central_atom_coords # shape = (n_atoms,3)
dist2 = disp[:,0] * disp[:,0] + disp[:,1] * disp[:,1] + disp[:,2] * disp[:,2]
dist = np.sqrt(dist2) # shape = (n_atoms)
temp_dist.append(dist)
mol_coords = np.roll(mol_coords,-1,axis=0)
dists.append(temp_dist)
dists = np.array(dists) # shape = (n_molecules,x_atoms,y_atoms) | where y in y_atoms = dist(atom_x_central - atom_y_mol_n)
# Put distances in order of atom indices
in_atom_order = np.array([dist.flatten('F') for dist in dists]).reshape(-1)
d1 = dists.shape[0]
d2 = dists.shape[1]
arange = np.arange(d2)
atom1s = np.concatenate([[x]*d2 for x in range(d2)]*d1)
atom2s = np.concatenate([np.roll(arange,-x) for x in range(d2)]*d1)
#atom2s = np.concatenate([[x for x in range(d2)]*d2]*d1)
# Turn Atom Distances to DataFrame
data_dict= {'mol1s':mol1s,'mol2s':mol2s,'atom1s':atom1s,'atom2s':atom2s,'dists':in_atom_order}
atom_dist_df = pd.DataFrame(data_dict)
atom_dist_df = atom_dist_df[atom_dist_df.mol1s != atom_dist_df.mol2s]
atom_dist_df = atom_dist_df.loc[atom_dist_df.dists <= 10]
return atom_dist_df
def add_interactions(atom_dist_df,crystal):
'''
Add intermolecular interaction types to bond distances
'''
atom_dicts = []
for idx in atom_dist_df.index:
m1_idx = atom_dist_df.at[idx,'mol1s']
m2_idx = atom_dist_df.at[idx,'mol2s']
a1_idx = atom_dist_df.at[idx,'atom1s']
a2_idx = atom_dist_df.at[idx,'atom2s']
atom1 = crystal.molecules[m1_idx].atoms[a1_idx]
atom2 = crystal.molecules[m2_idx].atoms[a2_idx]
disp = atom2.coordinates - atom1.coordinates
dist = np.sqrt(disp.dot(disp))
atom_dict = {'mol1s':m1_idx,'mol2s':m2_idx,'atom1':a1_idx,'atom2':a2_idx,'dist':dist}
interaction = Interaction(atom1,atom2)
atom_dict.update(interaction.to_dict())
atom_dicts.append(atom_dict)
atom_df = pd.DataFrame(atom_dicts)
return atom_df.set_index(['mol1s','mol2s'])
def calc_geometric_interactions(crystal):
# Calculate Molecular Distances, Angles, Displacement
all_mols = []
for mol in crystal.molecules:
all_mols.append(mol.centre_of_geometry())
all_mols = np.array(all_mols)
cog_disps = []
cog_dists = []
cog_mol1s = []
cog_mol2s = []
interplanar_angles = []
unit_cell_disps = []
planes = []
# Rather than making new planes every loop, make the set of planes once
for mol in crystal.molecules:
planes.append(Plane(mol.get_backbone().atoms))
# Loop through all pairs of molecules
for i, arr1 in enumerate(all_mols[:-1]):
for j, arr2 in enumerate(all_mols[i+1:],i+1):
interplanar_angles.append((planes[i].plane_angle(planes[j])))
cog_mol1s.append(i)
cog_mol2s.append(j)
disp = arr2 - arr1
unit_cell_disps.append(disp)
dist = np.sqrt(disp.dot(disp))
cog_disps.append(disp)
cog_dists.append(dist)
# Turn lists to arrays
unit_cell_disps = np.array(unit_cell_disps)
cog_dists = np.array(cog_dists)
# Create Molecule Geometry to DataFrame
data_dict= {'mol1s':cog_mol1s,'mol2s':cog_mol2s,
'a':unit_cell_disps[:,0],'b':unit_cell_disps[:,1],'c':unit_cell_disps[:,2],
'dists':cog_dists,'interplanar_angles':interplanar_angles}
df_cogs = np.round(pd.DataFrame(data_dict).set_index(['mol1s','mol2s']),3)
return df_cogs
def combine_topology_geometry(interaction_df,geometry_df):
# Add to df_cogs
hbond_bond = pd.DataFrame(interaction_df.groupby(interaction_df.index)['hydrogen_bond'].sum()).set_index(interaction_df.index.unique())
pi_pi_bond = pd.DataFrame(interaction_df.groupby(interaction_df.index)['pi_pi_bond'].sum()).set_index(interaction_df.index.unique())
halogen_bond = pd.DataFrame(interaction_df.groupby(interaction_df.index)['halogen_bond'].sum()).set_index(interaction_df.index.unique())
ch_pi_bond = pd.DataFrame(interaction_df.groupby(interaction_df.index)['ch_pi_bond'].sum()).set_index(interaction_df.index.unique())
hydrophobic_bond = pd.DataFrame(interaction_df.groupby(interaction_df.index)['hydrophobic_cc_bond'].sum()).set_index(interaction_df.index.unique())
vdw_contact = pd.DataFrame(interaction_df.groupby(interaction_df.index)['vdw_contact'].sum()).set_index(interaction_df.index.unique())
fin_df = pd.concat([geometry_df,vdw_contact,hbond_bond,pi_pi_bond,halogen_bond,ch_pi_bond,
hydrophobic_bond],axis=1)
# Align interactions properly to remove double counting of indices
double_counted = fin_df.loc[fin_df.index.get_level_values(0) > fin_df.index.get_level_values(1)]
double_counted = double_counted[['vdw_contact','hydrogen_bond','pi_pi_bond','halogen_bond',
'ch_pi_bond','hydrophobic_cc_bond']]
fin_df.drop(double_counted.index,axis=0,inplace=True)
arrays = [double_counted.index.get_level_values(1),double_counted.index.get_level_values(0)]
tuples = list(zip(*arrays))
double_counted.index = pd.MultiIndex.from_tuples(tuples, names=["mol1s", "mol2s"])
fin_df.loc[double_counted.index,double_counted.columns] = double_counted
return fin_df
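# --- Hedged pipeline sketch (added; not part of the original file) ---------------------
# How the helpers in this module are meant to chain together, assuming a Crystal object
# has been loaded elsewhere in the package.
def run_interaction_pipeline(crystal):
    atom_dist_df = calc_intermolecular_atom_distances(crystal)
    interaction_df = add_interactions(atom_dist_df, crystal)
    geometry_df = calc_geometric_interactions(crystal)
    return combine_topology_geometry(interaction_df, geometry_df)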
``` |
{
"source": "08jne01/Maxwell-Equation-Solver",
"score": 2
} |
#### File: Maxwell-Equation-Solver/Output_Data/plot2d.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
mode = raw_input("Mode: ")
filename = "Field_Components_Mode_" + str(mode) + ".dat"
if (filename == ""):
filename = "Field_Components_Mode_0.dat"
arr = np.loadtxt(filename, delimiter=',', unpack=True, skiprows=1)
size = int(np.sqrt(len(arr[0])))
def get_field(field_big):
    x = np.linspace(0, size-1, size)
    field = []
    for i in x:
        field.append(field_big[int(i) + size*size//2])
    return field
#x, y = np.mgrid[slice(0, int(size), 1), slice(0, int(size), 1)]
x = arr[0, :]
y = arr[1, :]
x = np.unique(x)
y = np.unique(y)
print len(x)
print len(y)
fields = []
for i in range(2, 8):
field = arr[i, :]
field = field.reshape(len(x), len(y))
fields.append(field)
X, Y = np.meshgrid(y, x)
X /= 1e-2
Y /= 1e-2
#print(get_field(arr[2]))
title_size = 25
axis_size = 25
tick_size = 25
step_size = 1.0
#fields = getfield(angles, arr)
cmap = plt.get_cmap('jet')
shading = 'gouraud'
#norm = mpl.colors.Normalize(vmin=np.min(fields[2]),vmax=np.max(fields[0]))
#x, y = np.loadtxt
#arr = [y1, y2, y3, y4];
aspect = float(len(x))/float(len(y))
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
mpl.rcParams['xtick.labelsize'] = tick_size
mpl.rcParams['ytick.labelsize'] = tick_size
fig, plots = plt.subplots(1, 3, figsize=(15,5))
for i in plots:
i.set_aspect(1.0)
i.set_xlabel("$x$ $(\mu m)$", fontsize=axis_size)
i.set_ylabel("$y$ $(\mu m)$", fontsize=axis_size)
#i.tick_params(axis = 'both', which = 'major', labelsize=tick_size)
titles = ["$E_x$ field", "$E_y$ field", "$E_z$ field"]
for i in range(0, 3):
    abs_max = np.max(abs(fields[i]))
norm = mpl.colors.Normalize(vmin=-abs_max,vmax=abs_max)
im = plots[i].pcolormesh(X,Y,fields[i], cmap=cmap, shading=shading, norm=norm)
plots[i].set_title(titles[i], fontsize=title_size)
start, end = plots[i].get_xlim()
plots[i].xaxis.set_ticks(np.arange(start, end, step_size))
start, end = plots[i].get_ylim()
plots[i].yaxis.set_ticks(np.arange(start, end, step_size))
#plots[i].ytick_labels(fontsize=axis_size)
divider = make_axes_locatable(plots[i])
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, ticks=[-abs_max,0,abs_max], cax=cax)
cbar.ax.set_yticklabels(['-1', '0', '1'], fontsize=axis_size)
plt.tight_layout()
plt.savefig("E-Fields_Mode_" + str(mode) + ".png", dpi=200)
#plt.savefig("E-Fields_Mode_" + str(mode) + ".eps", format='eps')
fig2, plots2 = plt.subplots(1, 3, figsize=(15,5))
for i in plots2:
i.set_aspect(1.0)
i.set_xlabel("$x$ $(\mu m)$", fontsize=axis_size)
i.set_ylabel("$y$ $(\mu m)$", fontsize=axis_size)
#i.tick_params(axis = 'both', which = 'major', labelsize=tick_size)
titles = ["$H_x$ field", "$H_y$ field", "$H_z$ field"]
for i in range(0, 3):
    abs_max = np.max(abs(fields[i+3]))
norm = mpl.colors.Normalize(vmin=-abs_max,vmax=abs_max)
im = plots2[i].pcolormesh(X,Y,fields[i+3], cmap=cmap, shading=shading, norm=norm)
plots2[i].set_title(titles[i], fontsize=title_size)
start, end = plots2[i].get_xlim()
plots2[i].xaxis.set_ticks(np.arange(start, end, step_size))
start, end = plots2[i].get_ylim()
plots2[i].yaxis.set_ticks(np.arange(start, end, step_size))
divider = make_axes_locatable(plots2[i])
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig2.colorbar(im, ticks=[-abs_max,0,abs_max], cax=cax)
cbar.ax.set_yticklabels(['-1', '0', '1'], fontsize=axis_size)
plt.xticks(fontsize=25)
plt.tight_layout()
plt.savefig("H-fields_Mode_" + str(mode) + ".png", dpi=200)
#plt.savefig("H-fields_Mode_" + str(mode) + ".eps", format='eps')
plt.show()
"""
n = 0
for i in plots:
#print("nothing")
val = len(get_field(arr[n+2]))/2
if (n < 2):
i.plot(np.linspace(-val, val, 2*val), get_field(arr[n+2]), "k-")
n+=1
"""
#plots[0].plot(((arr[0]-80)**2+(arr[1]-80)**2)**0.5, (arr[5]**2 + arr[6]**2)**0.5, "kx")
#eigenVal[n-2]
#plt.plot(np.linspace(0, 10, len(eigenVal)), eigenVal, "r-");
#print(len(arr))
#plt.plot(x,y, "-");
#plt.xlabel("r (arb. units)")
#plt.ylabel("Et (arb. units)")
``` |
{
"source": "08Np08/Make-Pickup-Teams-",
"score": 4
} |
#### File: 08Np08/Make-Pickup-Teams-/pickup.py
```python
from operator import attrgetter
import pickle
import random
def main():
choice = input("To see players enter 1,\nTo add players enter 2,\
\nTo temp remove players and then make teams enter 3,\
\nTo make a random team enter 4,\
                    \nTo permanently remove players enter 5,\
\nTo edit a player enter 6\
\nTo make teams enter 7,\
\nTo make random fair teams enter 8\
\nTo exit enter exit:")
if choice == "1":
print("\nForwards"+"("+str(len(open_forwards()))+")")
print("\nName \t\t Skill \t Speed \t Position\n"+"-"*38)
for i in open_forwards():
print(i.get_name(),"\t\t",i.get_skill(),"\t",i.get_speed(),"\t",i.get_position())
print("\nDefensemen"+"("+str(len(open_defense()))+")")
print("\nName \t\t Skill \t Speed \t Position\n"+"-"*38)
for i in open_defense():
print(i.get_name(),"\t\t",i.get_skill(),"\t",i.get_speed(),"\t",i.get_position())
elif choice == "2":
make_list()
elif choice == "3":
offense_list, defense_list = temp_remove_player()
team1,team1_score,team1_d_score,team2,team2_score,team2_d_score = make_fair_teams(
offense_list,defense_list)
print("Team 1\n"+"-"*10)
for i in team1:
print(i)
print("\nOffense Score:",team1_score,"Defense Score:",team1_d_score)
print("\nTeam 2\n"+"-"*10)
for i in team2:
print(i)
print("\nOffense Score:",team2_score,"Defense Score:",team2_d_score)
elif choice == "4":
team1,team2 = make_random_teams(open_forwards(),open_defense())
team1_score,team1_d_score,team2_score,team2_d_score = team_score(team1,team2)
print("Team 1\n"+"-"*10)
for i in team1:
print(i)
print("\nOffense Score:",team1_score,"Defense Score:",team1_d_score)
print("\nTeam 2\n"+"-"*10)
for i in team2:
print(i)
print("\nOffense Score:",team2_score,"Defense Score:",team2_d_score)
elif choice == "5":
remove_player()
elif choice == "6":
edit_player()
elif choice == "7":
team1,team2 = make_teams(open_forwards(),open_defense())
team1_score,team1_d_score,team2_score,team2_d_score = team_score(team1,team2)
print("Team 1\n"+"-"*10)
for i in team1:
print(i)
print("\nOffense Score:",team1_score,"Defense Score:",team1_d_score)
print("\nTeam 2\n"+"-"*10)
for i in team2:
print(i)
print("\nOffense Score:",team2_score,"Defense Score:",team2_d_score)
elif choice == "8":
team1, team1_score,team1_d_score, team2, team2_score,team2_d_score = make_fair_teams(
open_forwards(),open_defense())
print("Team 1\n"+"-"*10)
for i in team1:
print(i)
print("\nOffense Score:",team1_score,"Defense Score:",team1_d_score)
print("\nTeam 2\n"+"-"*10)
for i in team2:
print(i)
print("\nOffense Score:",team2_score,"Defense Score:",team2_d_score)
else:
print("Invalid entry")
class Player:
def __init__(self,name,position,skill,speed):
self.name = name
self.position = position
self.skill = skill
self.speed = speed
def get_name(self):
return(self.name)
def get_position(self):
return(self.position)
def get_skill(self):
return(self.skill)
def get_speed(self):
return(self.speed)
def set_name(self,name):
self.name = name
def set_position(self,position):
self.position = position
def set_skill(self,skill):
self.skill = skill
def set_speed(self,speed):
self.speed = speed
def __repr__(self):
return self.name
def make_list():
offense_list = open_forwards()
defense_list = open_defense()
while True:
name = input("Name of player:")
skill = int(input("Skill level (0-10):"))
speed = int(input("What is the players speed (0-10):"))
position = input("Is the player a foward or a defenseman(D or F):")
position = position.upper()
if position == "D":
position = "Defenseman"
elif position == "F":
position = "Forward"
end = input("Enter another player(y/n):")
name = Player(name,position,skill,speed)
if name.get_position() == "Defenseman":
defense_list.append(name)
elif name.get_position() == "Forward":
offense_list.append(name)
if end == "n":
break
file1 = open("forwards","wb")
pickle.dump(offense_list,file1)
file1.close()
file2 = open("defense","wb")
pickle.dump(defense_list,file2)
file2.close()
return(offense_list,defense_list)
def make_teams(offense_list,defense_list):
team1_offense = []
team1_defense = []
team2_offense = []
team2_defense = []
count = 1
offense_list = sorted(offense_list, key =attrgetter("skill"))
defense_list = sorted(defense_list, key =attrgetter("skill"))
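# Deal the skill-sorted players out alternately so each team gets a similar
# spread of skill; the running count carries over from forwards to defensemen.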
for i in offense_list:
if count % 2 == 0:
team1_offense.append(i)
else:
team2_offense.append(i)
count+=1
for i in defense_list:
if count % 2 == 1:
team1_defense.append(i)
else:
team2_defense.append(i)
count+=1
team1 = team1_offense + team1_defense
team2 = team2_offense + team2_defense
return(team1,team2)
def open_forwards():
try:
file = open("forwards","rb")
list1 = pickle.load(file)
file.close()
except:
list1 = []
return(list1)
def open_defense():
try:
file = open("defense","rb")
list1 = pickle.load(file)
file.close()
except:
list1 = []
return(list1)
def temp_remove_player():
offense_list = open_forwards()
defense_list = open_defense()
name = input("Enter a player to remove:")
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
del offense_list[i]
except:
continue
for i in range(len(defense_list)):
try:
if str(defense_list[i]) == name:
del defense_list[i]
except:
continue
return(offense_list,defense_list)
def remove_player():
offense_list = open_forwards()
defense_list = open_defense()
name = input("Enter a player to remove:")
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
del offense_list[i]
except:
continue
for i in range(len(defense_list)):
try:
if str(defense_list[i]) == name:
del defense_list[i]
except:
continue
for i in offense_list:
print(i)
file1 = open("forwards","wb")
pickle.dump(offense_list,file1)
file1.close()
file2 = open("defense","wb")
pickle.dump(defense_list,file2)
file2.close()
def edit_player():
offense_list = open_forwards()
defense_list = open_defense()
done = False
name = input("What is the name of the player you want to edit:")
choice = input("what would you like to edit(Enter: 1 for name, 2 for skill, 3 for speed,\
4 to switch position):")
if choice == "1":
new_name = input("Enter the new name:")
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
offense_list[i].set_name(new_name)
except:
continue
for i in range(len(defense_list)):
try:
if str(defense_list[i]) == name:
defense_list[i].set_name(new_name)
except:
continue
elif choice == "2":
new_skill = int(input("Enter the new skil level(0-10):"))
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
offense_list[i].set_skill(new_skill)
except:
continue
for i in range(len(defense_list)):
try:
if str(defense_list[i]) == name:
defense_list[i].set_skill(new_skill)
except:
continue
elif choice == "3":
new_speed = int(input("Enter the new speed (0-10):"))
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
offense_list[i].set_speed(new_speed)
except:
continue
for i in range(len(defense_list)):
try:
if str(defense_list[i]) == name:
defense_list[i].set_speed(new_speed)
except:
continue
elif choice == "4":
for i in range(len(offense_list)):
try:
if str(offense_list[i]) == name:
offense_list[i].set_position("Defenseman")
defense_list.append(offense_list[i])
print(i)
del offense_list[i]
done = True
except:
continue
for i in range(len(defense_list)):
if done == True:
break
try:
if str(defense_list[i]) == name:
defense_list[i].set_position("Forward")
offense_list.append(defense_list[i])
del defense_list[i]
except:
continue
file1 = open("forwards","wb")
pickle.dump(offense_list,file1)
file1.close()
file2 = open("defense","wb")
pickle.dump(defense_list,file2)
file2.close()
def make_random_teams(offense_list,defense_list):
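# Shuffle each position group by moving every player to a randomly chosen free
# slot, then deal the shuffled lists alternately to the two teams.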
sorted_offense_list = []
sorted_defense_list = []
rand_o_list = []
rand_d_list = []
team1_offense = []
team1_defense = []
team2_offense = []
team2_defense = []
count = 1
for i in range(len(offense_list)):
rand_o_list.append(i)
for i in range(len(defense_list)):
rand_d_list.append(i)
for i in range(len(offense_list)):
sorted_offense_list.append(i)
for i in range(len(defense_list)):
sorted_defense_list.append(i)
for i in defense_list:
rand1 = random.randint(0,len(rand_d_list)-1)
rand2 = rand_d_list[rand1]
del rand_d_list[rand1]
sorted_defense_list[rand2]=i
for i in offense_list:
rand1 = random.randint(0,len(rand_o_list)-1)
rand2 = rand_o_list[rand1]
del rand_o_list[rand1]
sorted_offense_list[rand2]=i
for i in sorted_offense_list:
if count % 2 == 0:
team1_offense.append(i)
else:
team2_offense.append(i)
count+=1
for i in sorted_defense_list:
if count % 2 == 0:
team1_defense.append(i)
else:
team2_defense.append(i)
count+=1
team1 = team1_offense + team1_defense
team2 = team2_offense + team2_defense
return(team1,team2)
def team_score(team1,team2):
team1_score = 0
team2_score = 0
team1_d_score = 0
team2_d_score = 0
for i in team1:
if str(i.get_position()) == "Forward":
x = i.get_skill()
team1_score += x
elif str(i.get_position()) == "Defenseman":
x = i.get_skill()
team1_d_score += x
for i in team2:
if str(i.get_position()) == "Forward":
x = i.get_skill()
team2_score += x
elif str(i.get_position()) == "Defenseman":
x = i.get_skill()
team2_d_score += x
return(team1_score,team2_score,team1_d_score,team2_d_score)
def make_fair_teams(o_list,d_list):
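# Simple rejection sampling: keep drawing random teams until the offense and
# defense skill totals of the two sides each differ by less than 3 points.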
while True:
team1, team2 = make_random_teams(o_list,d_list)
team1_score,team2_score,team1_d_score,team2_d_score = team_score(team1,team2)
d_score_difference = team1_d_score - team2_d_score
o_score_difference = team1_score - team2_score
if abs(o_score_difference) < 3 and abs(d_score_difference) < 3:
break
return(team1,team1_score,team1_d_score,team2,team2_score,team2_d_score)
main()
``` |
{
"source": "08saikiranreddy/ipython",
"score": 2
} |
#### File: IPython/core/history.py
```python
from __future__ import print_function
# Stdlib imports
import fnmatch
import json
import os
import sys
# Our own packages
import IPython.utils.io
from IPython.utils.pickleshare import PickleShareDB
from IPython.utils.io import ask_yes_no
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class HistoryManager(object):
"""A class to organize all history-related functionality in one place.
"""
# Public interface
# An instance of the IPython shell we are attached to
shell = None
# A list to hold processed history
input_hist_parsed = None
# A list to hold raw history (as typed by user)
input_hist_raw = None
# A list of directories visited during session
dir_hist = None
# A dict of output history, keyed with ints from the shell's execution count
output_hist = None
# String with path to the history file
hist_file = None
# PickleShareDB instance holding the raw data for the shadow history
shadow_db = None
# ShadowHist instance with the actual shadow history
shadow_hist = None
# Private interface
# Variables used to store the three last inputs from the user. On each new
# history update, we populate the user's namespace with these, shifted as
# necessary.
_i00, _i, _ii, _iii = '','','',''
# A set with all forms of the exit command, so that we don't store them in
# the history (it's annoying to rewind the first entry and land on an exit
# call).
_exit_commands = None
def __init__(self, shell):
"""Create a new history manager associated with a shell instance.
"""
# We need a pointer back to the shell for various tasks.
self.shell = shell
# List of input with multi-line handling.
self.input_hist_parsed = []
# This one will hold the 'raw' input history, without any
# pre-processing. This will allow users to retrieve the input just as
# it was exactly typed in by the user, with %hist -r.
self.input_hist_raw = []
# list of visited directories
try:
self.dir_hist = [os.getcwd()]
except OSError:
self.dir_hist = []
# dict of output history
self.output_hist = {}
# Now the history file
if shell.profile:
histfname = 'history-%s' % shell.profile
else:
histfname = 'history'
self.hist_file = os.path.join(shell.ipython_dir, histfname + '.json')
# Objects related to shadow history management
self._init_shadow_hist()
self._i00, self._i, self._ii, self._iii = '','','',''
self._exit_commands = set(['Quit', 'quit', 'Exit', 'exit', '%Quit',
'%quit', '%Exit', '%exit'])
# Object is fully initialized, we can now call methods on it.
# Fill the history zero entry, user counter starts at 1
self.store_inputs('\n', '\n')
def _init_shadow_hist(self):
try:
self.shadow_db = PickleShareDB(os.path.join(
self.shell.ipython_dir, 'db'))
except UnicodeDecodeError:
print("Your ipython_dir can't be decoded to unicode!")
print("Please set HOME environment variable to something that")
print(r"only has ASCII characters, e.g. c:\home")
print("Now it is", self.ipython_dir)
sys.exit()
self.shadow_hist = ShadowHist(self.shadow_db, self.shell)
def populate_readline_history(self):
"""Populate the readline history from the raw history.
We only store one copy of the raw history, which is persisted to a json
file on disk. The readline history is repopulated from the contents of
this file."""
try:
self.shell.readline.clear_history()
except AttributeError:
pass
else:
for h in self.input_hist_raw:
if not h.isspace():
for line in h.splitlines():
self.shell.readline.add_history(line)
def save_history(self):
"""Save input history to a file (via readline library)."""
hist = dict(raw=self.input_hist_raw, #[-self.shell.history_length:],
parsed=self.input_hist_parsed) #[-self.shell.history_length:])
with open(self.hist_file,'wt') as hfile:
json.dump(hist, hfile,
sort_keys=True, indent=4)
def reload_history(self):
"""Reload the input history from disk file."""
with open(self.hist_file,'rt') as hfile:
hist = json.load(hfile)
self.input_hist_parsed = hist['parsed']
self.input_hist_raw = hist['raw']
if self.shell.has_readline:
self.populate_readline_history()
def get_history(self, index=None, raw=False, output=True):
"""Get the history list.
Get the input and output history.
Parameters
----------
index : n or (n1, n2) or None
If n, then the last entries. If a tuple, then all in
range(n1, n2). If None, then all entries. Raises IndexError if
the format of index is incorrect.
raw : bool
If True, return the raw input.
output : bool
If True, then return the output as well.
Returns
-------
If output is True, then return a dict of tuples, keyed by the prompt
numbers and with values of (input, output). If output is False, then
a dict, keyed by the prompt number with the values of input. Raises
IndexError if no history is found.
"""
if raw:
input_hist = self.input_hist_raw
else:
input_hist = self.input_hist_parsed
if output:
output_hist = self.output_hist
n = len(input_hist)
if index is None:
start=0; stop=n
elif isinstance(index, int):
start=n-index; stop=n
elif isinstance(index, tuple) and len(index) == 2:
start=index[0]; stop=index[1]
else:
raise IndexError('Not a valid index for the input history: %r'
% index)
hist = {}
for i in range(start, stop):
if output:
hist[i] = (input_hist[i], output_hist.get(i))
else:
hist[i] = input_hist[i]
if not hist:
raise IndexError('No history for range of indices: %r' % index)
return hist
def store_inputs(self, source, source_raw=None):
"""Store source and raw input in history and create input cache
variables _i*.
Parameters
----------
source : str
Python input.
source_raw : str, optional
If given, this is the raw input without any IPython transformations
applied to it. If not given, ``source`` is used.
"""
if source_raw is None:
source_raw = source
# do not store exit/quit commands
if source_raw.strip() in self._exit_commands:
return
self.input_hist_parsed.append(source.rstrip())
self.input_hist_raw.append(source_raw.rstrip())
self.shadow_hist.add(source)
# update the auto _i variables
self._iii = self._ii
self._ii = self._i
self._i = self._i00
self._i00 = source_raw
# hackish access to user namespace to create _i1,_i2... dynamically
new_i = '_i%s' % self.shell.execution_count
to_main = {'_i': self._i,
'_ii': self._ii,
'_iii': self._iii,
new_i : self._i00 }
self.shell.user_ns.update(to_main)
def sync_inputs(self):
"""Ensure raw and translated histories have same length."""
if len(self.input_hist_parsed) != len (self.input_hist_raw):
self.input_hist_raw[:] = self.input_hist_parsed
def reset(self):
"""Clear all histories managed by this object."""
self.input_hist_parsed[:] = []
self.input_hist_raw[:] = []
self.output_hist.clear()
# The directory history can't be completely empty
self.dir_hist[:] = [os.getcwd()]
def magic_history(self, parameter_s = ''):
"""Print input history (_i<n> variables), with most recent last.
%history -> print at most 40 inputs (some may be multi-line)\\
%history n -> print at most n inputs\\
%history n1 n2 -> print inputs between n1 and n2 (n2 not included)\\
By default, input history is printed without line numbers so it can be
directly pasted into an editor.
With -n, each input's number <n> is shown, and is accessible as the
automatically generated variable _i<n> as well as In[<n>]. Multi-line
statements are printed starting at a new line for easy copy/paste.
Options:
-n: print line numbers for each input.
This feature is only available if numbered prompts are in use.
-o: also print outputs for each input.
-p: print classic '>>>' python prompts before each input. This is useful
for making documentation, and in conjunction with -o, for producing
doctest-ready output.
-r: (default) print the 'raw' history, i.e. the actual commands you typed.
-t: print the 'translated' history, as IPython understands it. IPython
filters your input and converts it all into valid Python source before
executing it (things like magics or aliases are turned into function
calls, for example). With this option, you'll see the native history
instead of the user-entered version: '%cd /' will be seen as
'get_ipython().magic("%cd /")' instead of '%cd /'.
-g: treat the arg as a pattern to grep for in (full) history.
This includes the "shadow history" (almost all commands ever written).
Use '%hist -g' to show full shadow history (may be very long).
In shadow history, every index number starts with 0.
-f FILENAME: instead of printing the output to the screen, redirect it to
the given file. The file is always overwritten, though IPython asks for
confirmation first if it already exists.
"""
if not self.shell.displayhook.do_full_cache:
print('This feature is only available if numbered prompts are in use.')
return
opts,args = self.parse_options(parameter_s,'gnoptsrf:',mode='list')
# Check if output to specific file was requested.
try:
outfname = opts['f']
except KeyError:
outfile = IPython.utils.io.Term.cout # default
# We don't want to close stdout at the end!
close_at_end = False
else:
if os.path.exists(outfname):
if not ask_yes_no("File %r exists. Overwrite?" % outfname):
print('Aborting.')
return
outfile = open(outfname,'w')
close_at_end = True
if 't' in opts:
input_hist = self.shell.history_manager.input_hist_parsed
elif 'r' in opts:
input_hist = self.shell.history_manager.input_hist_raw
else:
# Raw history is the default
input_hist = self.shell.history_manager.input_hist_raw
default_length = 40
pattern = None
if 'g' in opts:
init = 1
final = len(input_hist)
parts = parameter_s.split(None, 1)
if len(parts) == 1:
parts.append('*')
head, pattern = parts
pattern = "*" + pattern + "*"
elif len(args) == 0:
final = len(input_hist)-1
init = max(1,final-default_length)
elif len(args) == 1:
final = len(input_hist)
init = max(1, final-int(args[0]))
elif len(args) == 2:
init, final = map(int, args)
else:
warn('%hist takes 0, 1 or 2 arguments separated by spaces.')
print(self.magic_hist.__doc__, file=IPython.utils.io.Term.cout)
return
width = len(str(final))
line_sep = ['','\n']
print_nums = 'n' in opts
print_outputs = 'o' in opts
pyprompts = 'p' in opts
found = False
if pattern is not None:
sh = self.shell.history_manager.shadow_hist.all()
for idx, s in sh:
if fnmatch.fnmatch(s, pattern):
print("0%d: %s" %(idx, s.expandtabs(4)), file=outfile)
found = True
if found:
print("===", file=outfile)
print("shadow history ends, fetch by %rep <number> (must start with 0)",
file=outfile)
print("=== start of normal history ===", file=outfile)
for in_num in range(init, final):
# Print user history with tabs expanded to 4 spaces. The GUI clients
# use hard tabs for easier usability in auto-indented code, but we want
# to produce PEP-8 compliant history for safe pasting into an editor.
inline = input_hist[in_num].expandtabs(4).rstrip()+'\n'
if pattern is not None and not fnmatch.fnmatch(inline, pattern):
continue
multiline = int(inline.count('\n') > 1)
if print_nums:
print('%s:%s' % (str(in_num).ljust(width), line_sep[multiline]),
file=outfile)
if pyprompts:
print('>>>', file=outfile)
if multiline:
lines = inline.splitlines()
print('\n... '.join(lines), file=outfile)
print('... ', file=outfile)
else:
print(inline, end='', file=outfile)
else:
print(inline, end='', file=outfile)
if print_outputs:
output = self.shell.history_manager.output_hist.get(in_num)
if output is not None:
print(repr(output), file=outfile)
if close_at_end:
outfile.close()
def magic_hist(self, parameter_s=''):
"""Alternate name for %history."""
return self.magic_history(parameter_s)
def rep_f(self, arg):
r""" Repeat a command, or get command to input line for editing
- %rep (no arguments):
Place a string version of last computation result (stored in the special '_'
variable) to the next input prompt. Allows you to create elaborate command
lines without using copy-paste::
$ l = ["hei", "vaan"]
$ "".join(l)
==> heivaan
$ %rep
$ heivaan_ <== cursor blinking
%rep 45
Place history line 45 to next input prompt. Use %hist to find out the
number.
%rep 1-4 6-7 3
Repeat the specified lines immediately. Input slice syntax is the same as
in %macro and %save.
%rep foo
Place the most recent line that has the substring "foo" to next input.
(e.g. 'svn ci -m foobar').
"""
opts,args = self.parse_options(arg,'',mode='list')
if not args:
self.set_next_input(str(self.shell.user_ns["_"]))
return
if len(args) == 1 and not '-' in args[0]:
arg = args[0]
if len(arg) > 1 and arg.startswith('0'):
# get from shadow hist
num = int(arg[1:])
line = self.shell.shadowhist.get(num)
self.set_next_input(str(line))
return
try:
num = int(args[0])
self.set_next_input(str(self.shell.input_hist_raw[num]).rstrip())
return
except ValueError:
pass
for h in reversed(self.shell.input_hist_raw):
if 'rep' in h:
continue
if fnmatch.fnmatch(h,'*' + arg + '*'):
self.set_next_input(str(h).rstrip())
return
try:
lines = self.extract_input_slices(args, True)
print("lines", lines)
self.run_cell(lines)
except ValueError:
print("Not found in recent history:", args)
_sentinel = object()
class ShadowHist(object):
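# Persistent command -> index mapping backed by a PickleShareDB; this is the
# "shadow history" that %hist -g searches and %rep 0<n> retrieves from.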
def __init__(self, db, shell):
# cmd => idx mapping
self.curidx = 0
self.db = db
self.disabled = False
self.shell = shell
def inc_idx(self):
idx = self.db.get('shadowhist_idx', 1)
self.db['shadowhist_idx'] = idx + 1
return idx
def add(self, ent):
if self.disabled:
return
try:
old = self.db.hget('shadowhist', ent, _sentinel)
if old is not _sentinel:
return
newidx = self.inc_idx()
#print("new", newidx) # dbg
self.db.hset('shadowhist',ent, newidx)
except:
self.shell.showtraceback()
print("WARNING: disabling shadow history")
self.disabled = True
def all(self):
d = self.db.hdict('shadowhist')
items = [(i,s) for (s,i) in d.iteritems()]
items.sort()
return items
def get(self, idx):
all = self.all()
for k, v in all:
if k == idx:
return v
def init_ipython(ip):
ip.define_magic("rep",rep_f)
ip.define_magic("hist",magic_hist)
ip.define_magic("history",magic_history)
# XXX - ipy_completers are in quarantine, need to be updated to new apis
#import ipy_completers
#ipy_completers.quick_completer('%hist' ,'-g -t -r -n')
```
#### File: IPython/core/magic.py
```python
import __builtin__
import __future__
import bdb
import inspect
import os
import sys
import shutil
import re
import time
import textwrap
import types
from cStringIO import StringIO
from getopt import getopt,GetoptError
from pprint import pformat
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile,pstats
except ImportError:
profile = pstats = None
import IPython
from IPython.core import debugger, oinspect
from IPython.core.error import TryNext
from IPython.core.error import UsageError
from IPython.core.fakemodule import FakeModule
from IPython.core.macro import Macro
from IPython.core import page
from IPython.core.prefilter import ESC_MAGIC
from IPython.lib.pylabtools import mpl_runner
from IPython.external.Itpl import itpl, printpl
from IPython.testing import decorators as testdec
from IPython.utils.io import file_read, nlprint
import IPython.utils.io
from IPython.utils.path import get_py_filename
from IPython.utils.process import arg_split, abbrev_cwd
from IPython.utils.terminal import set_term_title
from IPython.utils.text import LSString, SList, StringTypes, format_screen
from IPython.utils.timing import clock, clock2
from IPython.utils.warn import warn, error
from IPython.utils.ipstruct import Struct
import IPython.utils.generics
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ['OFF','ON'][tag]
class Bunch: pass
def compress_dhist(dh):
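# Keep the 10 most recent directory entries verbatim and de-duplicate the older
# ones while preserving their original order.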
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
#***************************************************************************
# Main class implementing Magic functionality
# XXX - for some odd reason, if Magic is made a new-style class, we get errors
# on construction of the main InteractiveShell object. Something odd is going
# on with super() calls, Configurable and the MRO... For now leave it as-is, but
# eventually this needs to be clarified.
# BG: This is because InteractiveShell inherits from this, but is itself a
# Configurable. This messes up the MRO in some way. The fix is that we need to
# make Magic a configurable that InteractiveShell does not subclass.
class Magic:
"""Magic functions for InteractiveShell.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
ALL definitions MUST begin with the prefix magic_. The user won't need it
at the command line, but it is needed in the definition. """
# class globals
auto_status = ['Automagic is OFF, % prefix IS needed for magic functions.',
'Automagic is ON, % prefix NOT needed for magic functions.']
#......................................................................
# some utility functions
def __init__(self,shell):
self.options_table = {}
if profile is None:
self.magic_prun = self.profile_missing_notice
self.shell = shell
# namespace for holding state we may need
self._magic_state = Bunch()
def profile_missing_notice(self, *args, **kwargs):
error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
def default_option(self,fn,optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr
def lsmagic(self):
"""Return a list of currently available magic functions.
Gives a list of the bare names after mangling (['ls','cd', ...], not
['magic_ls','magic_cd',...]"""
# FIXME. This needs a cleanup, in the way the magics list is built.
# magics in class definition
class_magic = lambda fn: fn.startswith('magic_') and \
callable(Magic.__dict__[fn])
# in instance namespace (run-time user additions)
inst_magic = lambda fn: fn.startswith('magic_') and \
callable(self.__dict__[fn])
# and bound magics by user (so they can access self):
inst_bound_magic = lambda fn: fn.startswith('magic_') and \
callable(self.__class__.__dict__[fn])
magics = filter(class_magic,Magic.__dict__.keys()) + \
filter(inst_magic,self.__dict__.keys()) + \
filter(inst_bound_magic,self.__class__.__dict__.keys())
out = []
for fn in set(magics):
out.append(fn.replace('magic_','',1))
out.sort()
return out
def extract_input_slices(self,slices,raw=False):
"""Return as a string a set of input history slices.
Inputs:
- slices: the set of slices is given as a list of strings (like
['1','4:8','9'], since this function is for use by magic functions
which get their arguments as strings.
Optional inputs:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint)."""
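# Illustrative (not in the original source): slices == ['1', '4:8', '9'] would
# return the joined history entries 1, 4..7 and 9 as three strings.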
if raw:
hist = self.shell.history_manager.input_hist_raw
else:
hist = self.shell.history_manager.input_hist_parsed
cmds = []
for chunk in slices:
if ':' in chunk:
ini,fin = map(int,chunk.split(':'))
elif '-' in chunk:
ini,fin = map(int,chunk.split('-'))
fin += 1
else:
ini = int(chunk)
fin = ini+1
cmds.append(''.join(hist[ini:fin]))
return cmds
def arg_err(self,func):
"""Print docstring if incorrect arguments were passed"""
print 'Error in arguments:'
print oinspect.getdoc(func)
def format_latex(self,strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng
def parse_options(self,arg_str,opt_str,*long_opts,**kw):
"""Parse options passed to an argument string.
The interface is similar to that of getopt(), but it returns back a
Struct with the options as keys and the stripped argument string still
as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Options:
-mode: default 'string'. If given as 'list', the argument string is
returned as a list (split on whitespace) instead of a string.
-list_all: put all option values in lists. Normally only options
appearing more than once are put in a list.
-posix (True): whether to split the input line in POSIX mode or not,
as per the conventions outlined in the shlex module from the
standard library."""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name.replace('magic_','')
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError,'incorrect mode given: %s' % mode
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str,posix)
# Do regular option processing
try:
opts,args = getopt(argv,opt_str,*long_opts)
except GetoptError,e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args
#......................................................................
# And now the actual magic functions
# Functions for IPython shell work (vars,funcs, config, etc)
def magic_lsmagic(self, parameter_s = ''):
"""List currently available magic functions."""
mesc = ESC_MAGIC
print 'Available magic functions:\n'+mesc+\
(' '+mesc).join(self.lsmagic())
print '\n' + Magic.auto_status[self.shell.automagic]
return None
def magic_magic(self, parameter_s = ''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
if parameter_s.split()[0] == '-latex':
mode = 'latex'
if parameter_s.split()[0] == '-brief':
mode = 'brief'
if parameter_s.split()[0] == '-rest':
mode = 'rest'
rest_docs = []
except:
pass
magic_docs = []
for fname in self.lsmagic():
mname = 'magic_' + fname
for space in (Magic,self,self.__class__):
try:
fn = space.__dict__[mname]
except KeyError:
pass
else:
break
if mode == 'brief':
# only first line
if fn.__doc__:
fndoc = fn.__doc__.split('\n',1)[0]
else:
fndoc = 'No documentation'
else:
if fn.__doc__:
fndoc = fn.__doc__.rstrip()
else:
fndoc = 'No documentation'
if mode == 'rest':
rest_docs.append('**%s%s**::\n\n\t%s\n\n' %(ESC_MAGIC,
fname,fndoc))
else:
magic_docs.append('%s%s:\n\t%s\n' %(ESC_MAGIC,
fname,fndoc))
magic_docs = ''.join(magic_docs)
if mode == 'rest':
return "".join(rest_docs)
if mode == 'latex':
print self.format_latex(magic_docs)
return
else:
magic_docs = format_screen(magic_docs)
if mode == 'brief':
return magic_docs
outmsg = """
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. All these functions are prefixed with a % character, but parameters
are given without parentheses or quotes.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
You can define your own magic functions to extend the system. See the supplied
ipythonrc and example-magic.py files for details (in your ipython
configuration directory, typically $HOME/.ipython/).
You can also define your own aliased names for magic functions. In your
ipythonrc file, placing a line like:
execute __IPYTHON__.magic_pf = __IPYTHON__.magic_profile
will define %pf as a new name for %profile.
You can also call magics in code using the magic() function, which IPython
automatically adds to the builtin namespace. Type 'magic?' for details.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:\n"""
mesc = ESC_MAGIC
outmsg = ("%s\n%s\n\nSummary of magic functions (from %slsmagic):"
"\n\n%s%s\n\n%s" % (outmsg,
magic_docs,mesc,mesc,
(' '+mesc).join(self.lsmagic()),
Magic.auto_status[self.shell.automagic] ) )
page.page(outmsg)
def magic_automagic(self, parameter_s = ''):
"""Make magic functions callable without having to type the initial %.
Without arguments, toggles on/off (when off, you must call it as
%automagic, of course). With arguments it sets the value, and you can
use any of (case insensitive):
- on,1,True: to activate
- off,0,False: to deactivate.
Note that magic functions have lowest priority, so if there's a
variable whose name collides with that of a magic fn, automagic won't
work for that function (you get the variable instead). However, if you
delete the variable (del var), the previously shadowed magic function
becomes visible to automagic again."""
arg = parameter_s.lower()
if arg in ('on','1','true'):
self.shell.automagic = True
elif arg in ('off','0','false'):
self.shell.automagic = False
else:
self.shell.automagic = not self.shell.automagic
print '\n' + Magic.auto_status[self.shell.automagic]
@testdec.skip_doctest
def magic_autocall(self, parameter_s = ''):
"""Make functions callable without having to type parentheses.
Usage:
%autocall [mode]
The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
value is toggled on and off (remembering the previous state).
In more detail, these values mean:
0 -> fully disabled
1 -> active, but do not apply if there are no arguments on the line.
In this mode, you get:
In [1]: callable
Out[1]: <built-in function callable>
In [2]: callable 'hello'
------> callable('hello')
Out[2]: False
2 -> Active always. Even if no arguments are present, the callable
object is called:
In [2]: float
------> float()
Out[2]: 0.0
Note that even with autocall off, you can still use '/' at the start of
a line to treat the first argument on the command line as a function
and add parentheses to it:
In [8]: /str 43
------> str(43)
Out[8]: '43'
# all-random (note for auto-testing)
"""
if parameter_s:
arg = int(parameter_s)
else:
arg = 'toggle'
if not arg in (0,1,2,'toggle'):
error('Valid modes: 0->Off, 1->Smart, 2->Full')
return
if arg in (0,1,2):
self.shell.autocall = arg
else: # toggle
if self.shell.autocall:
self._magic_state.autocall_save = self.shell.autocall
self.shell.autocall = 0
else:
try:
self.shell.autocall = self._magic_state.autocall_save
except AttributeError:
self.shell.autocall = self._magic_state.autocall_save = 1
print "Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall]
def magic_page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by <NAME>, slightly modified.
# Process options/args
opts,args = self.parse_options(parameter_s,'r')
raw = 'r' in opts
oname = args and args or '_'
info = self._ofind(oname)
if info['found']:
txt = (raw and str or pformat)( info['obj'] )
page.page(txt)
else:
print 'Object `%s` not found' % oname
def magic_profile(self, parameter_s=''):
"""Print your currently active IPython profile."""
if self.shell.profile:
printpl('Current IPython profile: $self.shell.profile.')
else:
print 'No profile active.'
def magic_pinfo(self, parameter_s='', namespaces=None):
"""Provide detailed information about an object.
'%pinfo object' is just a synonym for object? or ?object."""
#print 'pinfo par: <%s>' % parameter_s # dbg
# detail_level: 0 -> obj? , 1 -> obj??
detail_level = 0
# We need to detect if we got called as 'pinfo pinfo foo', which can
# happen if the user types 'pinfo foo?' at the cmd line.
pinfo,qmark1,oname,qmark2 = \
re.match('(pinfo )?(\?*)(.*?)(\??$)',parameter_s).groups()
if pinfo or qmark1 or qmark2:
detail_level = 1
if "*" in oname:
self.magic_psearch(oname)
else:
self.shell._inspect('pinfo', oname, detail_level=detail_level,
namespaces=namespaces)
def magic_pinfo2(self, parameter_s='', namespaces=None):
"""Provide extra detailed information about an object.
'%pinfo2 object' is just a synonym for object?? or ??object."""
self.shell._inspect('pinfo', parameter_s, detail_level=1,
namespaces=namespaces)
def magic_pdef(self, parameter_s='', namespaces=None):
"""Print the definition header for any callable object.
If the object is a class, print the constructor information."""
self._inspect('pdef',parameter_s, namespaces)
def magic_pdoc(self, parameter_s='', namespaces=None):
"""Print the docstring for an object.
If the given object is a class, it will print both the class and the
constructor docstrings."""
self._inspect('pdoc',parameter_s, namespaces)
def magic_psource(self, parameter_s='', namespaces=None):
"""Print (or run through pager) the source code for an object."""
self._inspect('psource',parameter_s, namespaces)
def magic_pfile(self, parameter_s=''):
"""Print (or run through pager) the file where an object is defined.
The file opens at the line where the object definition begins. IPython
will honor the environment variable PAGER if set, and otherwise will
do its best to print the file in a convenient form.
If the given argument is not an object currently defined, IPython will
try to interpret it as a filename (automatically adding a .py extension
if needed). You can thus use %pfile as a syntax highlighting code
viewer."""
# first interpret argument as an object name
out = self._inspect('pfile',parameter_s)
# if not, try the input as a filename
if out == 'not found':
try:
filename = get_py_filename(parameter_s)
except IOError,msg:
print msg
return
page.page(self.shell.inspector.format(file(filename).read()))
def magic_psearch(self, parameter_s=''):
"""Search for object in namespaces by wildcard.
%psearch [options] PATTERN [OBJECT TYPE]
Note: ? can be used as a synonym for %psearch, at the beginning or at
the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
rest of the command line must be unchanged (options come first), so
for example the following forms are equivalent
%psearch -i a* function
-i a* function?
?-i a* function
Arguments:
PATTERN
where PATTERN is a string containing * as a wildcard similar to its
use in a shell. The pattern is matched in all namespaces on the
search path. By default objects starting with a single _ are not
matched, many IPython generated objects have a single
underscore. The default is case insensitive matching. Matching is
also done on the attributes of objects and not only on the objects
in a module.
[OBJECT TYPE]
Is the name of a python type from the types module. The name is
given in lowercase without the ending type, ex. StringType is
written string. By adding a type here only objects matching the
given type are matched. Using all here makes the pattern match all
types (this is the default).
Options:
-a: makes the pattern match even objects whose names start with a
single underscore. These names are normally omitted from the
search.
-i/-c: make the pattern case insensitive/sensitive. If neither of
these options is given, the default is read from your ipythonrc
file. The option name which sets this value is
'wildcards_case_sensitive'. If this option is not specified in your
ipythonrc file, IPython's internal default is to do a case sensitive
search.
-e/-s NAMESPACE: exclude/search a given namespace. The pattern you
specify can be searched in any of the following namespaces:
'builtin', 'user', 'user_global','internal', 'alias', where
'builtin' and 'user' are the search defaults. Note that you should
not use quotes when specifying namespaces.
'Builtin' contains the python module builtin, 'user' contains all
user data, 'alias' only contain the shell aliases and no python
objects, 'internal' contains objects used by IPython. The
'user_global' namespace is only used by embedded IPython instances,
and it contains module-level globals. You can add namespaces to the
search with -s or exclude them with -e (these options can be given
more than once).
Examples:
%psearch a* -> objects beginning with an a
%psearch -e builtin a* -> objects NOT in the builtin space starting in a
%psearch a* function -> all functions beginning with an a
%psearch re.e* -> objects beginning with an e in module re
%psearch r*.e* -> objects that start with e in modules starting in r
%psearch r*.* string -> all strings in modules beginning with r
Case sensitive search:
%psearch -c a* list all objects beginning with lower case a
Show objects beginning with a single _:
%psearch -a _* list objects beginning with a single underscore"""
try:
parameter_s = parameter_s.encode('ascii')
except UnicodeEncodeError:
print 'Python identifiers can only contain ascii characters.'
return
# default namespaces to be searched
def_search = ['user','builtin']
# Process options/args
opts,args = self.parse_options(parameter_s,'cias:e:',list_all=True)
opt = opts.get
shell = self.shell
psearch = shell.inspector.psearch
# select case options
if opts.has_key('i'):
ignore_case = True
elif opts.has_key('c'):
ignore_case = False
else:
ignore_case = not shell.wildcards_case_sensitive
# Build list of namespaces to search from user options
def_search.extend(opt('s',[]))
ns_exclude = opt('e',[])
ns_search = [nm for nm in def_search if nm not in ns_exclude]
# Call the actual search
try:
psearch(args,shell.ns_table,ns_search,
show_all=opt('a'),ignore_case=ignore_case)
except:
shell.showtraceback()
def magic_who_ls(self, parameter_s=''):
"""Return a sorted list of all interactive variables.
If arguments are given, only variables of types matching these
arguments are returned."""
user_ns = self.shell.user_ns
internal_ns = self.shell.internal_ns
user_ns_hidden = self.shell.user_ns_hidden
out = [ i for i in user_ns
if not i.startswith('_') \
and not (i in internal_ns or i in user_ns_hidden) ]
typelist = parameter_s.split()
if typelist:
typeset = set(typelist)
out = [i for i in out if type(i).__name__ in typeset]
out.sort()
return out
def magic_who(self, parameter_s=''):
"""Print all interactive variables, with some minimal formatting.
If any arguments are given, only variables whose type matches one of
these are printed. For example:
%who function str
will only list functions and strings, excluding all other types of
variables. To find the proper type names, simply use type(var) at a
command line to see how python prints type names. For example:
In [1]: type('hello')\\
Out[1]: <type 'str'>
indicates that the type name for strings is 'str'.
%who always excludes executed names loaded through your configuration
file and things which are internal to IPython.
This is deliberate, as typically you may load many modules and the
purpose of %who is to show you only what you've manually defined."""
varlist = self.magic_who_ls(parameter_s)
if not varlist:
if parameter_s:
print 'No variables match your requested type.'
else:
print 'Interactive namespace is empty.'
return
# if we have variables, move on...
count = 0
for i in varlist:
print i+'\t',
count += 1
if count > 8:
count = 0
print
print
def magic_whos(self, parameter_s=''):
"""Like %who, but gives some extra information about each variable.
The same type filtering of %who can be applied here.
For all variables, the type is printed. Additionally it prints:
- For {},[],(): their length.
- For numpy and Numeric arrays, a summary with shape, number of
elements, typecode and size in memory.
- Everything else: a string representation, snipping their middle if
too long."""
varnames = self.magic_who_ls(parameter_s)
if not varnames:
if parameter_s:
print 'No variables match your requested type.'
else:
print 'Interactive namespace is empty.'
return
# if we have variables, move on...
# for these types, show len() instead of data:
seq_types = [types.DictType,types.ListType,types.TupleType]
# for numpy/Numeric arrays, display summary info
try:
import numpy
except ImportError:
ndarray_type = None
else:
ndarray_type = numpy.ndarray.__name__
try:
import Numeric
except ImportError:
array_type = None
else:
array_type = Numeric.ArrayType.__name__
# Find all variable names and types so we can figure out column sizes
def get_vars(i):
return self.shell.user_ns[i]
# some types are well known and can be shorter
abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
def type_name(v):
tn = type(v).__name__
return abbrevs.get(tn,tn)
varlist = map(get_vars,varnames)
typelist = []
for vv in varlist:
tt = type_name(vv)
if tt=='instance':
typelist.append( abbrevs.get(str(vv.__class__),
str(vv.__class__)))
else:
typelist.append(tt)
# column labels and # of spaces as separator
varlabel = 'Variable'
typelabel = 'Type'
datalabel = 'Data/Info'
colsep = 3
# variable format strings
vformat = "$vname.ljust(varwidth)$vtype.ljust(typewidth)"
vfmt_short = '$vstr[:25]<...>$vstr[-25:]'
aformat = "%s: %s elems, type `%s`, %s bytes"
# find the size of the columns to format the output nicely
varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
# table header
print varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1)
# and the table itself
kb = 1024
Mb = 1048576 # kb**2
for vname,var,vtype in zip(varnames,varlist,typelist):
print itpl(vformat),
if vtype in seq_types:
print len(var)
elif vtype in [array_type,ndarray_type]:
vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
if vtype==ndarray_type:
# numpy
vsize = var.size
vbytes = vsize*var.itemsize
vdtype = var.dtype
else:
# Numeric
vsize = Numeric.size(var)
vbytes = vsize*var.itemsize()
vdtype = var.typecode()
if vbytes < 100000:
print aformat % (vshape,vsize,vdtype,vbytes)
else:
print aformat % (vshape,vsize,vdtype,vbytes),
if vbytes < Mb:
print '(%s kb)' % (vbytes/kb,)
else:
print '(%s Mb)' % (vbytes/Mb,)
else:
try:
vstr = str(var)
except UnicodeEncodeError:
vstr = unicode(var).encode(sys.getdefaultencoding(),
'backslashreplace')
vstr = vstr.replace('\n','\\n')
if len(vstr) < 50:
print vstr
else:
printpl(vfmt_short)
def magic_reset(self, parameter_s=''):
"""Resets the namespace by removing all names defined by the user.
Input/Output history are left around in case you need them.
Parameters
----------
-y : force reset without asking for confirmation.
Examples
--------
In [6]: a = 1
In [7]: a
Out[7]: 1
In [8]: 'a' in _ip.user_ns
Out[8]: True
In [9]: %reset -f
In [10]: 'a' in _ip.user_ns
Out[10]: False
"""
if parameter_s == '-f':
ans = True
else:
ans = self.shell.ask_yes_no(
"Once deleted, variables cannot be recovered. Proceed (y/[n])? ")
if not ans:
print 'Nothing done.'
return
user_ns = self.shell.user_ns
for i in self.magic_who_ls():
del(user_ns[i])
# Also flush the private list of module references kept for script
# execution protection
self.shell.clear_main_mod_cache()
def magic_reset_selective(self, parameter_s=''):
"""Resets the namespace by removing names defined by the user.
Input/Output history are left around in case you need them.
%reset_selective [-f] regex
No action is taken if regex is not included
Options
-f : force reset without asking for confirmation.
Examples
--------
We first fully reset the namespace so your output looks identical to
this example for pedagogical reasons; in practice you do not need a
full reset.
In [1]: %reset -f
Now, with a clean namespace we can make a few variables and use
%reset_selective to only delete names that match our regexp:
In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
In [3]: who_ls
Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
In [4]: %reset_selective -f b[2-3]m
In [5]: who_ls
Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
In [6]: %reset_selective -f d
In [7]: who_ls
Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
In [8]: %reset_selective -f c
In [9]: who_ls
Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
In [10]: %reset_selective -f b
In [11]: who_ls
Out[11]: ['a']
"""
opts, regex = self.parse_options(parameter_s,'f')
if opts.has_key('f'):
ans = True
else:
ans = self.shell.ask_yes_no(
"Once deleted, variables cannot be recovered. Proceed (y/[n])? ")
if not ans:
print 'Nothing done.'
return
user_ns = self.shell.user_ns
if not regex:
print 'No regex pattern specified. Nothing done.'
return
else:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
for i in self.magic_who_ls():
if m.search(i):
del(user_ns[i])
def magic_logstart(self,parameter_s=''):
"""Start logging anywhere in a session.
%logstart [-o|-r|-t] [log_name [log_mode]]
If no name is given, it defaults to a file named 'ipython_log.py' in your
current directory, in 'rotate' mode (see below).
'%logstart name' saves to file 'name' in 'backup' mode. It saves your
history up to that point and then continues logging.
%logstart takes a second optional parameter: logging mode. This can be one
of (note that the modes are given unquoted):\\
append: well, that says it.\\
backup: rename (if exists) to name~ and start name.\\
global: single logfile in your home dir, appended to.\\
over : overwrite existing log.\\
rotate: create rotating logs name.1~, name.2~, etc.
Options:
-o: log also IPython's output. In this mode, all commands which
generate an Out[NN] prompt are recorded to the logfile, right after
their corresponding input line. The output lines are always
prepended with a '#[Out]# ' marker, so that the log remains valid
Python code.
Since this marker is always the same, filtering only the output from
a log is very easy, using for example a simple awk call:
awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py
-r: log 'raw' input. Normally, IPython's logs contain the processed
input, so that user lines are logged in their final form, converted
into valid Python. For example, %Exit is logged as
'_ip.magic("Exit"). If the -r flag is given, all input is logged
exactly as typed, with no transformations applied.
-t: put timestamps before each input line logged (these are put in
comments)."""
opts,par = self.parse_options(parameter_s,'ort')
log_output = 'o' in opts
log_raw_input = 'r' in opts
timestamp = 't' in opts
logger = self.shell.logger
# if no args are given, the defaults set in the logger constructor by
# ipython remain valid
if par:
try:
logfname,logmode = par.split()
except:
logfname = par
logmode = 'backup'
else:
logfname = logger.logfname
logmode = logger.logmode
# put logfname into rc struct as if it had been called on the command
# line, so it ends up saved in the log header Save it in case we need
# to restore it...
old_logfile = self.shell.logfile
if logfname:
logfname = os.path.expanduser(logfname)
self.shell.logfile = logfname
loghead = '# IPython log file\n\n'
try:
started = logger.logstart(logfname,loghead,logmode,
log_output,timestamp,log_raw_input)
except:
self.shell.logfile = old_logfile
warn("Couldn't start log: %s" % sys.exc_info()[1])
else:
# log input history up to this point, optionally interleaving
# output if requested
if timestamp:
# disable timestamping for the previous history, since we've
# lost those already (no time machine here).
logger.timestamp = False
if log_raw_input:
input_hist = self.shell.history_manager.input_hist_raw
else:
input_hist = self.shell.history_manager.input_hist_parsed
if log_output:
log_write = logger.log_write
output_hist = self.shell.history_manager.output_hist
for n in range(1,len(input_hist)-1):
log_write(input_hist[n].rstrip())
if n in output_hist:
log_write(repr(output_hist[n]),'output')
else:
logger.log_write(''.join(input_hist[1:]))
if timestamp:
# re-enable timestamping
logger.timestamp = True
print ('Activating auto-logging. '
'Current session state plus future input saved.')
logger.logstate()
def magic_logstop(self,parameter_s=''):
"""Fully stop logging and close log file.
In order to start logging again, a new %logstart call needs to be made,
possibly (though not necessarily) with a new filename, mode and other
options."""
self.logger.logstop()
def magic_logoff(self,parameter_s=''):
"""Temporarily stop logging.
You must have previously started logging."""
self.shell.logger.switch_log(0)
def magic_logon(self,parameter_s=''):
"""Restart logging.
This function is for restarting logging which you've temporarily
stopped with %logoff. For starting logging for the first time, you
must use the %logstart function, which allows you to specify an
optional log filename."""
self.shell.logger.switch_log(1)
def magic_logstate(self,parameter_s=''):
"""Print the status of the logging system."""
self.shell.logger.logstate()
def magic_pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your ipythonrc
configuration file (the variable is called 'pdb').
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print 'Automatic pdb calling has been turned',on_off(new_pdb)
def magic_debug(self, parameter_s=''):
"""Activate the interactive debugger in post-mortem mode.
If an exception has just occurred, this lets you inspect its stack
frames interactively. Note that this will always work only on the last
traceback that occurred, so you must call this quickly after an
exception that you wish to inspect has fired, because if another one
occurs, it clobbers the previous one.
If you want IPython to automatically do this on every exception, see
the %pdb magic for more details.
"""
self.shell.debugger(force=True)
@testdec.skip_doctest
def magic_prun(self, parameter_s ='',user_mode=1,
opts=None,arg_lst=None,prog_ns=None):
"""Run a statement through the python code profiler.
Usage:
%prun [options] statement
The given statement (which doesn't require quote marks) is run via the
python profiler in a manner similar to the profile.run() function.
Namespaces are internally managed to work correctly; profile.run
cannot be used in IPython because it makes certain assumptions about
namespaces which do not hold under IPython.
Options:
-l <limit>: you can place restrictions on what or how much of the
profile gets printed. The limit value can be:
* A string: only information for function names containing this string
is printed.
* An integer: only these many lines are printed.
* A float (between 0 and 1): this fraction of the report is printed
(for example, use a limit of 0.4 to see the topmost 40% only).
You can combine several limits with repeated use of the option. For
example, '-l __init__ -l 5' will print only the topmost 5 lines of
information about class constructors.
-r: return the pstats.Stats object generated by the profiling. This
object has all the information about the profile in it, and you can
later use it for further analysis or in other functions.
-s <key>: sort profile by given key. You can provide more than one key
by using the option several times: '-s key1 -s key2 -s key3...'. The
default sorting key is 'time'.
The following is copied verbatim from the profile documentation
referenced below:
When more than one key is provided, additional keys are used as
secondary criteria when there is equality in all keys selected
before them.
Abbreviations can be used for any key names, as long as the
abbreviation is unambiguous. The following are the keys currently
defined:
Valid Arg Meaning
"calls" call count
"cumulative" cumulative time
"file" file name
"module" file name
"pcalls" primitive call count
"line" line number
"name" function name
"nfl" name/file/line
"stdname" standard name
"time" internal time
Note that all sorts on statistics are in descending order (placing
most time-consuming items first), whereas name, file, and line number
searches are in ascending order (i.e., alphabetical). The subtle
distinction between "nfl" and "stdname" is that the standard name is a
sort of the name as printed, which means that the embedded line
numbers get compared in an odd way. For example, lines 3, 20, and 40
would (if the file names were the same) appear in the string order
"20" "3" and "40". In contrast, "nfl" does a numeric compare of the
line numbers. In fact, sort_stats("nfl") is the same as
sort_stats("name", "file", "line").
-T #: save profile results as shown on screen to a text
file. The profile is still shown on screen.
-D #: save (via dump_stats) profile statistics to given
filename. This data is in a format understood by the pstats module, and
is generated by a call to the dump_stats() method of profile
objects. The profile is still shown on screen.
If you want to run complete programs under the profiler's control, use
'%run -p [prof_opts] filename.py [args to program]' where prof_opts
contains profiler specific options as described here.
You can read the complete documentation for the profile module with::
In [1]: import profile; profile.help()
"""
opts_def = Struct(D=[''],l=[],s=['time'],T=[''])
# protect user quote marks
parameter_s = parameter_s.replace('"',r'\"').replace("'",r"\'")
if user_mode: # regular user call
opts,arg_str = self.parse_options(parameter_s,'D:l:rs:T:',
list_all=1)
namespace = self.shell.user_ns
else: # called to run a program by %run -p
try:
filename = get_py_filename(arg_lst[0])
except IOError,msg:
error(msg)
return
arg_str = 'execfile(filename,prog_ns)'
namespace = locals()
opts.merge(opts_def)
prof = profile.Profile()
try:
prof = prof.runctx(arg_str,namespace,namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
if hasattr(stats,'stream'):
# In newer versions of python, the stats object has a 'stream'
# attribute to write into.
stats.stream = stdout_trap
stats.print_stats(*lims)
else:
# For older versions, we manually redirect stdout during printing
sys_stdout = sys.stdout
try:
sys.stdout = stdout_trap
stats.print_stats(*lims)
finally:
sys.stdout = sys_stdout
output = stdout_trap.getvalue()
output = output.rstrip()
page.page(output)
print sys_exit,
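# -D and -T each carry a filename; the empty-string default above means the option was not given.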
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
prof.dump_stats(dump_file)
print '\n*** Profile stats marshalled to file',\
`dump_file`+'.',sys_exit
if text_file:
pfile = file(text_file,'w')
pfile.write(output)
pfile.close()
print '\n*** Profile printout saved to text file',\
`text_file`+'.',sys_exit
if opts.has_key('r'):
return stats
else:
return None
@testdec.skip_doctest
def magic_run(self, parameter_s ='',runner=None,
file_finder=get_py_filename):
"""Run the named file inside IPython as a program.
Usage:\\
%run [-n -i -t [-N<N>] -d [-b<N>] -p [profile options]] file [args]
Parameters after the filename are passed as command-line arguments to
the program (put in sys.argv). Then, control returns to IPython's
prompt.
This is similar to running at a system prompt:\\
$ python file args\\
but with the advantage of giving you IPython's tracebacks, and of
loading all variables into your interactive namespace for further use
(unless -p is used, see below).
The file is executed in a namespace initially consisting only of
__name__=='__main__' and sys.argv constructed as indicated. It thus
sees its environment as if it were being run as a stand-alone program
(except for sharing global objects such as previously imported
modules). But after execution, the IPython interactive namespace gets
updated with all variables defined in the program (except for __name__
and sys.argv). This allows for very convenient loading of code for
interactive work, while giving each program a 'clean sheet' to run in.
Options:
-n: __name__ is NOT set to '__main__', but to the running file's name
without extension (as python does under import). This allows running
scripts and reloading the definitions in them without calling code
protected by an ' if __name__ == "__main__" ' clause.
-i: run the file in IPython's namespace instead of an empty one. This
is useful if you are experimenting with code written in a text editor
which depends on variables defined interactively.
-e: ignore sys.exit() calls or SystemExit exceptions in the script
being run. This is particularly useful if IPython is being used to
run unittests, which always exit with a sys.exit() call. In such
cases you are interested in the output of the test results, not in
seeing a traceback of the unittest module.
-t: print timing information at the end of the run. IPython will give
you an estimated CPU time consumption for your script, which under
Unix uses the resource module to avoid the wraparound problems of
time.clock(). Under Unix, an estimate of time spent on system tasks
is also given (for Windows platforms this is reported as 0.0).
If -t is given, an additional -N<N> option can be given, where <N>
must be an integer indicating how many times you want the script to
run. The final timing report will include total and per run results.
For example (testing the script uniq_stable.py):
In [1]: run -t uniq_stable
IPython CPU timings (estimated):\\
User : 0.19597 s.\\
System: 0.0 s.\\
In [2]: run -t -N5 uniq_stable
IPython CPU timings (estimated):\\
Total runs performed: 5\\
Times : Total Per run\\
User : 0.910862 s, 0.1821724 s.\\
System: 0.0 s, 0.0 s.
-d: run your program under the control of pdb, the Python debugger.
This allows you to execute your program step by step, watch variables,
etc. Internally, what IPython does is similar to calling:
pdb.run('execfile("YOURFILENAME")')
with a breakpoint set on line 1 of your file. You can change the line
number for this automatic breakpoint to be <N> by using the -bN option
(where N must be an integer). For example:
%run -d -b40 myscript
will set the first breakpoint at line 40 in myscript.py. Note that
the first breakpoint must be set on a line which actually does
something (not a comment or docstring) for it to stop execution.
When the pdb debugger starts, you will see a (Pdb) prompt. You must
first enter 'c' (without quotes) to start execution up to the first
breakpoint.
Entering 'help' gives information about the use of the debugger. You
can easily see pdb's full documentation with "import pdb;pdb.help()"
at a prompt.
-p: run program under the control of the Python profiler module (which
prints a detailed report of execution times, function calls, etc).
You can pass other options after -p which affect the behavior of the
profiler itself. See the docs for %prun for details.
In this mode, the program's variables do NOT propagate back to the
IPython interactive namespace (because they remain in the namespace
where the profiler executes them).
Internally this triggers a call to %prun, see its documentation for
details on the options available specifically for profiling.
There is one special usage for which the text above doesn't apply:
if the filename ends with .ipy, the file is run as ipython script,
just as if the commands were written on IPython prompt.
"""
# get arguments and set sys.argv for program to be run.
opts,arg_lst = self.parse_options(parameter_s,'nidtN:b:pD:l:rs:T:e',
mode='list',list_all=1)
try:
filename = file_finder(arg_lst[0])
except IndexError:
warn('you must provide at least a filename.')
print '\n%run:\n',oinspect.getdoc(self.magic_run)
return
except IOError,msg:
error(msg)
return
if filename.lower().endswith('.ipy'):
self.shell.safe_execfile_ipy(filename)
return
# Control the response to exit() calls made by the script being run
exit_ignore = opts.has_key('e')
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv # save it for later restoring
sys.argv = [filename]+ arg_lst[1:] # put in the proper filename
if opts.has_key('i'):
# Run in user's interactive namespace
prog_ns = self.shell.user_ns
__name__save = self.shell.user_ns['__name__']
prog_ns['__name__'] = '__main__'
main_mod = self.shell.new_main_mod(prog_ns)
else:
# Run in a fresh, empty namespace
if opts.has_key('n'):
name = os.path.splitext(os.path.basename(filename))[0]
else:
name = '__main__'
main_mod = self.shell.new_main_mod()
prog_ns = main_mod.__dict__
prog_ns['__name__'] = name
# Since '%run foo' emulates 'python foo.py' at the cmd line, we must
# set the __file__ global in the script's namespace
prog_ns['__file__'] = filename
# pickle fix. See interactiveshell for an explanation. But we need to make sure
# that, if we overwrite __main__, we replace it at the end
main_mod_name = prog_ns['__name__']
if main_mod_name == '__main__':
restore_main = sys.modules['__main__']
else:
restore_main = False
# This needs to be undone at the end to prevent holding references to
# every single object ever created.
sys.modules[main_mod_name] = main_mod
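# stats stays None unless the -p (profile) branch below produces a pstats object to return.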
stats = None
try:
self.shell.save_history()
if opts.has_key('p'):
stats = self.magic_prun('',0,opts,arg_lst,prog_ns)
else:
if opts.has_key('d'):
deb = debugger.Pdb(self.shell.colors)
# reset Breakpoint state, which is moronically kept
# in a class
bdb.Breakpoint.next = 1
bdb.Breakpoint.bplist = {}
bdb.Breakpoint.bpbynumber = [None]
# Set an initial breakpoint to stop execution
maxtries = 10
bp = int(opts.get('b',[1])[0])
checkline = deb.checkline(filename,bp)
if not checkline:
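# The requested line cannot hold a breakpoint; scan forward up to maxtries lines for one that can.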
for bp in range(bp+1,bp+maxtries+1):
if deb.checkline(filename,bp):
break
else:
msg = ("\nI failed to find a valid line to set "
"a breakpoint\n"
"after trying up to line: %s.\n"
"Please set a valid breakpoint manually "
"with the -b option." % bp)
error(msg)
return
# if we find a good linenumber, set the breakpoint
deb.do_break('%s:%s' % (filename,bp))
# Start file run
print "NOTE: Enter 'c' at the",
print "%s prompt to start your script." % deb.prompt
try:
deb.run('execfile("%s")' % filename,prog_ns)
except:
etype, value, tb = sys.exc_info()
# Skip three frames in the traceback: the %run one,
# one inside bdb.py, and the command-line typed by the
# user (run by exec in pdb itself).
self.shell.InteractiveTB(etype,value,tb,tb_offset=3)
else:
if runner is None:
runner = self.shell.safe_execfile
if opts.has_key('t'):
# timed execution
try:
nruns = int(opts['N'][0])
if nruns < 1:
error('Number of runs must be >=1')
return
except (KeyError):
nruns = 1
if nruns == 1:
t0 = clock2()
runner(filename,prog_ns,prog_ns,
exit_ignore=exit_ignore)
t1 = clock2()
t_usr = t1[0]-t0[0]
t_sys = t1[1]-t0[1]
print "\nIPython CPU timings (estimated):"
print " User : %10s s." % t_usr
print " System: %10s s." % t_sys
else:
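# Multiple runs requested: time the whole batch and report totals plus per-run averages.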
runs = range(nruns)
t0 = clock2()
for nr in runs:
runner(filename,prog_ns,prog_ns,
exit_ignore=exit_ignore)
t1 = clock2()
t_usr = t1[0]-t0[0]
t_sys = t1[1]-t0[1]
print "\nIPython CPU timings (estimated):"
print "Total runs performed:",nruns
print " Times : %10s %10s" % ('Total','Per run')
print " User : %10s s, %10s s." % (t_usr,t_usr/nruns)
print " System: %10s s, %10s s." % (t_sys,t_sys/nruns)
else:
# regular execution
runner(filename,prog_ns,prog_ns,exit_ignore=exit_ignore)
if opts.has_key('i'):
self.shell.user_ns['__name__'] = __name__save
else:
# The shell MUST hold a reference to prog_ns so after %run
# exits, the python deletion mechanism doesn't zero it out
# (leaving dangling references).
self.shell.cache_main_mod(prog_ns,filename)
# update IPython interactive namespace
# Some forms of read errors on the file may mean the
# __name__ key was never set; using pop we don't have to
# worry about a possible KeyError.
prog_ns.pop('__name__', None)
self.shell.user_ns.update(prog_ns)
finally:
# It's a bit of a mystery why, but __builtins__ can change from
# being a module to becoming a dict missing some key data after
# %run. As best I can see, this is NOT something IPython is doing
# at all, and similar problems have been reported before:
# http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
# Since this seems to be done by the interpreter itself, the best
# we can do is to at least restore __builtins__ for the user on
# exit.
self.shell.user_ns['__builtins__'] = __builtin__
# Ensure key global structures are restored
sys.argv = save_argv
if restore_main:
sys.modules['__main__'] = restore_main
else:
# Remove from sys.modules the reference to main_mod we'd
# added. Otherwise it will trap references to objects
# contained therein.
del sys.modules[main_mod_name]
self.shell.reload_history()
return stats
@testdec.skip_doctest
def magic_timeit(self, parameter_s =''):
"""Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import functions or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit."""
import timeit
import math
# XXX: Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals. Until we figure out a robust way of
# auto-detecting if the terminal can deal with it, use plain 'us' for
# microseconds. I am really NOT happy about disabling the proper
# 'micro' prefix, but crashing is worse... If anyone knows what the
# right solution for this is, I'm all ears...
#
# Note: using
#
# s = u'\xb5'
# s.encode(sys.getdefaultencoding())
#
# is not sufficient, as I've seen terminals where that fails but
# print s
#
# succeeds
#
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
#units = [u"s", u"ms",u'\xb5',"ns"]
units = [u"s", u"ms",u'us',"ns"]
scaling = [1, 1e3, 1e6, 1e9]
opts, stmt = self.parse_options(parameter_s,'n:r:tcp:',
posix=False)
if stmt == "":
return
timefunc = timeit.default_timer
number = int(getattr(opts, "n", 0))
repeat = int(getattr(opts, "r", timeit.default_repeat))
precision = int(getattr(opts, "p", 3))
if hasattr(opts, "t"):
timefunc = time.time
if hasattr(opts, "c"):
timefunc = clock
timer = timeit.Timer(timer=timefunc)
# this code has tight coupling to the inner workings of timeit.Timer,
# but is there a better way to achieve that the code stmt has access
# to the shell namespace?
src = timeit.template % {'stmt': timeit.reindent(stmt, 8),
'setup': "pass"}
# Track compilation time so it can be reported if too long
# Minimum time above which compilation time will be reported
tc_min = 0.1
t0 = clock()
code = compile(src, "<magic-timeit>", "exec")
tc = clock()-t0
ns = {}
exec code in self.shell.user_ns, ns
timer.inner = ns["inner"]
if number == 0:
# determine number so that 0.2 <= total time < 2.0
number = 1
for i in range(1, 10):
if timer.timeit(number) >= 0.2:
break
number *= 10
best = min(timer.repeat(repeat, number)) / number
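# Pick the largest unit (s, ms, us, ns) that still prints the best time as a number >= 1.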
if best > 0.0 and best < 1000.0:
order = min(-int(math.floor(math.log10(best)) // 3), 3)
elif best >= 1000.0:
order = 0
else:
order = 3
print u"%d loops, best of %d: %.*g %s per loop" % (number, repeat,
precision,
best * scaling[order],
units[order])
if tc > tc_min:
print "Compiler time: %.2f s" % tc
@testdec.skip_doctest
def magic_time(self,parameter_s = ''):
"""Time execution of a Python statement or expression.
The CPU and wall clock times are printed, and the value of the
expression (if any) is returned. Note that under Win32, system time
is always reported as 0, since it can not be measured.
This function provides very basic timing functionality. In Python
2.3, the timeit module offers more control and sophistication, so this
could be rewritten to use it (patches welcome).
Some examples:
In [1]: time 2**128
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Out[1]: 340282366920938463463374607431768211456L
In [2]: n = 1000000
In [3]: time sum(range(n))
CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
Wall time: 1.37
Out[3]: 499999500000L
In [4]: time print 'hello world'
hello world
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Note that the time needed by Python to compile the given expression
will be reported if it is more than 0.1s. In this example, the
actual exponentiation is done by Python at compilation time, so while
the expression can take a noticeable amount of time to compute, that
time is purely due to the compilation:
In [5]: time 3**9999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
In [6]: time 3**999999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
Compiler : 0.78 s
"""
# fail immediately if the given expression can't be compiled
expr = self.shell.prefilter(parameter_s,False)
# Minimum time above which compilation time will be reported
tc_min = 0.1
try:
mode = 'eval'
t0 = clock()
code = compile(expr,'<timed eval>',mode)
tc = clock()-t0
except SyntaxError:
mode = 'exec'
t0 = clock()
code = compile(expr,'<timed exec>',mode)
tc = clock()-t0
# skew measurement as little as possible
glob = self.shell.user_ns
clk = clock2
wtime = time.time
# time execution
wall_st = wtime()
if mode=='eval':
st = clk()
out = eval(code,glob)
end = clk()
else:
st = clk()
exec code in glob
end = clk()
out = None
wall_end = wtime()
# Compute actual times and report
wall_time = wall_end-wall_st
cpu_user = end[0]-st[0]
cpu_sys = end[1]-st[1]
cpu_tot = cpu_user+cpu_sys
print "CPU times: user %.2f s, sys: %.2f s, total: %.2f s" % \
(cpu_user,cpu_sys,cpu_tot)
print "Wall time: %.2f s" % wall_time
if tc > tc_min:
print "Compiler : %.2f s" % tc
return out
@testdec.skip_doctest
def magic_macro(self,parameter_s = ''):
"""Define a set of input lines as a macro for future re-execution.
Usage:\\
%macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed as the
command line is used instead.
This will define a global variable called `name` which is a string
made of joining the slices and lines you specify (n1,n2,... numbers
above) from your input history into a single string. This variable
acts like an automatic function which re-executes those lines as if
you had typed them. You just type 'name' at the prompt and the code
executes.
The notation for indicating number ranges is: n1-n2 means 'use line
numbers n1,...n2' (the endpoint is included). That is, '5-7' means
using the lines numbered 5,6 and 7.
Note: as a 'hidden' feature, you can also use traditional python slice
notation, where N:M means numbers N through M-1.
For example, if your history contains (%hist prints it):
44: x=1
45: y=3
46: z=x+y
47: print x
48: a=5
49: print 'x',x,'y',y
you can create a macro with lines 44 through 47 (included) and line 49
called my_macro with:
In [55]: %macro my_macro 44-47 49
Now, typing `my_macro` (without quotes) will re-execute all this code
in one pass.
You don't need to give the line-numbers in order, and any given line
number can appear multiple times. You can assemble macros with any
lines from your input history in any order.
The macro is a simple object which holds its value in an attribute,
but IPython's display system checks for macros and executes them as
code instead of printing them when you type their name.
You can view a macro's contents by explicitly printing it with:
'print macro_name'.
For one-off cases which DON'T contain magic function calls in them you
can obtain similar results by explicitly executing slices from your
input history with:
In [60]: exec In[44:48]+In[49]"""
opts,args = self.parse_options(parameter_s,'r',mode='list')
if not args:
macs = [k for k,v in self.shell.user_ns.items() if isinstance(v, Macro)]
macs.sort()
return macs
if len(args) == 1:
raise UsageError(
"%macro insufficient args; usage '%macro name n1-n2 n3-4...")
name,ranges = args[0], args[1:]
#print 'rng',ranges # dbg
lines = self.extract_input_slices(ranges,opts.has_key('r'))
macro = Macro(lines)
self.shell.define_macro(name, macro)
print 'Macro `%s` created. To execute, type its name (without quotes).' % name
print 'Macro contents:'
print macro,
def magic_save(self,parameter_s = ''):
"""Save a set of lines to a given filename.
Usage:\\
%save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed as the
command line is used instead.
This function uses the same syntax as %macro for line extraction, but
instead of creating a macro it saves the resulting string to the
filename you specify.
It adds a '.py' extension to the file if you don't do so yourself, and
it asks for confirmation before overwriting existing files."""
opts,args = self.parse_options(parameter_s,'r',mode='list')
fname,ranges = args[0], args[1:]
if not fname.endswith('.py'):
fname += '.py'
if os.path.isfile(fname):
ans = raw_input('File `%s` exists. Overwrite (y/[N])? ' % fname)
if ans.lower() not in ['y','yes']:
print 'Operation cancelled.'
return
cmds = ''.join(self.extract_input_slices(ranges,opts.has_key('r')))
f = file(fname,'w')
f.write(cmds)
f.close()
print 'The following commands were written to file `%s`:' % fname
print cmds
def _edit_macro(self,mname,macro):
"""open an editor with the macro data in a file"""
filename = self.shell.mktempfile(macro.value)
self.shell.hooks.editor(filename)
# and make a new macro object, to replace the old one
mfile = open(filename)
mvalue = mfile.read()
mfile.close()
self.shell.user_ns[mname] = Macro(mvalue)
def magic_ed(self,parameter_s=''):
"""Alias to %edit."""
return self.magic_edit(parameter_s)
@testdec.skip_doctest
def magic_edit(self,parameter_s='',last_call=['','']):
"""Bring up an editor and execute the resulting code.
Usage:
%edit [options] [args]
%edit runs IPython's editor hook. The default version of this hook is
set to call the __IPYTHON__.rc.editor command. This is read from your
environment variable $EDITOR. If this isn't found, it will default to
vi under Linux/Unix and to notepad under Windows. See the end of this
docstring for how to change the editor hook.
You can also set the value of this editor via the command line option
'-editor' or in your ipythonrc file. This is useful if you wish to use
specifically for IPython an editor different from your typical default
(and for Windows users who typically don't set environment variables).
This command allows you to conveniently edit multi-line code right in
your IPython session.
If called without arguments, %edit opens up an empty editor with a
temporary file and will execute the contents of this file when you
close it (don't forget to save it!).
Options:
-n <number>: open the editor at a specified line number. By default,
the IPython editor hook uses the unix syntax 'editor +N filename', but
you can configure this by providing your own modified hook if your
favorite editor supports line-number specifications with a different
syntax.
-p: this will call the editor with the same data as the previous time
it was used, regardless of how long ago (in your current session) it
was.
-r: use 'raw' input. This option only applies to input taken from the
user's history. By default, the 'processed' history is used, so that
magics are loaded in their transformed version to valid Python. If
this option is given, the raw input as typed as the command line is
used instead. When you exit the editor, it will be executed by
IPython's own processor.
-x: do not execute the edited code immediately upon exit. This is
mainly useful if you are editing programs which need to be called with
command line arguments, which you can then do using %run.
Arguments:
If arguments are given, the following possibilities exist:
- The arguments are numbers or pairs of colon-separated numbers (like
1 4:8 9). These are interpreted as lines of previous input to be
loaded into the editor. The syntax is the same of the %macro command.
- If the argument doesn't start with a number, it is evaluated as a
variable and its contents loaded into the editor. You can thus edit
any string which contains python code (including the result of
previous edits).
- If the argument is the name of an object (other than a string),
IPython will try to locate the file where it was defined and open the
editor at the point where it is defined. You can use `%edit function`
to load an editor exactly at the point where 'function' is defined,
edit it and have the file be executed automatically.
If the object is a macro (see %macro for details), this opens up your
specified editor with a temporary file containing the macro's data.
Upon exit, the macro is reloaded with the contents of the file.
Note: opening at an exact line is only supported under Unix, and some
editors (like kedit and gedit up to Gnome 2.8) do not understand the
'+NUMBER' parameter necessary for this feature. Good editors like
(X)Emacs, vi, jed, pico and joe all do.
- If the argument is not found as a variable, IPython will look for a
file with that name (adding .py if necessary) and load it into the
editor. It will execute its contents with execfile() when you exit,
loading any code in the file into your interactive namespace.
After executing your code, %edit will return as output the code you
typed in the editor (except when it was an existing file). This way
you can reload the code in further invocations of %edit as a variable,
via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
the output.
Note that %edit is also available through the alias %ed.
This is an example of creating a simple function inside the editor and
then modifying it. First, start up the editor:
In [1]: ed
Editing... done. Executing edited code...
Out[1]: 'def foo():\\n print "foo() was defined in an editing session"\\n'
We can then call the function foo():
In [2]: foo()
foo() was defined in an editing session
Now we edit foo. IPython automatically loads the editor with the
(temporary) file where foo() was previously defined:
In [3]: ed foo
Editing... done. Executing edited code...
And if we call foo() again we get the modified version:
In [4]: foo()
foo() has now been changed!
Here is an example of how to edit a code snippet successive
times. First we call the editor:
In [5]: ed
Editing... done. Executing edited code...
hello
Out[5]: "print 'hello'n"
Now we call it again with the previous output (stored in _):
In [6]: ed _
Editing... done. Executing edited code...
hello world
Out[6]: "print 'hello world'\\n"
Now we call it with the output #8 (stored in _8, also as Out[8]):
In [7]: ed _8
Editing... done. Executing edited code...
hello again
Out[7]: "print 'hello again'\\n"
Changing the default editor hook:
If you wish to write your own editor hook, you can put it in a
configuration file which you load at startup time. The default hook
is defined in the IPython.core.hooks module, and you can use that as a
starting example for further modifications. That file also has
general instructions on how to set a new hook for use once you've
defined it."""
# FIXME: This function has become a convoluted mess. It needs a
# ground-up rewrite with clean, simple logic.
def make_filename(arg):
"Make a filename from the given args"
try:
filename = get_py_filename(arg)
except IOError:
if arg.endswith('.py'):
filename = arg
else:
filename = None
return filename
# custom exceptions
class DataIsObject(Exception): pass
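# DataIsObject is raised below when the argument evaluates to a non-string object, so we edit the file where that object is defined instead of treating it as literal source.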
opts,args = self.parse_options(parameter_s,'prxn:')
# Set a few locals from the options for convenience:
opts_p = opts.has_key('p')
opts_r = opts.has_key('r')
# Default line number value
lineno = opts.get('n',None)
if opts_p:
args = '_%s' % last_call[0]
if not self.shell.user_ns.has_key(args):
args = last_call[1]
# use last_call to remember the state of the previous call, but don't
# let it be clobbered by successive '-p' calls.
try:
last_call[0] = self.shell.displayhook.prompt_count
if not opts_p:
last_call[1] = parameter_s
except:
pass
# by default this is done with temp files, except when the given
# arg is a filename
use_temp = 1
if re.match(r'\d',args):
# Mode where user specifies ranges of lines, like in %macro.
# This means that you can't edit files whose names begin with
# numbers this way. Tough.
ranges = args.split()
data = ''.join(self.extract_input_slices(ranges,opts_r))
elif args.endswith('.py'):
filename = make_filename(args)
data = ''
use_temp = 0
elif args:
try:
# Load the parameter given as a variable. If not a string,
# process it as an object instead (below)
#print '*** args',args,'type',type(args) # dbg
data = eval(args,self.shell.user_ns)
if not type(data) in StringTypes:
raise DataIsObject
except (NameError,SyntaxError):
# given argument is not a variable, try as a filename
filename = make_filename(args)
if filename is None:
warn("Argument given (%s) can't be found as a variable "
"or as a filename." % args)
return
data = ''
use_temp = 0
except DataIsObject:
# macros have a special edit function
if isinstance(data,Macro):
self._edit_macro(args,data)
return
# For objects, try to edit the file where they are defined
try:
filename = inspect.getabsfile(data)
if 'fakemodule' in filename.lower() and inspect.isclass(data):
# class created by %edit? Try to find source
# by looking for method definitions instead, the
# __module__ in those classes is FakeModule.
attrs = [getattr(data, aname) for aname in dir(data)]
for attr in attrs:
if not inspect.ismethod(attr):
continue
filename = inspect.getabsfile(attr)
if filename and 'fakemodule' not in filename.lower():
# change the attribute to be the edit target instead
data = attr
break
datafile = 1
except TypeError:
filename = make_filename(args)
datafile = 1
warn('Could not find file where `%s` is defined.\n'
'Opening a file named `%s`' % (args,filename))
# Now, make sure we can actually read the source (if it was in
# a temp file it's gone by now).
if datafile:
try:
if lineno is None:
lineno = inspect.getsourcelines(data)[1]
except IOError:
filename = make_filename(args)
if filename is None:
warn('The file `%s` where `%s` was defined cannot '
'be read.' % (filename,data))
return
use_temp = 0
else:
data = ''
if use_temp:
filename = self.shell.mktempfile(data)
print 'IPython will make a temporary file named:',filename
# do actual editing here
print 'Editing...',
sys.stdout.flush()
try:
# Quote filenames that may have spaces in them
if ' ' in filename:
filename = "%s" % filename
self.shell.hooks.editor(filename,lineno)
except TryNext:
warn('Could not open editor')
return
# XXX TODO: should this be generalized for all string vars?
# For now, this is special-cased to blocks created by cpaste
if args.strip() == 'pasted_block':
self.shell.user_ns['pasted_block'] = file_read(filename)
if opts.has_key('x'): # -x prevents actual execution
print
else:
print 'done. Executing edited code...'
if opts_r:
self.shell.run_cell(file_read(filename))
else:
self.shell.safe_execfile(filename,self.shell.user_ns,
self.shell.user_ns)
if use_temp:
try:
return open(filename).read()
except IOError,msg:
if msg.filename == filename:
warn('File not found. Did you forget to save?')
return
else:
self.shell.showtraceback()
def magic_xmode(self,parameter_s = ''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context and Verbose.
If called without arguments, acts as a toggle."""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print 'Exception reporting mode:',shell.InteractiveTB.mode
except:
xmode_switch_err('user')
def magic_colors(self,parameter_s = ''):
"""Switch color scheme for prompts, info system and exception handlers.
Currently implemented schemes: NoColor, Linux, LightBG.
Color scheme names are not case-sensitive."""
def color_switch_err(name):
warn('Error changing %s color schemes.\n%s' %
(name,sys.exc_info()[1]))
new_scheme = parameter_s.strip()
if not new_scheme:
raise UsageError(
"%colors: you must specify a color scheme. See '%colors?'")
# local shortcut
shell = self.shell
import IPython.utils.rlineimpl as readline
if not readline.have_readline and sys.platform == "win32":
msg = """\
Proper color support under MS Windows requires the pyreadline library.
You can find it at:
http://ipython.scipy.org/moin/PyReadline/Intro
Gary's readline needs the ctypes module, from:
http://starship.python.net/crew/theller/ctypes
(Note that ctypes is already part of Python versions 2.5 and newer).
Defaulting color scheme to 'NoColor'"""
new_scheme = 'NoColor'
warn(msg)
# readline option is 0
if not shell.has_readline:
new_scheme = 'NoColor'
# Set prompt colors
try:
shell.displayhook.set_colors(new_scheme)
except:
color_switch_err('prompt')
else:
shell.colors = \
shell.displayhook.color_table.active_scheme_name
# Set exception colors
try:
shell.InteractiveTB.set_colors(scheme = new_scheme)
shell.SyntaxTB.set_colors(scheme = new_scheme)
except:
color_switch_err('exception')
# Set info (for 'object?') colors
if shell.color_info:
try:
shell.inspector.set_active_scheme(new_scheme)
except:
color_switch_err('object inspector')
else:
shell.inspector.set_active_scheme('NoColor')
def magic_Pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
self.shell.pprint = 1 - self.shell.pprint
print 'Pretty printing has been turned', \
['OFF','ON'][self.shell.pprint]
def magic_Exit(self, parameter_s=''):
"""Exit IPython."""
self.shell.ask_exit()
# Add aliases as magics so all common forms work: exit, quit, Exit, Quit.
magic_exit = magic_quit = magic_Quit = magic_Exit
#......................................................................
# Functions to implement unix shell-type things
@testdec.skip_doctest
def magic_alias(self, parameter_s = ''):
"""Define an alias for a system command.
'%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
Then, typing 'alias_name params' will execute the system command 'cmd
params' (from your underlying operating system).
Aliases have lower precedence than magic functions and Python normal
variables, so if 'foo' is both a Python variable and an alias, the
alias can not be executed until 'del foo' removes the Python variable.
You can use the %l specifier in an alias definition to represent the
whole line when the alias is called. For example:
In [2]: alias bracket echo "Input in brackets: <%l>"
In [3]: bracket hello world
Input in brackets: <hello world>
You can also define aliases with parameters using %s specifiers (one
per parameter):
In [1]: alias parts echo first %s second %s
In [2]: %parts A B
first A second B
In [3]: %parts A
Incorrect number of arguments: 2 expected.
parts is an alias to: 'echo first %s second %s'
Note that %l and %s are mutually exclusive. You can only use one or
the other in your aliases.
Aliases expand Python variables just like system calls using ! or !!
do: all expressions prefixed with '$' get expanded. For details of
the semantic rules, see PEP-215:
http://www.python.org/peps/pep-0215.html. This is the library used by
IPython for variable expansion. If you want to access a true shell
variable, an extra $ is necessary to prevent its expansion by IPython:
In [6]: alias show echo
In [7]: PATH='A Python string'
In [8]: show $PATH
A Python string
In [9]: show $$PATH
/usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
You can use the alias facility to access all of $PATH. See the %rehash
and %rehashx functions, which automatically create aliases for the
contents of your $PATH.
If called with no parameters, %alias prints the current alias table."""
par = parameter_s.strip()
if not par:
stored = self.db.get('stored_aliases', {} )
aliases = sorted(self.shell.alias_manager.aliases)
# for k, v in stored:
# atab.append(k, v[0])
print "Total number of aliases:", len(aliases)
sys.stdout.flush()
return aliases
# Now try to define a new one
try:
alias,cmd = par.split(None, 1)
except:
print oinspect.getdoc(self.magic_alias)
else:
self.shell.alias_manager.soft_define_alias(alias, cmd)
# end magic_alias
def magic_unalias(self, parameter_s = ''):
"""Remove an alias"""
aname = parameter_s.strip()
self.shell.alias_manager.undefine_alias(aname)
stored = self.db.get('stored_aliases', {} )
if aname in stored:
print "Removing %stored alias",aname
del stored[aname]
self.db['stored_aliases'] = stored
def magic_rehashx(self, parameter_s = ''):
"""Update the alias table with all executable files in $PATH.
This version explicitly checks that every entry in $PATH is a file
with execute access (os.X_OK), so it is much slower than %rehash.
Under Windows, it checks executability as a match against a
'|'-separated string of extensions, stored in the IPython config
variable win_exec_ext. This defaults to 'exe|com|bat'.
This function also resets the root module cache of module completer,
used on slow filesystems.
"""
from IPython.core.alias import InvalidAliasError
# for the benefit of module completer in ipy_completers.py
del self.db['rootmodules']
path = [os.path.abspath(os.path.expanduser(p)) for p in
os.environ.get('PATH','').split(os.pathsep)]
path = filter(os.path.isdir,path)
syscmdlist = []
# Now define isexec in a cross platform manner.
if os.name == 'posix':
isexec = lambda fname:os.path.isfile(fname) and \
os.access(fname,os.X_OK)
else:
try:
winext = os.environ['pathext'].replace(';','|').replace('.','')
except KeyError:
winext = 'exe|com|bat|py'
if 'py' not in winext:
winext += '|py'
execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
savedir = os.getcwd()
# Now walk the paths looking for executables to alias.
try:
# write the whole loop for posix/Windows so we don't have an if in
# the innermost part
if os.name == 'posix':
for pdir in path:
os.chdir(pdir)
for ff in os.listdir(pdir):
if isexec(ff):
try:
# Removes dots from the name since ipython
# will assume names with dots to be python.
self.shell.alias_manager.define_alias(
ff.replace('.',''), ff)
except InvalidAliasError:
pass
else:
syscmdlist.append(ff)
else:
no_alias = self.shell.alias_manager.no_alias
for pdir in path:
os.chdir(pdir)
for ff in os.listdir(pdir):
base, ext = os.path.splitext(ff)
if isexec(ff) and base.lower() not in no_alias:
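# Drop the .exe extension so the alias name matches the bare command name.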
if ext.lower() == '.exe':
ff = base
try:
# Removes dots from the name since ipython
# will assume names with dots to be python.
self.shell.alias_manager.define_alias(
base.lower().replace('.',''), ff)
except InvalidAliasError:
pass
syscmdlist.append(ff)
db = self.db
db['syscmdlist'] = syscmdlist
finally:
os.chdir(savedir)
def magic_pwd(self, parameter_s = ''):
"""Return the current working directory path."""
return os.getcwd()
def magic_cd(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _dh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
cd 'dir': changes to directory 'dir'.
cd -: changes to the last visited directory.
cd -<n>: changes to the n-th directory in the directory history.
cd --foo: change to directory that matches 'foo' in history
cd -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$',parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = self.shell.user_ns['_dh'][nn]
except IndexError:
print 'The requested directory does not exist in history.'
return
else:
opts = {}
elif parameter_s.startswith('--'):
ps = None
fallback = None
pat = parameter_s[2:]
dh = self.shell.user_ns['_dh']
# first search only by basename (last component)
for ent in reversed(dh):
if pat in os.path.basename(ent) and os.path.isdir(ent):
ps = ent
break
if fallback is None and pat in ent and os.path.isdir(ent):
fallback = ent
# if we have no last part match, pick the first full path match
if ps is None:
ps = fallback
if ps is None:
print "No matching entry in directory history"
return
else:
opts = {}
else:
#turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )','/', parameter_s)
opts,ps = self.parse_options(parameter_s,'qb',mode='string')
# jump to previous
if ps == '-':
try:
ps = self.shell.user_ns['_dh'][-2]
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# jump to bookmark if needed
else:
if not os.path.isdir(ps) or opts.has_key('b'):
bkms = self.db.get('bookmarks', {})
if bkms.has_key(ps):
target = bkms[ps]
print '(bookmark:%s) -> %s' % (ps,target)
ps = target
else:
if opts.has_key('b'):
raise UsageError("Bookmark '%s' not found. "
"Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
try:
os.chdir(os.path.expanduser(ps))
if hasattr(self.shell, 'term_title') and self.shell.term_title:
set_term_title('IPython: ' + abbrev_cwd())
except OSError:
print sys.exc_info()[1]
else:
cwd = os.getcwd()
dhist = self.shell.user_ns['_dh']
if oldcwd != cwd:
dhist.append(cwd)
self.db['dhist'] = compress_dhist(dhist)[-100:]
else:
os.chdir(self.shell.home_dir)
if hasattr(self.shell, 'term_title') and self.shell.term_title:
set_term_title('IPython: ' + '~')
cwd = os.getcwd()
dhist = self.shell.user_ns['_dh']
if oldcwd != cwd:
dhist.append(cwd)
self.db['dhist'] = compress_dhist(dhist)[-100:]
if not 'q' in opts and self.shell.user_ns['_dh']:
print self.shell.user_ns['_dh'][-1]
def magic_env(self, parameter_s=''):
"""List environment variables."""
return os.environ.data
def magic_pushd(self, parameter_s=''):
"""Place the current dir on stack and change directory.
Usage:\\
%pushd ['dirname']
"""
dir_s = self.shell.dir_stack
tgt = os.path.expanduser(parameter_s)
cwd = os.getcwd().replace(self.home_dir,'~')
if tgt:
self.magic_cd(parameter_s)
dir_s.insert(0,cwd)
return self.magic_dirs()
def magic_popd(self, parameter_s=''):
"""Change to directory popped off the top of the stack.
"""
if not self.shell.dir_stack:
raise UsageError("%popd on empty stack")
top = self.shell.dir_stack.pop(0)
self.magic_cd(top)
print "popd ->",top
def magic_dirs(self, parameter_s=''):
"""Return the current directory stack."""
return self.shell.dir_stack
def magic_dhist(self, parameter_s=''):
"""Print your history of visited directories.
%dhist -> print full history\\
%dhist n -> print last n entries only\\
%dhist n1 n2 -> print entries between n1 and n2 (n1 not included)\\
This history is automatically maintained by the %cd command, and
always available as the global list variable _dh. You can use %cd -<n>
to go to directory number <n>.
Note that most of the time, you should view directory history by entering
cd -<TAB>.
"""
dh = self.shell.user_ns['_dh']
if parameter_s:
try:
args = map(int,parameter_s.split())
except:
self.arg_err(Magic.magic_dhist)
return
if len(args) == 1:
ini,fin = max(len(dh)-(args[0]),0),len(dh)
elif len(args) == 2:
ini,fin = args
else:
self.arg_err(Magic.magic_dhist)
return
else:
ini,fin = 0,len(dh)
nlprint(dh,
header = 'Directory history (kept in _dh)',
start=ini,stop=fin)
@testdec.skip_doctest
def magic_sc(self, parameter_s=''):
"""Shell capture - execute a shell command and capture its output.
DEPRECATED. Suboptimal, retained for backwards compatibility.
You should use the form 'var = !command' instead. Example:
"%sc -l myfiles = ls ~" should now be written as
"myfiles = !ls ~"
myfiles.s, myfiles.l and myfiles.n still apply as documented
below.
--
%sc [options] varname=command
IPython will run the given command using commands.getoutput(), and
will then update the user's interactive namespace with a variable
called varname, containing the value of the call. Your command can
contain shell wildcards, pipes, etc.
The '=' sign in the syntax is mandatory, and the variable name you
supply must follow Python's standard conventions for valid names.
(A special format without variable name exists for internal use)
Options:
-l: list output. Split the output on newlines into a list before
assigning it to the given variable. By default the output is stored
as a single string.
-v: verbose. Print the contents of the variable.
In most cases you should not need to split as a list, because the
returned value is a special type of string which can automatically
provide its contents either as a list (split on newlines) or as a
space-separated string. These are convenient, respectively, either
for sequential processing or to be passed to a shell command.
For example:
# all-random
# Capture into variable a
In [1]: sc a=ls *py
# a is a string with embedded newlines
In [2]: a
Out[2]: 'setup.py\\nwin32_manual_post_install.py'
# which can be seen as a list:
In [3]: a.l
Out[3]: ['setup.py', 'win32_manual_post_install.py']
# or as a whitespace-separated string:
In [4]: a.s
Out[4]: 'setup.py win32_manual_post_install.py'
# a.s is useful to pass as a single command line:
In [5]: !wc -l $a.s
146 setup.py
130 win32_manual_post_install.py
276 total
# while the list form is useful to loop over:
In [6]: for f in a.l:
...: !wc -l $f
...:
146 setup.py
130 win32_manual_post_install.py
Similarly, the lists returned by the -l option are also special, in
the sense that you can equally invoke the .s attribute on them to
automatically get a whitespace-separated string from their contents:
In [7]: sc -l b=ls *py
In [8]: b
Out[8]: ['setup.py', 'win32_manual_post_install.py']
In [9]: b.s
Out[9]: 'setup.py win32_manual_post_install.py'
In summary, both the lists and strings used for output capture have
the following special attributes:
.l (or .list) : value as list.
.n (or .nlstr): value as newline-separated string.
.s (or .spstr): value as space-separated string.
"""
opts,args = self.parse_options(parameter_s,'lv')
# Try to get a variable name and command to run
try:
# the variable name must be obtained from the parse_options
# output, which uses shlex.split to strip options out.
var,_ = args.split('=',1)
var = var.strip()
# But the command has to be extracted from the original input
# parameter_s, not on what parse_options returns, to avoid the
# quote stripping which shlex.split performs on it.
_,cmd = parameter_s.split('=',1)
except ValueError:
var,cmd = '',''
# If all looks ok, proceed
split = 'l' in opts
out = self.shell.getoutput(cmd, split=split)
if opts.has_key('v'):
print '%s ==\n%s' % (var,pformat(out))
if var:
self.shell.user_ns.update({var:out})
else:
return out
def magic_sx(self, parameter_s=''):
"""Shell execute - run a shell command and capture its output.
%sx command
IPython will run the given command using commands.getoutput(), and
return the result formatted as a list (split on '\\n'). Since the
output is _returned_, it will be stored in ipython's regular output
cache Out[N] and in the '_N' automatic variables.
Notes:
1) If an input line begins with '!!', then %sx is automatically
invoked. That is, while:
!ls
causes ipython to simply issue system('ls'), typing
!!ls
is a shorthand equivalent to:
%sx ls
2) %sx differs from %sc in that %sx automatically splits into a list,
like '%sc -l'. The reason for this is to make it as easy as possible
to process line-oriented shell output via further python commands.
%sc is meant to provide much finer control, but requires more
typing.
3) Just like %sc -l, this is a list with special attributes:
.l (or .list) : value as list.
.n (or .nlstr): value as newline-separated string.
.s (or .spstr): value as whitespace-separated string.
This is very useful when trying to use such lists as arguments to
system commands."""
if parameter_s:
return self.shell.getoutput(parameter_s)
def magic_r(self, parameter_s=''):
"""Repeat previous input.
Note: Consider using the more powerful %rep instead!
If given an argument, repeats the previous command which starts with
the same string, otherwise it just repeats the previous input.
Shell escaped commands (with ! as first character) are not recognized
by this system, only pure python code and magic commands.
"""
start = parameter_s.strip()
esc_magic = ESC_MAGIC
# Identify magic commands even if automagic is on (which means
# the in-memory version is different from that typed by the user).
if self.shell.automagic:
start_magic = esc_magic+start
else:
start_magic = start
# Look through the input history in reverse
for n in range(len(self.shell.history_manager.input_hist_parsed)-2,0,-1):
input = self.shell.history_manager.input_hist_parsed[n]
# skip plain 'r' lines so we don't recurse to infinity
if input != '_ip.magic("r")\n' and \
(input.startswith(start) or input.startswith(start_magic)):
#print 'match',`input` # dbg
print 'Executing:',input,
self.shell.run_cell(input)
return
print 'No previous input matching `%s` found.' % start
def magic_bookmark(self, parameter_s=''):
"""Manage IPython's bookmark system.
%bookmark <name> - set bookmark to current dir
%bookmark <name> <dir> - set bookmark to <dir>
%bookmark -l - list all bookmarks
%bookmark -d <name> - remove bookmark
%bookmark -r - remove all bookmarks
You can later on access a bookmarked folder with:
%cd -b <name>
or simply '%cd <name>' if there is no directory called <name> AND
there is such a bookmark defined.
Your bookmarks persist through IPython sessions, but they are
associated with each profile."""
opts,args = self.parse_options(parameter_s,'drl',mode='list')
if len(args) > 2:
raise UsageError("%bookmark: too many arguments")
bkms = self.db.get('bookmarks',{})
if opts.has_key('d'):
try:
todel = args[0]
except IndexError:
raise UsageError(
"%bookmark -d: must provide a bookmark to delete")
else:
try:
del bkms[todel]
except KeyError:
raise UsageError(
"%%bookmark -d: Can't delete bookmark '%s'" % todel)
elif opts.has_key('r'):
bkms = {}
elif opts.has_key('l'):
bks = bkms.keys()
bks.sort()
if bks:
size = max(map(len,bks))
else:
size = 0
fmt = '%-'+str(size)+'s -> %s'
print 'Current bookmarks:'
for bk in bks:
print fmt % (bk,bkms[bk])
else:
if not args:
raise UsageError("%bookmark: You must specify the bookmark name")
elif len(args)==1:
bkms[args[0]] = os.getcwd()
elif len(args)==2:
bkms[args[0]] = args[1]
self.db['bookmarks'] = bkms
def magic_pycat(self, parameter_s=''):
"""Show a syntax-highlighted file through a pager.
This magic is similar to the cat utility, but it will assume the file
to be Python source and will show it with syntax highlighting. """
try:
filename = get_py_filename(parameter_s)
cont = file_read(filename)
except IOError:
try:
cont = eval(parameter_s,self.user_ns)
except NameError:
cont = None
if cont is None:
print "Error: no such file or variable"
return
page.page(self.shell.pycolorize(cont))
def _rerun_pasted(self):
""" Rerun a previously pasted command.
"""
b = self.user_ns.get('pasted_block', None)
if b is None:
raise UsageError('No previous pasted block available')
print "Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b))
exec b in self.user_ns
def _get_pasted_lines(self, sentinel):
""" Yield pasted lines until the user enters the given sentinel value.
"""
from IPython.core import interactiveshell
print "Pasting code; enter '%s' alone on the line to stop." % sentinel
while True:
l = interactiveshell.raw_input_original(':')
if l == sentinel:
return
else:
yield l
def _strip_pasted_lines_for_code(self, raw_lines):
""" Strip non-code parts of a sequence of lines to return a block of
code.
"""
# Regular expressions that declare text we strip from the input:
strip_re = [r'^\s*In \[\d+\]:', # IPython input prompt
r'^\s*(\s?>)+', # Python input prompt
r'^\s*\.{3,}', # Continuation prompts
r'^\++',
]
strip_from_start = map(re.compile,strip_re)
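# Strip any leading prompt pattern from each pasted line, keeping only the code.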
lines = []
for l in raw_lines:
for pat in strip_from_start:
l = pat.sub('',l)
lines.append(l)
block = "\n".join(lines) + '\n'
#print "block:\n",block
return block
def _execute_block(self, block, par):
""" Execute a block, or store it in a variable, per the user's request.
"""
if not par:
b = textwrap.dedent(block)
self.user_ns['pasted_block'] = b
exec b in self.user_ns
else:
self.user_ns[par] = SList(block.splitlines())
print "Block assigned to '%s'" % par
def magic_quickref(self,arg):
""" Show a quick reference sheet """
import IPython.core.usage
qr = IPython.core.usage.quick_reference + self.magic_magic('-brief')
page.page(qr)
def magic_doctest_mode(self,parameter_s=''):
"""Toggle doctest mode on and off.
This mode is intended to make IPython behave as much as possible like a
plain Python shell, from the perspective of how its prompts, exceptions
and output look. This makes it easy to copy and paste parts of a
session into doctests. It does so by:
- Changing the prompts to the classic ``>>>`` ones.
- Changing the exception reporting mode to 'Plain'.
- Disabling pretty-printing of output.
Note that IPython also supports the pasting of code snippets that have
leading '>>>' and '...' prompts in them. This means that you can paste
doctests from files or docstrings (even if they have leading
whitespace), and the code will execute correctly. You can then use
'%history -t' to see the translated history; this will give you the
input after removal of all the leading prompts and whitespace, which
can be pasted back into an editor.
With these features, you can switch into this mode easily whenever you
need to do testing and changes to doctests, without having to leave
your existing IPython session.
"""
from IPython.utils.ipstruct import Struct
# Shorthands
shell = self.shell
oc = shell.displayhook
meta = shell.meta
# dstore is a data store kept in the instance metadata bag to track any
# changes we make, so we can undo them later.
dstore = meta.setdefault('doctest_mode',Struct())
save_dstore = dstore.setdefault
# save a few values we'll need to recover later
mode = save_dstore('mode',False)
save_dstore('rc_pprint',shell.pprint)
save_dstore('xmode',shell.InteractiveTB.mode)
save_dstore('rc_separate_out',shell.separate_out)
save_dstore('rc_separate_out2',shell.separate_out2)
save_dstore('rc_prompts_pad_left',shell.prompts_pad_left)
save_dstore('rc_separate_in',shell.separate_in)
if mode == False:
# turn on
oc.prompt1.p_template = '>>> '
oc.prompt2.p_template = '... '
oc.prompt_out.p_template = ''
# Prompt separators like plain python
oc.input_sep = oc.prompt1.sep = ''
oc.output_sep = ''
oc.output_sep2 = ''
oc.prompt1.pad_left = oc.prompt2.pad_left = \
oc.prompt_out.pad_left = False
shell.pprint = False
shell.magic_xmode('Plain')
else:
# turn off
oc.prompt1.p_template = shell.prompt_in1
oc.prompt2.p_template = shell.prompt_in2
oc.prompt_out.p_template = shell.prompt_out
oc.input_sep = oc.prompt1.sep = dstore.rc_separate_in
oc.output_sep = dstore.rc_separate_out
oc.output_sep2 = dstore.rc_separate_out2
oc.prompt1.pad_left = oc.prompt2.pad_left = \
oc.prompt_out.pad_left = dstore.rc_prompts_pad_left
shell.pprint = dstore.rc_pprint
shell.magic_xmode(dstore.xmode)
# Store new mode and inform
dstore.mode = bool(1-int(mode))
mode_label = ['OFF','ON'][dstore.mode]
print 'Doctest mode is:', mode_label
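    # Hypothetical session sketch (added commentary, not in the original
    # file): running "%doctest_mode" once switches the prompts so that
    #   >>> 1 + 1
    #   2
    # can be copied straight into a doctest, and prints "Doctest mode is: ON";
    # running it again restores the prompt, separator, pprint and xmode
    # settings saved in dstore above.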
def magic_gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
        can now be enabled, disabled and switched at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, and Tk::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui tk # enable Tk event loop integration
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
from IPython.lib.inputhook import enable_gui
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
return enable_gui(arg)
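    # Usage note (an assumption based on the docstring above, not original
    # code): after "%gui qt4" a user may create a QtGui.QApplication and show
    # widgets, but must not start the event loop themselves; a bare "%gui"
    # passes arg=None to enable_gui() and disables the integration again.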
def magic_load_ext(self, module_str):
"""Load an IPython extension by its module name."""
return self.extension_manager.load_extension(module_str)
def magic_unload_ext(self, module_str):
"""Unload an IPython extension by its module name."""
self.extension_manager.unload_extension(module_str)
def magic_reload_ext(self, module_str):
"""Reload an IPython extension by its module name."""
self.extension_manager.reload_extension(module_str)
@testdec.skip_doctest
def magic_install_profiles(self, s):
"""Install the default IPython profiles into the .ipython dir.
If the default profiles have already been installed, they will not
be overwritten. You can force overwriting them by using the ``-o``
option::
In [1]: %install_profiles -o
"""
if '-o' in s:
overwrite = True
else:
overwrite = False
from IPython.config import profile
profile_dir = os.path.split(profile.__file__)[0]
ipython_dir = self.ipython_dir
files = os.listdir(profile_dir)
to_install = []
for f in files:
if f.startswith('ipython_config'):
src = os.path.join(profile_dir, f)
dst = os.path.join(ipython_dir, f)
if (not os.path.isfile(dst)) or overwrite:
to_install.append((f, src, dst))
if len(to_install)>0:
print "Installing profiles to: ", ipython_dir
for (f, src, dst) in to_install:
shutil.copy(src, dst)
print " %s" % f
def magic_install_default_config(self, s):
"""Install IPython's default config file into the .ipython dir.
If the default config file (:file:`ipython_config.py`) is already
installed, it will not be overwritten. You can force overwriting
by using the ``-o`` option::
In [1]: %install_default_config
"""
if '-o' in s:
overwrite = True
else:
overwrite = False
from IPython.config import default
config_dir = os.path.split(default.__file__)[0]
ipython_dir = self.ipython_dir
default_config_file_name = 'ipython_config.py'
src = os.path.join(config_dir, default_config_file_name)
dst = os.path.join(ipython_dir, default_config_file_name)
if (not os.path.isfile(dst)) or overwrite:
shutil.copy(src, dst)
print "Installing default config file: %s" % dst
# Pylab support: simple wrappers that activate pylab, load gui input
# handling and modify slightly %run
@testdec.skip_doctest
def _pylab_magic_run(self, parameter_s=''):
Magic.magic_run(self, parameter_s,
runner=mpl_runner(self.shell.safe_execfile))
_pylab_magic_run.__doc__ = magic_run.__doc__
@testdec.skip_doctest
def magic_pylab(self, s):
"""Load numpy and matplotlib to work interactively.
%pylab [GUINAME]
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
It will import at the top level numpy as np, pyplot as plt, matplotlib,
pylab and mlab, as well as all names from numpy and pylab.
Parameters
----------
guiname : optional
One of the valid arguments to the %gui magic ('qt', 'wx', 'gtk' or
'tk'). If given, the corresponding Matplotlib backend is used,
otherwise matplotlib's default (which you can override in your
matplotlib config file) is used.
Examples
--------
In this case, where the MPL default is TkAgg:
In [2]: %pylab
Welcome to pylab, a matplotlib-based Python environment.
Backend in use: TkAgg
For more information, type 'help(pylab)'.
But you can explicitly request a different backend:
In [3]: %pylab qt
Welcome to pylab, a matplotlib-based Python environment.
Backend in use: Qt4Agg
For more information, type 'help(pylab)'.
"""
self.shell.enable_pylab(s)
def magic_tb(self, s):
"""Print the last traceback with the currently active exception mode.
See %xmode for changing exception reporting modes."""
self.shell.showtraceback()
# end Magic
```
#### File: qt/console/rich_ipython_widget.py
```python
import os
import re
from PyQt4 import QtCore, QtGui
# Local imports
from IPython.frontend.qt.svg import save_svg, svg_to_clipboard, svg_to_image
from ipython_widget import IPythonWidget
class RichIPythonWidget(IPythonWidget):
""" An IPythonWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichIPythonWidget protected class variables.
_payload_source_plot = 'IPython.zmq.pylab.backend_payload.add_plot_payload'
_svg_text_format_property = 1
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichIPythonWidget.
"""
kw['kind'] = 'rich'
super(RichIPythonWidget, self).__init__(*args, **kw)
# Dictionary for resolving Qt names to images when
# generating XHTML output
self._name_to_svg = {}
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name.isEmpty():
menu = super(RichIPythonWidget, self)._context_menu_make(pos)
else:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = format.stringProperty(self._svg_text_format_property)
if not svg.isEmpty():
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
return menu
#---------------------------------------------------------------------------
# 'FrontendWidget' protected interface
#---------------------------------------------------------------------------
def _process_execute_payload(self, item):
""" Reimplemented to handle matplotlib plot payloads.
"""
if item['source'] == self._payload_source_plot:
if item['format'] == 'svg':
svg = item['data']
try:
image = svg_to_image(svg)
except ValueError:
self._append_plain_text('Received invalid plot data.')
else:
format = self._add_image(image)
self._name_to_svg[str(format.name())] = svg
format.setProperty(self._svg_text_format_property, svg)
cursor = self._get_end_cursor()
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
return True
else:
# Add other plot formats here!
return False
else:
return super(RichIPythonWidget, self)._process_execute_payload(item)
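    # Illustrative payload shape (inferred from the checks above, not taken
    # from the original file): a matplotlib plot arrives roughly as
    #   {'source': 'IPython.zmq.pylab.backend_payload.add_plot_payload',
    #    'format': 'svg', 'data': '<svg ...>...</svg>'}
    # and anything else is delegated to the plain-text FrontendWidget handler.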
#---------------------------------------------------------------------------
# 'RichIPythonWidget' protected interface
#---------------------------------------------------------------------------
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = QtCore.QString.number(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
variant = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return variant.toPyObject()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
def image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files
may be written (e.g., for linked images).
If None, all images are to be included inline.
format : "png"|"svg", optional [default "png"]
Format for returned or referenced images.
Subclasses supporting image display should override this
method.
"""
if(format == "png"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if(path is not None):
if not os.path.exists(path):
os.mkdir(path)
relpath = os.path.basename(path)
if(image.save("%s/qt_img%s.png" % (path,match.group("name")),
"PNG")):
return '<img src="%s/qt_img%s.png">' % (relpath,
match.group("name"))
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, "PNG")
buffer_.close()
return '<img src="data:image/png;base64,\n%s\n" />' % (
re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))
elif(format == "svg"):
try:
svg = str(self._name_to_svg[match.group("name")])
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
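    # Export sketch (added commentary, not in the original file): with
    # path=None the PNG branch above inlines the image as a base64 data URI,
    # while a real path writes "qt_img<name>.png" into that directory and
    # links it with a relative <img> tag; the SVG branch returns the inline
    # SVG markup with its stand-alone header chopped off.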
``` |
{
"source": "0915318/PietonFunktie",
"score": 3
} |
#### File: 0915318/PietonFunktie/test.py
```python
from easygui import *
import os
from pprint import pprint
class Settings(EgStore):
def __init__(self, filename): # filename is required
#-------------------------------------------------
# Specify default/initial values for variables that
# this particular application wants to remember.
#-------------------------------------------------
self.userId = "userid"
self.targetServer = "targetserver"
#-------------------------------------------------
# For subclasses of EgStore, these must be
# the last two statements in __init__
#-------------------------------------------------
self.filename = filename # this is required
self.restore() # restore values from the storage file if possible
settingsFilename = os.path.join("C:", r"\School\Python\OP2\PietonFunktie", "settings.txt") # Windows example; raw string so the backslashes are not treated as escapes
settings = Settings(settingsFilename)
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# we save the variables as attributes of the "settings" object
settings.userId = fieldValues[1]      # "User ID" field
settings.targetServer = fieldValues[0]  # "Server ID" field; index 2 would be the password
settings.store() # persist the settings
# run code that gets a new value for userId
# then persist the settings with the new value
user = "biden_joe"
settings.userId = user
settings.store()
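# Illustrative note (added commentary, not part of the original script): on the
# next run, Settings(settingsFilename) calls self.restore() in __init__, so the
# userId value stored above is read back from settings.txt automatically.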
``` |
{
"source": "0916dhkim/forage-pacman",
"score": 3
} |
#### File: forage-pacman/components/player.py
```python
from components.tile_map import TileMap
from update_context import UpdateContext
from freegames import vector
from components.circle_renderer import CircleRenderer
class Player:
def __init__(self, position: vector, aim: vector):
self._position = position
self._aim = aim
self._circle_renderer = CircleRenderer("yellow")
def render(self):
self._circle_renderer.render(self._position)
def update(self, context: UpdateContext):
if context.tile_map.valid(self._position + self._aim):
self._position += self._aim
tile_index = context.tile_map.find_tile_index(self._position)
if context.tile_map.get(tile_index) == 1:
context.tile_map.set(tile_index, 2)
context.on_score()
context.tile_map.render_tile(tile_index)
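    # Tile-value convention assumed by update() (inferred from this file):
    # 1 marks a tile that still holds a pellet, 2 marks an eaten pellet;
    # eating one fires context.on_score() and re-renders only that tile.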
def change_aim(self, x, y, tile_map: TileMap):
if tile_map.valid(self._position + vector(x, y)):
self._aim = vector(x, y)
``` |
{
"source": "0916dhkim/samehomedifferenthacks-tutorbot",
"score": 3
} |
#### File: samehomedifferenthacks-tutorbot/commands/register.py
```python
from discord_client import client
from discord.ext.commands import Context
from discord import utils
from typing import List
# Command to add roles to the author.
@client.command(
help=(
"Add your technical stack.\n"
"Identify yourself as a mentor "
"by providing your technical stack. "
"This command will assign discord server "
"roles to you."
),
usage="[ROLE]..."
)
async def register(ctx: Context, *args: str):
added_role_names: List[str] = []
for role_name in args:
role = utils.get(ctx.author.guild.roles, name=role_name)
if role is not None:
added_role_names.append(role_name)
await ctx.author.add_roles(role)
message = (
"Registration successful.\n"
f"Added roles: {','.join(added_role_names)}"
)
await ctx.message.channel.send(message)
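# Usage sketch (the "!" command prefix is an assumption, it is not shown in
# this snippet): a member typing "!register python react" would receive the
# matching server roles, if they exist, and a reply such as
#   Registration successful.
#   Added roles: python,react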
```
#### File: samehomedifferenthacks-tutorbot/commands/sos.py
```python
from discord_client import client
from discord.ext.commands import Context
from discord import Role, Member, utils
from data import mentorAdapter
from typing import List
# Token for separating roles and question
SEPARATOR_SET = set([":", "--"])
# Return true if target member contains all required roles
def hasAllRoles(member: Member, *roles: Role) -> bool:
required = set(roles)
for r in member.roles:
required.discard(r)
return len(required) == 0
# Filter Members by required roles.
async def filterMembersByRequirements(
members: List[Member], *roles: Role
) -> List[Member]:
return list(filter(lambda m: hasAllRoles(m, *roles), members))
# Command to notify mentors for help.
@client.command(
help=(
"Get help for your question.\n"
"Notify mentors who have specified roles "
"about your question."
),
usage="[ROLE]... [-- [QUESTION]]"
)
async def sos(ctx: Context, *args: str):
# Separate args into role names and question.
role_names: List[str] = []
question_words: List[str] = []
after_separator: bool = False
for arg in args:
if arg in SEPARATOR_SET:
after_separator = True
continue
if after_separator:
question_words.append(arg)
else:
role_names.append(arg)
# Calculate free members.
free_members = list(
set(ctx.author.guild.members) - mentorAdapter._busy_members
)
# Filter matching mentors.
roles: List[Role] = []
for role_name in role_names:
role = utils.get(ctx.author.guild.roles, name=role_name)
if role is not None:
roles.append(role)
# If there is no matching role,
if len(roles) == 0:
# Send an error message.
await ctx.message.channel.send(
"Please specify more than 1 role."
)
return
mentors = await filterMembersByRequirements(free_members, *roles)
# Send message.
message = (
f"{ctx.author.mention} needs your HELP!\n"
f"Question: {' '.join(question_words)}\n"
f"Roles: {' '.join(map(lambda r: r.name, roles))}\n"
f"Available Mentors: {' '.join(map(lambda m: m.mention, mentors))}"
)
await ctx.message.channel.send(message)
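# Usage sketch (the "!" command prefix is an assumption, it is not shown in
# this snippet):
#   !sos python django -- How do I paginate a queryset?
# Everything before "--" (or ":") is read as role names, the rest becomes the
# question, and only free mentors holding *all* of those roles are mentioned.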
```
#### File: samehomedifferenthacks-tutorbot/commands/status.py
```python
from discord_client import client
from data import mentorAdapter
from discord.ext.commands import Context
# Command to check the author's busy status.
@client.command(help="Print your status.")
async def status(ctx: Context):
message = ""
is_busy = mentorAdapter.check_busy(ctx.author)
role_names = map(
lambda r: r.name,
filter(lambda r: r != ctx.guild.default_role, ctx.author.roles)
)
if is_busy:
message += "(busy) You are not available to answer questions."
else:
message += "(free) You are open for questions."
message += "\nYour roles: "
message += f"{', '.join(role_names)}"
await ctx.message.channel.send(message)
``` |