content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python AsyncIO implementation of the GRPC helloworld.Greeter server."""
import logging
import asyncio
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
async def SayHello(
self, request: helloworld_pb2.HelloRequest,
context: grpc.aio.ServicerContext) -> helloworld_pb2.HelloReply:
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
async def serve() -> None:
server = grpc.aio.server()
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
listen_addr = '[::]:50051'
server.add_insecure_port(listen_addr)
logging.info("Starting server on %s", listen_addr)
await server.start()
try:
await server.wait_for_termination()
except KeyboardInterrupt:
        # Shut down the server with no grace period. During a non-zero grace
        # period the server stops accepting new connections and lets existing
        # RPCs finish; with 0 seconds, in-flight RPCs are cancelled immediately.
await server.stop(0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.run(serve())
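# A minimal client sketch for reference (not part of the original example); it assumes
# the server above is reachable on localhost:50051:
#
#   async def run_client() -> None:
#       async with grpc.aio.insecure_channel('localhost:50051') as channel:
#           stub = helloworld_pb2_grpc.GreeterStub(channel)
#           reply = await stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
#           print(reply.message)
#
#   asyncio.run(run_client())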
| 34.215686 | 78 | 0.731805 | ["Apache-2.0"] | 1261385937/grpc | examples/python/helloworld/async_greeter_server.py | 1,745 | Python |
import scipy.sparse as sp
import numpy as np
import torch
import time
import os
from configparser import ConfigParser
import sys
sys.path.append('/home/shiyan/project/gcn_for_prediction_of_protein_interactions/')
from src.util.load_data import load_data, sparse_to_tuple, mask_test_edges, preprocess_graph
from src.util.loss import arga_loss_function, varga_loss_function
from src.util.metrics import get_roc_score
from src.util import define_optimizer
from src.graph_nheads_att_gan.model import NHGATModelGAN
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
def __init__(self):
pass
def train_model(self, config_path):
if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
# load config file
config = ConfigParser()
config.read(config_path)
section = config.sections()[0]
# data catalog path
data_catalog = config.get(section, "data_catalog")
# train file path
train_file_name = config.get(section, "train_file_name")
# model save/load path
model_path = config.get(section, "model_path")
# model param config
hidden_dim1 = config.getint(section, "hidden_dim1")
hidden_dim2 = config.getint(section, "hidden_dim2")
hidden_dim3 = config.getint(section, 'hidden_dim3')
num_heads = config.getint(section, 'num_heads')
dropout = config.getfloat(section, "dropout")
vae_bool = config.getboolean(section, 'vae_bool')
alpha = config.getfloat(section, 'alpha')
lr = config.getfloat(section, "lr")
lr_decay = config.getfloat(section, 'lr_decay')
weight_decay = config.getfloat(section, "weight_decay")
gamma = config.getfloat(section, "gamma")
momentum = config.getfloat(section, "momentum")
eps = config.getfloat(section, "eps")
clip = config.getfloat(section, "clip")
epochs = config.getint(section, "epochs")
optimizer_name = config.get(section, "optimizer")
            # Load the input data
adj = load_data(os.path.join(data_catalog, train_file_name))
num_nodes = adj.shape[0]
num_edges = adj.sum()
features = sparse_to_tuple(sp.identity(num_nodes))
num_features = features[2][1]
            # Remove the diagonal elements.
            # The right-hand term below takes the diagonal of adj_orig (1-D), adds an axis, and builds a diagonal matrix holding only those diagonal entries.
adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj_orig.eliminate_zeros()
adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig)
adj = adj_train
            # Returns the coords, data and shape of D^{-0.5} S D^{-0.5}, where S = A + I
adj_norm = preprocess_graph(adj)
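            # Illustrative example of the normalization above (degrees taken from S):
            # for A = [[0, 1], [1, 0]], S = A + I = [[1, 1], [1, 1]] and deg = [2, 2],
            # so D^{-0.5} S D^{-0.5} = [[0.5, 0.5], [0.5, 0.5]].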
adj_label = adj_train + sp.eye(adj_train.shape[0])
# adj_label = sparse_to_tuple(adj_label)
adj_label = torch.FloatTensor(adj_label.toarray()).to(DEVICE)
            '''
            Note that every element of adj is either 1 or 0. pos_weight is the ratio of negative edges
            (node pairs with no edge) to positive edges in the training adjacency matrix, and it is passed
            to the binary cross-entropy loss. When the two classes are imbalanced (e.g. far more negative
            than positive edges), supplying this weight down-weights the over-represented class and
            up-weights the rare one, balancing their contribution to the loss and usually improving results.
            Reference: https://www.zhihu.com/question/383567632
            Every negative edge gets weight 1 and every positive edge gets weight pos_weight.
            '''
pos_weight = float(adj.shape[0] * adj.shape[0] - num_edges) / num_edges
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
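            # Illustrative arithmetic for the two terms above: with N = 4 nodes and
            # adj.sum() = 4 observed edges,
            #   pos_weight = (4*4 - 4) / 4 = 3.0   (each positive edge counts 3x in the BCE loss)
            #   norm       = 4*4 / ((4*4 - 4) * 2) = 16 / 24 ~= 0.667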
# create model
print('create model ...')
model = NHGATModelGAN(num_features, hidden_dim1=hidden_dim1, hidden_dim2=hidden_dim2, hidden_dim3=hidden_dim3, num_heads=num_heads, dropout=dropout, alpha=alpha, vae_bool=vae_bool)
# define optimizer
if optimizer_name == 'adam':
optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'adamw':
optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'sgd':
optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimizer_name == 'adagrad':
optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
weight_decay=weight_decay)
elif optimizer_name == 'rmsprop':
optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
momentum=momentum)
elif optimizer_name == 'adadelta':
optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
else:
                raise NameError('Unknown optimizer name: %s' % optimizer_name)
model = model.to(DEVICE)
            # A sparse COO tensor is represented by a pair of dense tensors: a 2-D tensor of indices and a 1-D tensor of values; providing both constructs the sparse tensor.
adj_norm = torch.sparse.FloatTensor(torch.LongTensor(adj_norm[0].T),
torch.FloatTensor(adj_norm[1]),
torch.Size(adj_norm[2]))
features = torch.sparse.FloatTensor(torch.LongTensor(features[0].T),
torch.FloatTensor(features[1]),
torch.Size(features[2])).to_dense()
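            # Illustrative COO example: indices [[0, 1], [1, 0]] (2 x nnz) with values
            # [1.0, 1.0] and size (2, 2) describe the dense 2x2 matrix [[0., 1.], [1., 0.]].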
adj_norm = adj_norm.to(DEVICE)
features = features.to(DEVICE)
norm = torch.FloatTensor(np.array(norm)).to(DEVICE)
pos_weight = torch.tensor(pos_weight).to(DEVICE)
num_nodes = torch.tensor(num_nodes).to(DEVICE)
print('start training...')
best_valid_roc_score = float('-inf')
hidden_emb = None
model.train()
for epoch in range(epochs):
t = time.time()
optimizer.zero_grad()
                # Forward pass: reconstructed (decoded) adjacency matrix plus discriminator outputs
recovered, dis_real, dis_fake, mu, logvar = model(features, adj_norm)
if vae_bool:
loss = varga_loss_function(preds=recovered, labels=adj_label,
mu=mu, logvar=logvar,
dis_real=dis_real, dis_fake=dis_fake,
n_nodes=num_nodes,
norm=norm, pos_weight=pos_weight)
else:
loss = arga_loss_function(preds=recovered, labels=adj_label,
dis_real=dis_real, dis_fake=dis_fake,
norm=norm, pos_weight=pos_weight)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
cur_loss = loss.item()
optimizer.step()
hidden_emb = mu.data.cpu().numpy()
                # Evaluate on the validation set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
                # Keep the best validation ROC score
if roc_score > best_valid_roc_score:
best_valid_roc_score = roc_score
                    # No need to save the whole model; saving hidden_emb is enough, because later decoding/inference uses the inner product of hidden_emb
np.save(model_path, hidden_emb)
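                    # Illustrative: with the saved embedding alone, the adjacency can later be
                    # reconstructed by an inner-product decoder, e.g. sigmoid(hidden_emb @ hidden_emb.T).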
print("Epoch:", '%04d' % (epoch + 1), "train_loss = ", "{:.5f}".format(cur_loss),
"val_roc_score = ", "{:.5f}".format(roc_score),
"average_precision_score = ", "{:.5f}".format(ap_score),
"time=", "{:.5f}".format(time.time() - t)
)
print("Optimization Finished!")
            # Evaluate on the test set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
print('test roc score: {}'.format(roc_score))
print('test ap score: {}'.format(ap_score))
else:
raise FileNotFoundError('File config.cfg not found : ' + config_path)
if __name__ == '__main__':
config_path = os.path.join(os.getcwd(), 'config.cfg')
train = Train()
train.train_model(config_path)
| 47.351351 | 192 | 0.574543 | ["Apache-2.0"] | jiangnanboy/gcn_for_prediction_of_protein_interactions | src/graph_nheads_att_gan/train.py | 9,490 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend_context."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
try:
# python version >= 3.3
from unittest import mock
except ImportError:
import mock # pylint: disable=unused-import
import tensorflow as tf
from google.protobuf import text_format
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import metadata
from tensorboard.plugins.hparams import plugin_data_pb2
from tensorboard.plugins.scalar import metadata as scalars_metadata
DATA_TYPE_EXPERIMENT = "experiment"
DATA_TYPE_SESSION_START_INFO = "session_start_info"
DATA_TYPE_SESSION_END_INFO = "session_end_info"
class BackendContextTest(tf.test.TestCase):
# Make assertProtoEquals print all the diff.
maxDiff = None # pylint: disable=invalid-name
def setUp(self):
self._mock_tb_context = base_plugin.TBContext()
# TODO(#3425): Remove mocking or switch to mocking data provider
# APIs directly.
self._mock_multiplexer = mock.create_autospec(
plugin_event_multiplexer.EventMultiplexer
)
self._mock_tb_context.multiplexer = self._mock_multiplexer
self._mock_multiplexer.PluginRunToTagToContent.side_effect = (
self._mock_plugin_run_to_tag_to_content
)
self._mock_multiplexer.AllSummaryMetadata.side_effect = (
self._mock_all_summary_metadata
)
self._mock_multiplexer.SummaryMetadata.side_effect = (
self._mock_summary_metadata
)
self._mock_tb_context.data_provider = data_provider.MultiplexerDataProvider(
self._mock_multiplexer, "/path/to/logs"
)
self.session_1_start_info_ = ""
self.session_2_start_info_ = ""
self.session_3_start_info_ = ""
def _mock_all_summary_metadata(self):
result = {}
hparams_content = {
"exp/session_1": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_1_start_info_
),
},
"exp/session_2": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_2_start_info_
),
},
"exp/session_3": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_3_start_info_
),
},
}
scalars_content = {
"exp/session_1": {"loss": b"", "accuracy": b""},
"exp/session_1/eval": {"loss": b"",},
"exp/session_1/train": {"loss": b"",},
"exp/session_2": {"loss": b"", "accuracy": b"",},
"exp/session_2/eval": {"loss": b"",},
"exp/session_2/train": {"loss": b"",},
"exp/session_3": {"loss": b"", "accuracy": b"",},
"exp/session_3/eval": {"loss": b"",},
"exp/session_3xyz/": {"loss2": b"",},
}
for (run, tag_to_content) in hparams_content.items():
result.setdefault(run, {})
for (tag, content) in tag_to_content.items():
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_TENSOR
m.plugin_data.plugin_name = metadata.PLUGIN_NAME
m.plugin_data.content = content
result[run][tag] = m
for (run, tag_to_content) in scalars_content.items():
result.setdefault(run, {})
for (tag, content) in tag_to_content.items():
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_SCALAR
m.plugin_data.plugin_name = scalars_metadata.PLUGIN_NAME
m.plugin_data.content = content
result[run][tag] = m
return result
def _mock_plugin_run_to_tag_to_content(self, plugin_name):
result = {}
for (
run,
tag_to_metadata,
) in self._mock_multiplexer.AllSummaryMetadata().items():
for (tag, metadata) in tag_to_metadata.items():
if metadata.plugin_data.plugin_name != plugin_name:
continue
result.setdefault(run, {})
result[run][tag] = metadata.plugin_data.content
return result
def _mock_summary_metadata(self, run, tag):
return self._mock_multiplexer.AllSummaryMetadata()[run][tag]
def test_experiment_with_experiment_tag(self):
experiment = """
description: 'Test experiment'
metric_infos: [
{ name: { tag: 'current_temp' } }
]
"""
run = "exp"
tag = metadata.EXPERIMENT_TAG
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_TENSOR
m.plugin_data.plugin_name = metadata.PLUGIN_NAME
m.plugin_data.content = self._serialized_plugin_data(
DATA_TYPE_EXPERIMENT, experiment
)
self._mock_multiplexer.AllSummaryMetadata.side_effect = None
self._mock_multiplexer.AllSummaryMetadata.return_value = {run: {tag: m}}
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
self.assertProtoEquals(
experiment,
ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
),
)
def test_experiment_without_experiment_tag(self):
self.session_1_start_info_ = """
hparams: [
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {number_value: 0.01}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 200}},
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'LATTICE'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 300}},
{key: 'lr' value: {number_value: 0.05}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_FLOAT64
},
hparam_infos: {
name: 'lr'
type: DATA_TYPE_FLOAT64
},
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'},
{string_value: 'LATTICE'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def test_experiment_without_experiment_tag_different_hparam_types(self):
self.session_1_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {string_value: '0.01'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'LATTICE'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {bool_value: true}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: '100.0'},
{string_value: 'true'}]
}
}
hparam_infos: {
name: 'lr'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: '0.01'},
{string_value: '0.02'}]
}
}
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'},
{string_value: 'LATTICE'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def test_experiment_without_experiment_tag_many_distinct_values(self):
self.session_1_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {string_value: '0.01'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {bool_value: true}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_STRING
}
hparam_infos: {
name: 'lr'
type: DATA_TYPE_STRING
}
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(
self._mock_tb_context, max_domain_discrete_len=1
)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123"),
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def _serialized_plugin_data(self, data_oneof_field, text_protobuffer):
oneof_type_dict = {
DATA_TYPE_EXPERIMENT: api_pb2.Experiment,
DATA_TYPE_SESSION_START_INFO: plugin_data_pb2.SessionStartInfo,
DATA_TYPE_SESSION_END_INFO: plugin_data_pb2.SessionEndInfo,
}
protobuffer = text_format.Merge(
text_protobuffer, oneof_type_dict[data_oneof_field]()
)
plugin_data = plugin_data_pb2.HParamsPluginData()
getattr(plugin_data, data_oneof_field).CopyFrom(protobuffer)
return metadata.create_summary_metadata(plugin_data).plugin_data.content
def _canonicalize_experiment(exp):
"""Sorts the repeated fields of an Experiment message."""
exp.hparam_infos.sort(key=operator.attrgetter("name"))
exp.metric_infos.sort(key=operator.attrgetter("name.group", "name.tag"))
for hparam_info in exp.hparam_infos:
if hparam_info.HasField("domain_discrete"):
hparam_info.domain_discrete.values.sort(
key=operator.attrgetter("string_value")
)
if __name__ == "__main__":
tf.test.main()
| 37.452128 | 84 | 0.569805 | ["Apache-2.0"] | aryaman4/tensorboard | tensorboard/plugins/hparams/backend_context_test.py | 14,082 | Python |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
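        # Illustrative: assuming a relay fee of 0.00001 per kB (a common regtest default),
        # feeTolerance = 2 * 0.00001 / 1000 = 0.00000002.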
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
        # test a fundrawtransaction that should not produce a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
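        # Hex offsets for the slice above: chars 0-7 = 4-byte version, 8-9 = vin count,
        # 10-73 = 32-byte prevout txid, 74-81 = 4-byte prevout index, 82-83 = scriptSig
        # length ("00" for the empty script); "0100" makes the scriptSig a single 0x00 byte.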
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 ESMA to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
| 40.438017 | 214 | 0.556019 | ["MIT"] | alik918/esmacoin | qa/rpc-tests/fundrawtransaction.py | 24,465 | Python |
# Copyright (c) 2017, John Skinner
import unittest
import numpy as np
import arvet.database.tests.database_connection as dbconn
from arvet.config.path_manager import PathManager
import arvet.batch_analysis.task as task
class MockTask(task.Task):
def run_task(self, path_manager: PathManager):
pass
def get_unique_name(self) -> str:
return "mock_task_{0}".format(self.pk)
class TestTaskDatabase(unittest.TestCase):
@classmethod
def setUpClass(cls):
dbconn.connect_to_test_db()
def setUp(self):
        # Remove the collection at the start of the test, so that we're sure it's empty
task.Task._mongometa.collection.drop()
@classmethod
def tearDownClass(cls):
# Clean up after ourselves by dropping the collection for this model
task.Task._mongometa.collection.drop()
def test_stores_and_loads_simple(self):
obj = MockTask(state=task.JobState.UNSTARTED)
obj.save()
# Load all the entities
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
all_entities[0].delete()
def test_stores_and_loads_all_params(self):
obj = MockTask(
state=task.JobState.RUNNING,
node_id='test-hpc',
job_id=15,
num_cpus=3,
num_gpus=150,
memory_requirements='4KB',
expected_duration='100:00:00'
)
obj.save()
# Load all the entities
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
all_entities[0].delete()
def test_stores_and_loads_after_change_state(self):
obj = MockTask(
state=task.JobState.RUNNING,
node_id='test-hpc',
job_id=15,
num_cpus=3,
num_gpus=150,
memory_requirements='4KB',
expected_duration='100:00:00'
)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_failed()
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_started('test_node', 143)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_complete()
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
class TestTask(unittest.TestCase):
def test_mark_job_started_changes_unstarted_to_running(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
def test_mark_job_started_doesnt_affect_already_running_jobs(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
def test_mark_job_started_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_failed_changes_running_to_unstarted(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_failed_increases_failed_count(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5, failure_count=4)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertEqual(5, subject.failure_count)
def test_mark_job_failed_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_finished)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_finished)
self.assertEqual(0, subject.failure_count)
def test_mark_job_failed_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_failed()
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_complete_changes_running_to_finished(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_complete()
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_complete_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertFalse(subject.is_finished)
subject.mark_job_complete()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertFalse(subject.is_finished)
def test_mark_job_complete_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_complete()
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_change_job_id_doesnt_affect_state(self):
subject = MockTask(state=task.JobState.RUNNING)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
subject.change_job_id('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
def test_change_job_id_changes_job_info(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
subject.change_job_id('test', 12)
self.assertEqual('test', subject.node_id)
self.assertEqual(12, subject.job_id)
def test_change_job_id_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
subject.change_job_id('test', 12)
self.assertTrue(subject.is_unstarted)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_change_job_id_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE, node_id='external', job_id=3)
self.assertTrue(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
subject.change_job_id('test', 12)
self.assertTrue(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
def test_state_remains_consistent(self):
random = np.random.RandomState(144135)
subject = MockTask(state=task.JobState.UNSTARTED)
for idx in range(50):
change = random.randint(0, 4 if idx > 30 else 3)
if idx > 30 and change == 3:
subject.mark_job_complete()
elif change == 2:
subject.change_job_id('external', random.randint(0, 1000))
elif change == 1:
subject.mark_job_started('test', random.randint(0, 1000))
else:
subject.mark_job_failed()
# Make sure that the node id and job id match the state
if subject.is_unstarted or subject.is_finished:
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
else:
self.assertIsNotNone(subject.node_id)
self.assertIsNotNone(subject.job_id)
| 38.830769 | 98 | 0.682448 | ["BSD-2-Clause"] | jskinn/arvet | arvet/batch_analysis/tests/test_task.py | 10,096 | Python |
from datetime import datetime
import uuid
from django.db import models
import django.forms as forms
import django_filters.fields as filter_fields
from apps.ineedstudent.models import Hospital
from .filters import StudentJobRequirementsFilter
from .models import * # noqa: F401, F403
from .models import COUNTRY_CODE_CHOICES
class LocationFilterModel(models.Model):
plz = models.CharField(max_length=5, null=True)
distance = models.IntegerField(default=0)
countrycode = models.CharField(max_length=2, choices=COUNTRY_CODE_CHOICES, default="DE",)
uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
class StudentListFilterModel(models.Model):
hospital = models.ForeignKey(Hospital, on_delete=models.CASCADE)
location = LocationFilterModel
uuid = models.CharField(max_length=100, blank=True, unique=True, default=uuid.uuid4)
registration_date = models.DateTimeField(default=datetime.now, blank=True, null=True)
name = models.CharField(max_length=100)
jrf = StudentJobRequirementsFilter()
for f_name, jr_filter in jrf.base_filters.items():
if type(jr_filter.field) == forms.NullBooleanField:
StudentListFilterModel.add_to_class(
f_name, models.NullBooleanField(default=None, null=True)
)
elif type(jr_filter.field) == forms.DecimalField:
StudentListFilterModel.add_to_class(f_name, models.IntegerField(default=0))
elif type(jr_filter.field) == filter_fields.ChoiceField:
StudentListFilterModel.add_to_class(
f_name, models.IntegerField(default=0, choices=jr_filter.field.choices)
)
elif type(jr_filter.field) == forms.DateField:
StudentListFilterModel.add_to_class(
f_name, models.DateField(null=True, default=datetime.now)
)
else:
raise ValueError(
"I do not know what to do with field type '%s' for '%s'"
% (type(jr_filter.field), f_name)
)
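# Note: Model.add_to_class() contributes one model field per job-requirement filter at
# import time, so StudentListFilterModel ends up with one column per filter. As a
# hypothetical example, a NullBooleanField filter named 'night_shifts' would add a
# models.NullBooleanField(default=None, null=True) column of the same name.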
| 34.666667 | 93 | 0.730263 | ["MIT"] | match4healthcare/match4healthcare | backend/apps/iamstudent/models_persistent_filter.py | 1,976 | Python |
# coding: utf-8
"""
vautoscaling
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vautoscaling.model.process import Process # noqa: F401,E501
class ResumeProcessesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'return_code': 'str',
'return_message': 'str',
'total_rows': 'int',
'process_list': 'list[Process]'
}
attribute_map = {
'request_id': 'requestId',
'return_code': 'returnCode',
'return_message': 'returnMessage',
'total_rows': 'totalRows',
'process_list': 'processList'
}
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, process_list=None): # noqa: E501
"""ResumeProcessesResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._return_code = None
self._return_message = None
self._total_rows = None
self._process_list = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if return_code is not None:
self.return_code = return_code
if return_message is not None:
self.return_message = return_message
if total_rows is not None:
self.total_rows = total_rows
if process_list is not None:
self.process_list = process_list
@property
def request_id(self):
"""Gets the request_id of this ResumeProcessesResponse. # noqa: E501
:return: The request_id of this ResumeProcessesResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ResumeProcessesResponse.
:param request_id: The request_id of this ResumeProcessesResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def return_code(self):
"""Gets the return_code of this ResumeProcessesResponse. # noqa: E501
:return: The return_code of this ResumeProcessesResponse. # noqa: E501
:rtype: str
"""
return self._return_code
@return_code.setter
def return_code(self, return_code):
"""Sets the return_code of this ResumeProcessesResponse.
:param return_code: The return_code of this ResumeProcessesResponse. # noqa: E501
:type: str
"""
self._return_code = return_code
@property
def return_message(self):
"""Gets the return_message of this ResumeProcessesResponse. # noqa: E501
:return: The return_message of this ResumeProcessesResponse. # noqa: E501
:rtype: str
"""
return self._return_message
@return_message.setter
def return_message(self, return_message):
"""Sets the return_message of this ResumeProcessesResponse.
:param return_message: The return_message of this ResumeProcessesResponse. # noqa: E501
:type: str
"""
self._return_message = return_message
@property
def total_rows(self):
"""Gets the total_rows of this ResumeProcessesResponse. # noqa: E501
:return: The total_rows of this ResumeProcessesResponse. # noqa: E501
:rtype: int
"""
return self._total_rows
@total_rows.setter
def total_rows(self, total_rows):
"""Sets the total_rows of this ResumeProcessesResponse.
:param total_rows: The total_rows of this ResumeProcessesResponse. # noqa: E501
:type: int
"""
self._total_rows = total_rows
@property
def process_list(self):
"""Gets the process_list of this ResumeProcessesResponse. # noqa: E501
:return: The process_list of this ResumeProcessesResponse. # noqa: E501
:rtype: list[Process]
"""
return self._process_list
@process_list.setter
def process_list(self, process_list):
"""Sets the process_list of this ResumeProcessesResponse.
:param process_list: The process_list of this ResumeProcessesResponse. # noqa: E501
:type: list[Process]
"""
self._process_list = process_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResumeProcessesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.453704 | 129 | 0.607062 | ["MIT"] | NaverCloudPlatform/ncloud-sdk-python | lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py | 6,146 | Python |
#!/usr/bin/env python3
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import css_checker
from os import path as os_path
import re
from sys import path as sys_path
import unittest
_HERE = os_path.dirname(os_path.abspath(__file__))
sys_path.append(os_path.join(_HERE, '..', '..'))
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi, MockFile
class CssCheckerTest(unittest.TestCase):
def setUp(self):
super(CssCheckerTest, self).setUp()
self.input_api = MockInputApi()
self.checker = css_checker.CSSChecker(self.input_api, MockOutputApi())
def _create_file(self, contents, filename):
self.input_api.files.append(MockFile(filename, contents.splitlines()))
def VerifyContentIsValid(self, contents, filename='fake.css'):
self._create_file(contents, filename)
results = self.checker.RunChecks()
self.assertEqual(len(results), 0)
def VerifyContentsProducesOutput(self, contents, output, filename='fake.css'):
self._create_file(contents, filename)
results = self.checker.RunChecks()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].message, filename + ':\n' + output.strip())
def testCssAlphaWithAtBlock(self):
self.VerifyContentsProducesOutput("""
<include src="../shared/css/cr/ui/overlay.css">
<include src="chrome://resources/totally-cool.css" />
/* A hopefully safely ignored comment and @media statement. /**/
@media print {
div {
display: block;
color: red;
}
}
.rule {
z-index: 5;
<if expr="not is macosx">
background-image: url(chrome://resources/BLAH); /* TODO(dbeam): Fix this. */
background-color: rgb(235, 239, 249);
</if>
<if expr="is_macosx">
background-color: white;
background-image: url(chrome://resources/BLAH2);
</if>
color: black;
}
<if expr="is_macosx">
.language-options-right {
visibility: hidden;
opacity: 1; /* TODO(dbeam): Fix this. */
}
</if>
@media (prefers-color-scheme: dark) {
a[href] {
z-index: 3;
color: blue;
}
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
display: block;
color: red;
z-index: 5;
color: black;
z-index: 3;
color: blue;""")
def testCssStringWithAt(self):
self.VerifyContentIsValid("""
#logo {
background-image: url(images/google_logo.png@2x);
}
body.alternate-logo #logo {
-webkit-mask-image: url(images/google_logo.png@2x);
background: none;
}
div {
margin-inline-start: 5px;
}
.stuff1 {
}
.stuff2 {
}
""")
def testCssAlphaWithNonStandard(self):
self.VerifyContentsProducesOutput("""
div {
/* A hopefully safely ignored comment and @media statement. /**/
color: red;
-webkit-margin-before-collapse: discard;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
color: red;
-webkit-margin-before-collapse: discard;""")
def testCssAlphaWithLongerDashedProps(self):
self.VerifyContentsProducesOutput("""
div {
border-inline-start: 5px; /* A hopefully removed comment. */
border: 5px solid red;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
border-inline-start: 5px;
border: 5px solid red;""")
def testCssAlphaWithVariables(self):
self.VerifyContentIsValid("""
#id {
--zzyxx-xylophone: 3px;
--aardvark-animal: var(--zzyxz-xylophone);
}
""")
def testCssBracesHaveSpaceBeforeAndNothingAfter(self):
self.VerifyContentsProducesOutput("""
/* Hello! */div/* Comment here*/{
display: block;
}
blah /* hey! */
{
rule: value;
}
.mixed-in {
display: none;
}
.this.is { /* allowed */
rule: value;
}""", """
- Start braces ({) end a selector, have a space before them and no rules after.
div{
{""")
def testMixins(self):
self.VerifyContentsProducesOutput(
"""
.mixed-in {
--css-mixin: {
color: red;
}
}""", """
- Avoid using CSS mixins. Use CSS shadow parts, CSS variables, or common CSS \
classes instead.
--css-mixin: {""")
def testCssClassesUseDashes(self):
self.VerifyContentsProducesOutput("""
.className,
.ClassName,
.class-name /* We should not catch this. */,
.class_name,
[i18n-values*='.innerHTML:'] {
display: block;
}""", """
- Classes use .dash-form.
.className,
.ClassName,
.class_name,""")
def testCssCloseBraceOnNewLine(self):
self.VerifyContentsProducesOutput("""
@-webkit-keyframe blah {
from { height: rotate(-10turn); }
100% { height: 500px; }
}
#id { /* $i18n{*} and $i18nRaw{*} should be ignored. */
rule: $i18n{someValue};
rule2: $i18nRaw{someValue};
}
#rule {
rule: value; }""", """
- Always put a rule closing brace (}) on a new line.
rule: value; }""")
def testCssColonsHaveSpaceAfter(self):
self.VerifyContentsProducesOutput("""
div:not(.class):not([attr=5]), /* We should not catch this. */
div:not(.class):not([attr]) /* Nor this. */ {
background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */
background: -webkit-linear-gradient(left, red,
80% blah blee blar);
color: red;
display:block;
}""", """
- Colons (:) should have a space after them.
display:block;
- Don't use data URIs in source files. Use grit instead.
background: url(data:image/jpeg,asdfasdfsadf);""")
def testCssFavorSingleQuotes(self):
self.VerifyContentsProducesOutput("""
html[dir="rtl"] body,
html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ {
font-family: "Open Sans";
<if expr="is_macosx">
blah: blee;
</if>
}""", """
- Use single quotes (') instead of double quotes (") in strings.
html[dir="rtl"] body,
font-family: "Open Sans";""")
def testCssHexCouldBeShorter(self):
self.VerifyContentsProducesOutput("""
#abc,
#abc-,
#abc-ghij,
#abcdef-,
#abcdef-ghij,
#aaaaaa,
#bbaacc {
background-color: #336699; /* Ignore short hex rule if not gray. */
color: #999999;
color: #666;
}""", """
- Use abbreviated hex (#rgb) when in form #rrggbb.
color: #999999; (replace with #999)
- Use rgb() over #hex when not a shade of gray (like #333).
background-color: #336699; (replace with rgb(51, 102, 153))""")
def testCssUseMillisecondsForSmallTimes(self):
self.VerifyContentsProducesOutput("""
.transition-0s /* This is gross but may happen. */ {
transform: one 0.2s;
transform: two .1s;
transform: tree 1s;
transform: four 300ms;
}""", """
- Use milliseconds for time measurements under 1 second.
transform: one 0.2s; (replace with 200ms)
transform: two .1s; (replace with 100ms)""")
def testCssNoDataUrisInSourceFiles(self):
self.VerifyContentsProducesOutput("""
img {
background: url( data:image/jpeg,4\/\/350|\/|3|2 );
}""", """
- Don't use data URIs in source files. Use grit instead.
background: url( data:image/jpeg,4\/\/350|\/|3|2 );""")
def testCssNoMixinShims(self):
self.VerifyContentsProducesOutput("""
:host {
--good-property: red;
--not-okay-mixin_-_not-okay-property: green;
}""", """
- Don't override custom properties created by Polymer's mixin shim. Set \
mixins or documented custom properties directly.
--not-okay-mixin_-_not-okay-property: green;""")
def testCssNoQuotesInUrl(self):
self.VerifyContentsProducesOutput("""
img {
background: url('chrome://resources/images/blah.jpg');
background: url("../../folder/hello.png");
}""", """
- Use single quotes (') instead of double quotes (") in strings.
background: url("../../folder/hello.png");
- Don't use quotes in url().
background: url('chrome://resources/images/blah.jpg');
background: url("../../folder/hello.png");""")
def testCssOneRulePerLine(self):
self.VerifyContentsProducesOutput("""
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type,
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~
input[type='checkbox']:not([hidden]),
div {
background: url(chrome://resources/BLAH);
rule: value; /* rule: value; */
rule: value; rule: value;
}
""", """
- One rule per line (what not to do: color: red; margin: 0;).
rule: value; rule: value;""")
def testCssOneSelectorPerLine(self):
self.VerifyContentsProducesOutput(
"""
a,
div,a,
div,/* Hello! */ span,
#id.class([dir=rtl]):not(.class):any(a, b, d),
div :is(:not(a), #b, .c) {
rule: value;
}
a,
div,a {
some-other: rule here;
}""", """
- One selector per line (what not to do: a, b {}).
div,a,
div, span,
div,a {""")
def testCssPseudoElementDoubleColon(self):
self.VerifyContentsProducesOutput("""
a:href,
br::after,
::-webkit-scrollbar-thumb,
a:not([empty]):hover:focus:active, /* shouldn't catch here and above */
abbr:after,
.tree-label:empty:after,
b:before,
:-WebKit-ScrollBar {
rule: value;
}""", """
- Pseudo-elements should use double colon (i.e. ::after).
:after (should be ::after)
:after (should be ::after)
:before (should be ::before)
:-WebKit-ScrollBar (should be ::-WebKit-ScrollBar)
""")
def testCssRgbIfNotGray(self):
self.VerifyContentsProducesOutput(
"""
#abc,
#aaa,
#aabbcc {
background: -webkit-linear-gradient(left, from(#abc), to(#def));
color: #bad;
color: #bada55;
}""", """
- Use rgb() over #hex when not a shade of gray (like #333).
background: -webkit-linear-gradient(left, from(#abc), to(#def)); """
"""(replace with rgb(170, 187, 204), rgb(221, 238, 255))
color: #bad; (replace with rgb(187, 170, 221))
color: #bada55; (replace with rgb(186, 218, 85))""")
def testPrefixedLogicalAxis(self):
self.VerifyContentsProducesOutput("""
.test {
-webkit-logical-height: 50%;
-webkit-logical-width: 50%;
-webkit-max-logical-height: 200px;
-webkit-max-logical-width: 200px;
-webkit-min-logical-height: 100px;
-webkit-min-logical-width: 100px;
}
""", """
- Unprefix logical axis property.
-webkit-logical-height: 50%; (replace with block-size)
-webkit-logical-width: 50%; (replace with inline-size)
-webkit-max-logical-height: 200px; (replace with max-block-size)
-webkit-max-logical-width: 200px; (replace with max-inline-size)
-webkit-min-logical-height: 100px; (replace with min-block-size)
-webkit-min-logical-width: 100px; (replace with min-inline-size)""")
def testPrefixedLogicalSide(self):
self.VerifyContentsProducesOutput("""
.test {
-webkit-border-after: 1px solid blue;
-webkit-border-after-color: green;
-webkit-border-after-style: dotted;
-webkit-border-after-width: 10px;
-webkit-border-before: 2px solid blue;
-webkit-border-before-color: green;
-webkit-border-before-style: dotted;
-webkit-border-before-width: 20px;
-webkit-border-end: 3px solid blue;
-webkit-border-end-color: green;
-webkit-border-end-style: dotted;
-webkit-border-end-width: 30px;
-webkit-border-start: 4px solid blue;
-webkit-border-start-color: green;
-webkit-border-start-style: dotted;
-webkit-border-start-width: 40px;
-webkit-margin-after: 1px;
-webkit-margin-after-collapse: discard;
-webkit-margin-before: 2px;
-webkit-margin-before-collapse: discard;
-webkit-margin-end: 3px;
-webkit-margin-end-collapse: discard;
-webkit-margin-start: 4px;
-webkit-margin-start-collapse: discard;
-webkit-padding-after: 1px;
-webkit-padding-before: 2px;
-webkit-padding-end: 3px;
-webkit-padding-start: 4px;
}
""", """
- Unprefix logical side property.
-webkit-border-after: 1px solid blue; (replace with border-block-end)
-webkit-border-after-color: green; (replace with border-block-end-color)
-webkit-border-after-style: dotted; (replace with border-block-end-style)
-webkit-border-after-width: 10px; (replace with border-block-end-width)
-webkit-border-before: 2px solid blue; (replace with border-block-start)
-webkit-border-before-color: green; (replace with border-block-start-color)
-webkit-border-before-style: dotted; (replace with border-block-start-style)
-webkit-border-before-width: 20px; (replace with border-block-start-width)
-webkit-border-end: 3px solid blue; (replace with border-inline-end)
-webkit-border-end-color: green; (replace with border-inline-end-color)
-webkit-border-end-style: dotted; (replace with border-inline-end-style)
-webkit-border-end-width: 30px; (replace with border-inline-end-width)
-webkit-border-start: 4px solid blue; (replace with border-inline-start)
-webkit-border-start-color: green; (replace with border-inline-start-color)
-webkit-border-start-style: dotted; (replace with border-inline-start-style)
-webkit-border-start-width: 40px; (replace with border-inline-start-width)
-webkit-margin-after: 1px; (replace with margin-block-end)
-webkit-margin-before: 2px; (replace with margin-block-start)
-webkit-margin-end: 3px; (replace with margin-inline-end)
-webkit-margin-start: 4px; (replace with margin-inline-start)
-webkit-padding-after: 1px; (replace with padding-block-end)
-webkit-padding-before: 2px; (replace with padding-block-start)
-webkit-padding-end: 3px; (replace with padding-inline-end)
-webkit-padding-start: 4px; (replace with padding-inline-start)""")
def testStartEndInsteadOfLeftRight(self):
self.VerifyContentsProducesOutput("""
.inline-node {
--var-is-ignored-left: 10px;
--var-is-ignored-right: 10px;
border-left-color: black;
border-right: 1px solid blue; /* csschecker-disable-line left-right */
margin-right: 5px;
padding-left: 10px; /* csschecker-disable-line some-other-thing */
text-align: right;
}""", """
- Use -start/end instead of -left/right (https://goo.gl/gQYY7z, add /* csschecker-disable-line left-right */ to suppress)
border-left-color: black; (replace with border-inline-start-color)
margin-right: 5px; (replace with margin-inline-end)
padding-left: 10px; (replace with padding-inline-start)
text-align: right; (replace with text-align: end)
""")
def testCssZeroWidthLengths(self):
self.VerifyContentsProducesOutput("""
@-webkit-keyframe anim {
0% { /* Ignore key frames */
width: 0px;
}
10% {
width: 10px;
}
50% { background-image: url(blah.svg); }
100% {
width: 100px;
}
}
#logo {
background-image: url(images/google_logo.png@2x);
}
body.alternate-logo #logo {
-webkit-mask-image: url(images/google_logo.png@2x);
}
/* http://crbug.com/359682 */
#spinner-container #spinner {
-webkit-animation-duration: 1.0s;
background-image: url(images/google_logo0.svg);
}
.media-button.play > .state0.active,
.media-button[state='0'] > .state0.normal /* blah */, /* blee */
.media-button[state='0']:not(.disabled):hover > .state0.hover {
-webkit-animation: anim 0s;
-webkit-animation-duration: anim 0ms;
-webkit-transform: scale(0%);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */
opacity: .0;
opacity: 0.0;
opacity: 0.;
}
@page {
border-width: 0mm;
height: 0cm;
width: 0in;
}""", """
- Use "0" for zero-width lengths (i.e. 0px -> 0)
width: 0px;
-webkit-transform: scale(0%);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
opacity: .0;
opacity: 0.0;
opacity: 0.;
border-width: 0mm;
height: 0cm;
width: 0in;
""")
def testInlineStyleInHtml(self):
self.VerifyContentsProducesOutput("""<!doctype html>
<html>
<head>
<!-- Don't warn about problems outside of style tags
html,
body {
margin: 0;
height: 100%;
}
-->
<style>
body {
flex-direction:column;
}
</style>
</head>
</html>""", """
- Colons (:) should have a space after them.
flex-direction:column;
""", filename='test.html')
def testInlineStyleInHtmlWithIncludes(self):
self.VerifyContentsProducesOutput("""<!doctype html>
<html>
<style include="fake-shared-css other-shared-css">
body {
flex-direction:column;
}
</style>
</head>
</html>""", """
- Colons (:) should have a space after them.
flex-direction:column;
""", filename='test.html')
def testInlineStyleInHtmlWithTagsInComments(self):
self.VerifyContentsProducesOutput("""<!doctype html>
<html>
<style>
body {
/* You better ignore the <tag> in this comment! */
flex-direction:column;
}
</style>
</head>
</html>""", """
- Colons (:) should have a space after them.
flex-direction:column;
""", filename='test.html')
def testRemoveAtBlocks(self):
self.assertEqual(self.checker.RemoveAtBlocks("""
@media (prefers-color-scheme: dark) {
.magic {
color: #000;
}
}"""), """
.magic {
color: #000;
}""")
self.assertEqual(self.checker.RemoveAtBlocks("""
@media (prefers-color-scheme: dark) {
.magic {
--mixin-definition: {
color: red;
};
}
}"""), """
.magic {
--mixin-definition: {
color: red;
};
}""")
self.assertEqual(self.checker.RemoveAtBlocks("""
@keyframes jiggle {
from { left: 0; }
50% { left: 100%; }
to { left: 10%; }
}"""), """
from { left: 0; }
50% { left: 100%; }
to { left: 10%; }""")
self.assertEqual(self.checker.RemoveAtBlocks("""
@media print {
.rule1 {
color: black;
}
.rule2 {
margin: 1in;
}
}"""), """
.rule1 {
color: black;
}
.rule2 {
margin: 1in;
}""")
self.assertEqual(self.checker.RemoveAtBlocks("""
@media (prefers-color-scheme: dark) {
.rule1 {
color: gray;
}
.rule2 {
margin: .5in;
}
@keyframe dark-fade {
0% { background: black; }
100% { background: darkgray; }
}
}"""), """
.rule1 {
color: gray;
}
.rule2 {
margin: .5in;
}
0% { background: black; }
100% { background: darkgray; }""")
self.assertEqual(self.checker.RemoveAtBlocks("""
@-webkit-keyframe anim {
0% { /* Ignore key frames */
width: 0px;
}
10% {
width: 10px;
}
50% { background-image: url(blah.svg); }
100% {
width: 100px;
}
}"""), """
0% { /* Ignore key frames */
width: 0px;
}
10% {
width: 10px;
}
50% { background-image: url(blah.svg); }
100% {
width: 100px;
}""")
if __name__ == '__main__':
unittest.main()
| 27.007418 | 121 | 0.658683 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | Alan-love/chromium | tools/web_dev_style/css_checker_test.py | 18,203 | Python |
from typing import Dict, List, Optional
from db.sql.dal.general import sanitize
from db.sql.utils import query_to_dicts
class Region:
admin: str
admin_id: str
region_type: str
country: str
country_id: str
admin1: Optional[str]
admin1_id: Optional[str]
admin2: Optional[str]
admin2_id: Optional[str]
admin3: Optional[str]
admin3_id: Optional[str]
region_coordinate: Optional[str]
alias: Optional[str]
COUNTRY = 'Q6256'
ADMIN1 = 'Q10864048'
ADMIN2 = 'Q13220204'
ADMIN3 = 'Q13221722'
def __init__(self, **kwargs):
self.admin = kwargs['admin']
self.admin_id = kwargs['admin_id']
self.region_type = kwargs['region_type']
self.country = kwargs['country']
self.country_id = kwargs['country_id']
self.admin1 = kwargs.get('admin1')
self.admin1_id = kwargs.get('admin1_id')
self.admin2 = kwargs.get('admin2')
self.admin2_id = kwargs.get('admin2_id')
self.admin3 = kwargs.get('admin3')
self.admin3_id = kwargs.get('admin3_id')
self.region_coordinate = kwargs.get('region_coordinate')
self.alias = kwargs.get('alias')
        # country, admin1 and admin2 queries return both admin and country,admin1,admin2 fields.
        # admin3 queries do not, so we need to fill these fields in ourselves
        if self.region_type == Region.ADMIN3:
            self.admin3_id, self.admin3 = self.admin_id, self.admin
def __getitem__(self, key: str) -> str:
return getattr(self, key)
def query_country_qnodes(countries: List[str]) -> Dict[str, Optional[str]]:
# Translates countries to Q-nodes. Returns a dictionary of each input country and its QNode (None if not found)
# We look for countries in a case-insensitive fashion.
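    # Illustrative example (the Q-node value here is hypothetical):
    #   query_country_qnodes(['Ethiopia', 'Atlantis']) -> {'Ethiopia': 'Q115', 'Atlantis': None}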
if not countries:
return {}
regions = query_countries(countries)
result_dict: Dict[str, Optional[str]] = {region.country: region.country_id for region in regions}
# The result dictionary contains all the countries we have found, we need to add those we did not find
found_countries = set([country.lower() for country in result_dict.keys()])
for country in countries:
if country.lower() not in found_countries:
result_dict[country] = None
return result_dict
def list_to_where(field: str, elements: List[str], lower=False) -> Optional[str]:
if not elements:
return None
if lower:
elements = [element.lower() for element in elements]
field = f"LOWER({field})"
    sanitized = [sanitize(element) for element in elements]
    quoted = [f"'{element}'" for element in sanitized]
joined = ', '.join(quoted)
return f"{field} IN ({joined})"
def region_where_clause(region_field: str, region_list: List[str], region_id_field: str,
region_id_list: List[str], alias_field: Optional[str] = None) -> str:
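    # Build a WHERE fragment that matches regions by label, by Q-node id, or (optionally) by alias.
    # When no filters are requested, return the always-true "1=1" so callers can AND it in unconditionally.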
if not region_list and not region_id_list:
return "1=1"
region_where = list_to_where(region_field, region_list, lower=True) or "0=1"
if alias_field:
alias_where = list_to_where(alias_field, region_list, lower=True) or "0=1"
else:
alias_where = "0=1"
region_id_where = list_to_where(region_id_field, region_id_list) or "0=1"
return f'({region_where} OR {region_id_where} OR {alias_where})'
def _query_regions(query: str) -> List[Region]:
dicts = query_to_dicts(query)
return [Region(**d) for d in dicts]
def query_countries(countries: List[str] = [], country_ids: List[str] = []) -> List[Region]:
""" Returns a list of countries:
If countries or country_ids are not empty, only those countries are returned (all of those in both lists)
Otherwise, all countries are returned
"""
where = region_where_clause('s_country_label.text', countries, 'e_country.node1', country_ids)
query = f'''
SELECT e_country.node1 AS admin_id,
s_country_label.text AS admin,
'Q6256' AS region_type,
e_country.node1 AS country_id,
s_country_label.text AS country,
NULL as admin1_id,
NULL as admin1,
NULL as admin2_id,
NULL as admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_country
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node1=e_country_label.node1 AND e_country_label.label='label')
WHERE e_country.label='P31' AND e_country.node2='Q6256' AND {where}
ORDER BY country
'''
return _query_regions(query)
def query_admin1s(country: Optional[str] = None, country_id: Optional[str] = None, admin1s: List[str] = [],
admin1_ids: List[str] = []) -> List[Region]:
"""
Returns a list of admin1s. If country or country_id is specified, return the admin1s only of that country.
If admin1s or admin1_ids are provided, only those admins are returned.
If all arguments are empty, all admin1s in the system are returned.
"""
if country and country_id:
raise ValueError('Only one of country, country_id may be specified')
if country_id:
country_where = f"e_country.node2='{country_id}'"
elif country: # We are certain country is not None here, but need an `elif` because mypy isn't certain
country_where = f"LOWER(s_country_label.text)='{country.lower()}'"
else:
country_where = "1=1"
admin1_where = region_where_clause('s_admin1_label.text', admin1s, 'e_admin1.node1', admin1_ids)
query = f'''
SELECT e_admin1.node1 AS admin_id,
s_admin1_label.text AS admin,
'Q10864048' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node1 as admin1_id,
s_admin1_label.text as admin1,
NULL as admin2_id,
NULL as admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_admin1
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node1=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node1 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin1.label='P31' AND e_admin1.node2='Q10864048' AND {country_where} AND {admin1_where}
ORDER BY admin1
'''
return _query_regions(query)
def query_admin2s(admin1: Optional[str] = None, admin1_id: Optional[str] = None, admin2s: List[str] = [],
admin2_ids: List[str] = []) -> List[Region]:
"""
Returns a list of admin2s. If admin1 or admin1_id is specified, return the admin2s only of that admin1.
If admin2s or admin2_ids are provided, only those admins are returned.
If all arguments are empty, all admin2s in the system are returned.
"""
if admin1 and admin1_id:
raise ValueError('Only one of admin1, admin1_id may be specified')
if admin1_id:
admin1_where = f"e_admin1.node2='{admin1_id}'"
elif admin1:
admin1_where = f"LOWER(s_admin1_label.text)=LOWER('{admin1}')"
else:
admin1_where = "1=1"
admin2_where = region_where_clause('s_admin2_label.text', admin2s, 'e_admin2.node1', admin2_ids)
query = f'''
SELECT e_admin2.node1 AS admin_id,
s_admin2_label.text AS admin,
'Q13220204' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node2 AS admin1_id,
s_admin1_label.text AS admin1,
e_admin2.node1 AS admin2_id,
s_admin2_label.text AS admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_admin2
JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
ON (e_admin2.node1=e_admin2_label.node1 AND e_admin2_label.label='label')
JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin2.label='P31' AND e_admin2.node2='Q13220204' AND {admin1_where} AND {admin2_where}
ORDER BY admin2
'''
return _query_regions(query)
def query_admin3s(admin2: Optional[str] = None, admin2_id: Optional[str] = None, admin3s: List[str] = [],
admin3_ids: List[str] = [], debug=False) -> List[Region]:
"""
Returns a list of admin3s. If admin2 or admin2_id is specified, return the admin3s only of that admin2.
If admin3s or admin3_ids are provided, only those admins are returned.
If all arguments are empty, all admin3s in the system are returned.
"""
if admin2 and admin2_id:
raise ValueError('Only one of admin2, admin2_id may be specified')
if admin2_id:
admin2_where = f"e_admin2.node2='{admin2_id}'"
elif admin2:
admin2_where = f"LOWER(s_admin2_label.text)=LOWER('{admin2}')"
else:
admin2_where = "1=1"
admin3_where = region_where_clause('s_admin3_label.text', admin3s, 'e_admin3.node1', admin3_ids)
query = f'''
SELECT e_admin3.node1 AS admin_id,
s_admin3_label.text AS admin,
'Q13221722' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node2 AS admin1_id,
s_admin1_label.text AS admin1,
e_admin2.node2 AS admin2_id,
s_admin2_label.text AS admin2,
e_admin2.node1 AS admin3_id,
s_admin3_label.text AS admin3
FROM
edges e_admin3
JOIN edges e_admin3_label JOIN strings s_admin3_label ON (e_admin3_label.id=s_admin3_label.edge_id)
ON (e_admin3.node1=e_admin3_label.node1 AND e_admin3_label.label='label')
JOIN edges e_admin2 ON (e_admin2.node1=e_admin3.node1 AND e_admin2.label='P2006190002')
JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin3.label='P31' AND e_admin3.node2='Q13221722' AND {admin2_where} AND {admin3_where}
ORDER BY admin3
'''
if debug:
print(query)
return _query_regions(query)
def query_admins(admins: List[str] = [], admin_ids: List[str] = [], debug=False) -> List[Region]:
where = region_where_clause('s_region_label.text', admins, 'e_region.node1', admin_ids, 's_region_alias.text')
query = f'''
SELECT e_region.node1 AS admin_id, s_region_label.text AS admin, e_region.node2 AS region_type,
e_country.node2 AS country_id, s_country_label.text AS country,
e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1,
e_admin2.node2 AS admin2_id, s_admin2_label.text AS admin2,
'POINT(' || c_coordinate.longitude || ' ' || c_coordinate.latitude || ')' as region_coordinate,
s_region_alias.text AS alias
FROM edges e_region
JOIN edges e_region_label ON (e_region_label.node1=e_region.node1 AND e_region_label.label='label')
JOIN strings s_region_label ON (e_region_label.id=s_region_label.edge_id)
JOIN edges e_country
JOIN edges e_country_label
JOIN strings s_country_label
ON (s_country_label.edge_id=e_country_label.id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
ON (e_region.node1=e_country.node1 AND e_country.label='P17')
LEFT JOIN edges e_admin1
JOIN edges e_admin1_label
JOIN strings s_admin1_label
ON (s_admin1_label.edge_id=e_admin1_label.id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
ON (e_region.node1=e_admin1.node1 AND e_admin1.label='P2006190001')
LEFT JOIN edges e_admin2
JOIN edges e_admin2_label
JOIN strings s_admin2_label
ON (s_admin2_label.edge_id=e_admin2_label.id)
ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
ON (e_region.node1=e_admin2.node1 AND e_admin2.label='P2006190002')
LEFT JOIN edges e_coordinate
JOIN coordinates c_coordinate
ON (c_coordinate.edge_id=e_coordinate.id)
ON (e_region.node1=e_coordinate.node1 AND e_coordinate.label='P625')
LEFT JOIN edges e_region_alias
JOIN strings s_region_alias
ON (s_region_alias.edge_id=e_region_alias.id)
ON (e_region.node1=e_region_alias.node1 AND e_region_alias.label='alias')
WHERE e_region.label='P31' AND e_region.node2 IN ('Q6256', 'Q10864048', 'Q13220204', 'Q13221722') AND {where}
'''
if debug:
print(query)
return _query_regions(query)
| 44.449686 | 115 | 0.678882 | [
"MIT"
] | Otamio/datamart-api | db/sql/dal/regions.py | 14,135 | Python |
# -*- coding: utf-8 -*-
import json
import csv
import scrapy
import re
from locations.items import GeojsonPointItem
COOKIES = {
"bm_sz": "04B124C1C96D68082A9F61BAAAF0B6D5~YAAQdjsvF22E8Xl6AQAACr1VfAxPEt+enarZyrOZrBaNvyuX71lK5QPuDR/FgDEWBZVMRhjiIf000W7Z1PiAjxobrz2Y5LcYMH3CvUNvpdS3MjVLUMGwMEBCf9L5nD5Gs9ho2YL8T7Tz7lYvpolvaOlJnKrHyhCFxxk/uyBZ2G/0QrGKLwSaCQShDsz7ink=",
"_abck": "440E40C406E69413DCCC08ABAA3E9022~-1~YAAQdjsvF26E8Xl6AQAACr1VfAYznoJdJhX7TNIZW1Rfh6qRhzquXg+L1TWoaL7nZUjXlNls2iPIKFQrCdrWqY/CNXW+mHyXibInMflIXJi5VVB/Swq53kABYJDuXYSlCunYvJAzMSr1q12NOYswz134Y8HRNzVWhkb2jMS5whmHxS/v0vniIvS1TQtKjEQlMGzQYmN41CmLX0JobipQhDtUB4VyNwztb2DCAZiqDX8BLwWg7h/DtPd4158qU69hNhayFTgWmD76/MiR8/T536tMmcoRyWLl4fEtP/XUmKOcksuZO7dbfNxXBffTxIXPYwf1eO77LNuZTCQq5kfsGZLJX8ODju2KSjnIF1vdnyHAe98FDIm+hw==~-1~-1~-1"
}
HEADERS = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'cache-control': 'max-age=0',
'referer': 'https://www.aldi.co.uk/store-finder',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
}
class AldiUKSpider(scrapy.Spider):
name = "aldi_uk"
item_attributes = {'brand': "Aldi"}
allowed_domains = ['aldi.co.uk']
download_delay = 0.5
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
def start_requests(self):
url = 'https://www.aldi.co.uk/sitemap/store-en_gb-gbp'
yield scrapy.http.FormRequest(
url=url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse
)
def parse(self, response):
response.selector.remove_namespaces()
store_urls = response.xpath('//url/loc/text()').extract()
for store_url in store_urls:
yield scrapy.http.FormRequest(
url=store_url,
method='GET',
dont_filter=True,
cookies=COOKIES,
headers=HEADERS,
callback=self.parse_store
)
def parse_store(self, response):
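        # The store page embeds its details twice: as an inline "gtmData = {...};" javascript blob
        # and as a JSON <script> block for the store finder; extract and parse both below.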
store_js = response.xpath('//script[@type="text/javascript"]/text()').extract_first()
json_data = re.search('gtmData =(.+?);', store_js).group(1)
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
properties = {
'name': data['seoData']['name'],
'ref': data['seoData']['name'],
'addr_full': data['seoData']['address']['streetAddress'],
'city': data['seoData']['address']['addressLocality'],
'postcode': data['seoData']['address']['postalCode'],
'country': data['seoData']['address']['addressCountry'],
'website': response.request.url,
'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''),
'lat': geodata['store']['latlng']['lat'],
'lon': geodata['store']['latlng']['lng'],
}
yield GeojsonPointItem(**properties)
| 42.988636 | 436 | 0.636532 | [
"MIT"
] | ChrisSoderberg/alltheplaces | locations/spiders/aldi_uk.py | 3,783 | Python |
import argparse
import logging
from pprint import pformat
from . import guide
from . import settings
log = logging.getLogger(__name__)
def cli(settingsobject=None):
parser = argparse.ArgumentParser(description='Create a CSS/LESS/SASS style guide.')
if not settingsobject:
parser.add_argument('-f', '--settingsfile',
dest='settingsfile', default='vitalstyles.json',
help='Path to settings file. Defaults to "vitalstyles.json".')
parser.add_argument('-l', '--loglevel',
dest='loglevel', default='INFO',
choices=['DEBUG', 'INFO', 'ERROR'], help='Loglevel.')
args = parser.parse_args()
loglevel = getattr(logging, args.loglevel)
logging.basicConfig(
format='[%(name)s] %(levelname)s: %(message)s',
level=loglevel
)
if loglevel > logging.DEBUG:
markdownlogger = logging.getLogger('MARKDOWN')
markdownlogger.setLevel(logging.WARNING)
if not settingsobject:
settingsobject = settings.Settings(args.settingsfile)
logging.debug('Creating vitalstyles styleguide with the following settings:\n%s',
pformat(settingsobject.settings))
guide.Guide(settingsobject).render()
if __name__ == '__main__':
cli()
| 29.880952 | 87 | 0.67012 | [
"BSD-3-Clause"
] | appressoas/vitalstyles | vitalstyles/cli.py | 1,255 | Python |
class Player():
def __init__(self):
print("PLYR FAK SUM BODIE")
| 15.4 | 35 | 0.597403 | [
"MIT"
] | reecebenson/DADSA-Tennis-PartA | .history/classes/Player_20171106170937.py | 77 | Python |
#!/usr/bin/env python
# vim: set sts=4 sw=4 et:
import time
import xmlrpc.client
from . import players
from . import rpc
from .common import GameState, CardSet, GameError, RuleError, ProtocolError, simple_decorator
from .events import EventList, CardPlayedEvent, MessageEvent, TrickPlayedEvent, TurnEvent, StateChangedEvent
@simple_decorator
def error2fault(func):
"""
Catch known exceptions and translate them to
XML-RPC faults.
"""
def catcher(*args):
try:
return func(*args)
except GameError as error:
raise xmlrpc.client.Fault(GameError.rpc_code, str(error))
except RuleError as error:
raise xmlrpc.client.Fault(RuleError.rpc_code, str(error))
except ProtocolError as error:
raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(error))
return catcher
@simple_decorator
def fault2error(func):
"""
Catch known XML-RPC faults and translate them to
custom exceptions.
"""
def catcher(*args):
try:
return func(*args)
except xmlrpc.client.Fault as error:
error_classes = (GameError, RuleError, ProtocolError)
for klass in error_classes:
if error.faultCode == klass.rpc_code:
raise klass(error.faultString)
raise error
return catcher
class XMLRPCCliPlayer(players.CliPlayer):
"""
XML-RPC command line interface human player.
"""
def __init__(self, player_name):
players.CliPlayer.__init__(self, player_name)
self.game_state = GameState()
self.hand = None
def handle_event(self, event):
if isinstance(event, CardPlayedEvent):
self.card_played(event.player, event.card, event.game_state)
elif isinstance(event, MessageEvent):
self.send_message(event.sender, event.message)
elif isinstance(event, TrickPlayedEvent):
self.trick_played(event.player, event.game_state)
elif isinstance(event, TurnEvent):
self.game_state.update(event.game_state)
state = self.controller.get_state(self.id)
self.hand = state['hand']
self.game_state.update(state['game_state'])
elif isinstance(event, StateChangedEvent):
self.game_state.update(event.game_state)
else:
print("unknown event: %s" % event)
def wait_for_turn(self):
"""
Wait for this player's turn.
"""
while True:
time.sleep(0.5)
if self.controller is not None:
events = self.controller.get_events(self.id)
for event in events:
self.handle_event(event)
if self.game_state.turn_id == self.id:
break
class XMLRPCProxyController():
"""
Client-side proxy object for the server/GameController.
"""
def __init__(self, server_uri):
super(XMLRPCProxyController, self).__init__()
if not server_uri.startswith('http://') and \
not server_uri.startswith('https://'):
server_uri = 'http://' + server_uri
self.server = xmlrpc.client.ServerProxy(server_uri)
self.game_id = None
self.akey = None
@fault2error
def play_card(self, _player, card):
self.server.game.play_card(self.akey, self.game_id, rpc.rpc_encode(card))
@fault2error
def get_events(self, _player_id):
return rpc.rpc_decode(EventList, self.server.get_events(self.akey))
@fault2error
def get_state(self, _player_id):
state = self.server.game.get_state(self.akey, self.game_id)
state['game_state'] = rpc.rpc_decode(GameState, state['game_state'])
state['hand'] = rpc.rpc_decode(CardSet, state['hand'])
return state
@fault2error
def player_quit(self, _player_id):
self.server.player.quit(self.akey)
@fault2error
def register_player(self, player):
player.controller = self
plr_data = self.server.player.register(rpc.rpc_encode(player))
player.id = plr_data['id']
self.akey = plr_data['akey']
@fault2error
def start_game_with_bots(self):
return self.server.game.start_with_bots(self.akey, self.game_id)
@fault2error
def create_game(self):
self.game_id = self.server.game.create(self.akey)
return self.game_id
| 31.621429 | 108 | 0.63813 | [
"BSD-3-Clause"
] | jait/tupelo | tupelo/xmlrpc.py | 4,427 | Python |
import time
import numpy as np
import tensorflow as tf
import layers as L
import vat
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start")
tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")
if FLAGS.dataset == 'cifar10':
from cifar10 import inputs, unlabeled_inputs
elif FLAGS.dataset == 'svhn':
from svhn import inputs, unlabeled_inputs
else:
raise NotImplementedError
NUM_EVAL_EXAMPLES = 5000
def build_training_graph(x, y, ul_x, ul_u, lr, mom):
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=False,
)
logit = vat.forward(x)
nll_loss = L.ce_loss(logit, y)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
if FLAGS.method == 'vat':
ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
additional_loss = vat_loss
elif FLAGS.method == 'vatent':
ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
ent_loss = L.entropy_y_x(ul_logit)
additional_loss = vat_loss + ent_loss
elif FLAGS.method == 'baseline':
additional_loss = 0
else:
raise NotImplementedError
loss = nll_loss + additional_loss
opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
tvars = tf.trainable_variables()
grads_and_vars = opt.compute_gradients(loss, tvars)
train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
return loss, train_op, global_step, ul_u_updated
def build_eval_graph(x, y, ul_x, ul_u):
losses = {}
logit = vat.forward(x, is_training=False, update_batch_stats=False)
nll_loss = L.ce_loss(logit, y)
losses['NLL'] = nll_loss
acc = L.accuracy(logit, y)
losses['Acc'] = acc
scope = tf.get_variable_scope()
scope.reuse_variables()
# at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False)
# losses['AT_loss'] = at_loss
ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False)
losses['VAT_loss'] = vat_loss
return losses
def main(_):
print(FLAGS.epsilon, FLAGS.top_bn)
np.random.seed(seed=FLAGS.seed)
tf.set_random_seed(np.random.randint(1234))
with tf.Graph().as_default() as g:
with tf.device("/cpu:0"):
images, labels = inputs(batch_size=FLAGS.batch_size,
train=True,
validation=FLAGS.validation,
shuffle=True)
ul_images = tf.placeholder(shape=images.shape, dtype=tf.float32)
'''unlabeled_inputs(batch_size=FLAGS.ul_batch_size,
validation=FLAGS.validation,
shuffle=True)'''
images_eval_train, labels_eval_train = inputs(batch_size=FLAGS.eval_batch_size,
train=True,
validation=FLAGS.validation,
shuffle=True)
ul_images_eval_train = unlabeled_inputs(batch_size=FLAGS.eval_batch_size,
validation=FLAGS.validation,
shuffle=True)
images_eval_test, labels_eval_test = inputs(batch_size=FLAGS.eval_batch_size,
train=False,
validation=FLAGS.validation,
shuffle=True)
def placeholder_like(x, name=None):
return tf.placeholder(shape=x.shape, dtype=tf.float32, name=name)
def random_sphere(shape):
n = tf.random_normal(shape=shape, dtype=tf.float32)
n = tf.reshape(n, shape=(int(shape[0]), -1))
n = tf.nn.l2_normalize(n, dim=1)
n = tf.reshape(n, shape)
return n
def random_sphere_numpy(shape):
n = np.random.normal(size=shape)
proj_shape = tuple([n.shape[0]] + [1 for _ in range(len(shape) - 1)])
return n / np.linalg.norm(n.reshape((n.shape[0], -1)), axis=1).reshape(proj_shape)
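            # Both sphere helpers draw Gaussian noise and L2-normalize it per sample, giving random
            # unit vectors used as the initial perturbation directions for VAT.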
print(ul_images.shape)
# ul_u = random_sphere(ul_images.shape)
# ul_u_eval_train = random_sphere(ul_images_eval_train.shape)
# ul_u_eval_test = random_sphere(images_eval_test.shape)
ul_u = placeholder_like(ul_images, "ul_u")
ul_u_eval_train = placeholder_like(ul_images_eval_train, "ul_u_eval_train")
ul_u_eval_test = placeholder_like(images_eval_test, "ul_u_eval_test")
with tf.device(FLAGS.device):
lr = tf.placeholder(tf.float32, shape=[], name="learning_rate")
mom = tf.placeholder(tf.float32, shape=[], name="momentum")
with tf.variable_scope("CNN") as scope:
# Build training graph
loss, train_op, global_step, ul_u_updated = build_training_graph(
images, labels, ul_images, ul_u, lr, mom)
scope.reuse_variables()
# Build eval graph
losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train)
losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test)
init_op = tf.global_variables_initializer()
if not FLAGS.log_dir:
logdir = None
writer_train = None
writer_test = None
else:
logdir = FLAGS.log_dir
writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)
saver = tf.train.Saver(tf.global_variables())
sv = tf.train.Supervisor(
is_chief=True,
logdir=logdir,
init_op=init_op,
init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1},
saver=saver,
global_step=global_step,
summary_op=None,
summary_writer=None,
save_model_secs=150, recovery_wait_secs=0)
ul_images_np = np.load("train_images.npy").reshape((-1, 32, 32, 3))
print("TRUNCATING UL DATA")
ul_images_np = ul_images_np[:FLAGS.batch_size]
ul_u_np = random_sphere_numpy(ul_images_np.shape)
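        # The perturbation directions are kept in numpy and fed to the graph each step; the updated
        # directions returned by the graph are written back, so they are refined across iterations
        # instead of being re-sampled (presumably to warm-start VAT's power iteration).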
print(ul_images_np.shape, ul_u_np.shape)
print("Training...")
with sv.managed_session() as sess:
for ep in range(FLAGS.num_epochs):
if sv.should_stop():
break
if ep < FLAGS.epoch_decay_start:
feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}
else:
decayed_lr = ((FLAGS.num_epochs - ep) / float(
FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate
feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}
sum_loss = 0
start = time.time()
for i in range(FLAGS.num_iter_per_epoch):
picked = range(FLAGS.batch_size) # np.random.choice(len(ul_images_np), size=FLAGS.batch_size, replace=False)
feed_dict[ul_images] = ul_images_np[picked]
feed_dict[ul_u] = ul_u_np[picked]
ul_u_updated_np, _, batch_loss, _ = sess.run([ul_u_updated, train_op, loss, global_step],
feed_dict=feed_dict)
delta = ul_u_updated_np - ul_u_np[picked]
# print("pos", ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :4])
# print("delta", np.linalg.norm(delta.reshape((FLAGS.batch_size, -1)), axis=1)[:4])
print(np.linalg.norm(ul_u_updated_np - ul_u_np[picked]), ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :3])
ul_u_np[picked] = ul_u_updated_np
sum_loss += batch_loss
end = time.time()
print("Epoch:", ep, "CE_loss_train:", sum_loss / FLAGS.num_iter_per_epoch, "elapsed_time:", end - start)
if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:
# Eval on training data
act_values_dict = {}
feed_dict = {ul_u_eval_train: random_sphere_numpy(ul_u_eval_train.shape)}
for key, _ in losses_eval_train.iteritems():
act_values_dict[key] = 0
n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
for i in range(n_iter_per_epoch):
values = losses_eval_train.values()
act_values = sess.run(values, feed_dict=feed_dict)
for key, value in zip(act_values_dict.keys(), act_values):
act_values_dict[key] += value
summary = tf.Summary()
current_global_step = sess.run(global_step)
for key, value in act_values_dict.iteritems():
print("train-" + key, value / n_iter_per_epoch)
summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
if writer_train is not None:
writer_train.add_summary(summary, current_global_step)
# Eval on test data
act_values_dict = {}
print("HOW COME THIS DOES NOT DEPEND ON ul_images_eval_train? SOMETHING'S WRONG HERE.")
feed_dict = {ul_u_eval_test: random_sphere_numpy(ul_u_eval_test.shape)}
for key, _ in losses_eval_test.iteritems():
act_values_dict[key] = 0
n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
for i in range(n_iter_per_epoch):
values = losses_eval_test.values()
act_values = sess.run(values, feed_dict=feed_dict)
for key, value in zip(act_values_dict.keys(), act_values):
act_values_dict[key] += value
summary = tf.Summary()
current_global_step = sess.run(global_step)
for key, value in act_values_dict.iteritems():
print("test-" + key, value / n_iter_per_epoch)
summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
if writer_test is not None:
writer_test.add_summary(summary, current_global_step)
saver.save(sess, sv.save_path, global_step=global_step)
sv.stop()
if __name__ == "__main__":
tf.app.run()
| 47.778626 | 132 | 0.582202 | [
"MIT"
] | danielvarga/vat_tf | train_semisup.py | 12,518 | Python |
"""
File: Milestone1.py
Name: 黃科諺
-----------------------
This file tests the milestone 1 for
our babyname.py project
"""
import sys
def add_data_for_name(name_data, year, rank, name):
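    # Record (or update) the rank of `name` for `year`, keeping only the best
    # (numerically lowest) rank when an entry for that year already exists.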
name_info = {year: rank}
if name in name_data:
if year in name_data[name]:
exist_rank = int(name_data[name][year])
if int(rank) < exist_rank:
name_data[name][year] = rank
else:
name_data[name][year] = rank
else:
name_data[name] = name_info
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}
add_data_for_name(name_data, '1990', '900', 'Sammy')
add_data_for_name(name_data, '2010', '400', 'Kylie')
add_data_for_name(name_data, '2000', '20', 'Kate')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
| 31.307692 | 107 | 0.47502 | [
"MIT"
] | kenhuang1204/stanCode_projects | stanCode_projects/name_searching_system/milestone1.py | 2,448 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.utilities import await_callback
from compas_fab.backends.interfaces import AddCollisionMesh
from compas_fab.backends.ros.messages import ApplyPlanningSceneRequest
from compas_fab.backends.ros.messages import ApplyPlanningSceneResponse
from compas_fab.backends.ros.messages import CollisionObject
from compas_fab.backends.ros.messages import PlanningScene
from compas_fab.backends.ros.messages import PlanningSceneWorld
from compas_fab.backends.ros.service_description import ServiceDescription
__all__ = [
'MoveItAddCollisionMesh',
]
class MoveItAddCollisionMesh(AddCollisionMesh):
"""Callable to add a collision mesh to the planning scene."""
APPLY_PLANNING_SCENE = ServiceDescription('/apply_planning_scene',
'ApplyPlanningScene',
ApplyPlanningSceneRequest,
ApplyPlanningSceneResponse,
)
def __init__(self, ros_client):
self.ros_client = ros_client
def add_collision_mesh(self, collision_mesh, options=None):
"""Add a collision mesh to the planning scene.
Parameters
----------
collision_mesh : :class:`compas_fab.robots.CollisionMesh`
Object containing the collision mesh to be added.
options : dict, optional
Unused parameter.
Returns
-------
``None``
"""
kwargs = {}
kwargs['collision_mesh'] = collision_mesh
kwargs['errback_name'] = 'errback'
return await_callback(self.add_collision_mesh_async, **kwargs)
def add_collision_mesh_async(self, callback, errback, collision_mesh):
co = CollisionObject.from_collision_mesh(collision_mesh)
co.operation = CollisionObject.ADD
world = PlanningSceneWorld(collision_objects=[co])
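        # Wrap the collision object in a diff planning scene (is_diff=True) so MoveIt merges it into
        # the current scene instead of replacing the scene wholesale.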
scene = PlanningScene(world=world, is_diff=True)
request = scene.to_request(self.ros_client.ros_distro)
self.APPLY_PLANNING_SCENE(self.ros_client, request, callback, errback)
| 38.362069 | 78 | 0.681798 | [
"MIT"
] | gramaziokohler/compas_fab | src/compas_fab/backends/ros/backend_features/move_it_add_collision_mesh.py | 2,225 | Python |
"""
libquantum example 3: 03_sweep_linear.py
Construct classic linear chirp and illustrate CWT and STFT TRFs.
"""
import os
from pathlib import Path
import numpy as np
import scipy.io.wavfile
import matplotlib.pyplot as plt
from libquantum import atoms, entropy, scales, spectra, utils, synthetics
import libquantum.plot_templates.plot_time_frequency_reps as pltq
if __name__ == "__main__":
"""
Exercises with classic linear sweep
Option of exporting to wav
"""
# Do you want to export a wav file? True or False
do_save_wave = False
# If True, saves to home directory
home_dir: str = str(Path.home())
# Or can specify a preferred wav file directory
# home_dir: str = "/Users/mgarces/Documents/DATA_API_M/synthetics"
output_wav_directory = os.path.join(home_dir, "wav")
EVENT_NAME = "redshift_linear_sweep"
print("Event Name: " + EVENT_NAME)
wav_filename = EVENT_NAME
order_number_input = 3
station_id_str = 'Synth'
run_time_epoch_s = utils.datetime_now_epoch_s()
# Chirp type
is_redshift = True
sig_wf_sample_rate_hz = 8000.
sig_frequency_hz_start = 40.
sig_frequency_hz_end = 400.
sig_duration_s = 13.19675
head_s = 0.5
# sig_wf_sample_rate_hz = 8000.
# sig_frequency_hz_start = 40.
# sig_frequency_hz_end = 400.
# sig_duration_s = 13.19675
# head_s = 0.5
# Blueshift sweep
sig_wf_blu, sig_wf_epoch_s = synthetics.chirp_linear_in_noise(snr_bits=12.,
sample_rate_hz=sig_wf_sample_rate_hz,
duration_s=sig_duration_s,
frequency_start_hz=sig_frequency_hz_start,
frequency_end_hz=sig_frequency_hz_end,
intro_s=head_s,
outro_s=head_s)
sig_wf_red = np.flipud(sig_wf_blu)
# Choose origin and red/blue shift
sig_wf_epoch_s += run_time_epoch_s
sig_wf = np.copy(sig_wf_red)
# Antialias filter synthetic
synthetics.antialias_halfNyquist(synth=sig_wf)
# Export to wav directory
if do_save_wave:
wav_sample_rate_hz = 8000.
export_filename = os.path.join(output_wav_directory, wav_filename + "_8kz.wav")
synth_wav = 0.9 * np.real(sig_wf) / np.max(np.abs((np.real(sig_wf))))
scipy.io.wavfile.write(export_filename, int(wav_sample_rate_hz), synth_wav)
# Frame to mic start and end and plot
event_reference_time_epoch_s = sig_wf_epoch_s[0]
max_time_s, min_frequency_hz = scales.from_duration(band_order_Nth=order_number_input,
sig_duration_s=sig_duration_s)
print('\nRequest Order N=', order_number_input)
print('Lowest frequency in hz that can support this order for this signal duration is ', min_frequency_hz)
print('Scale with signal duration and to Nyquist, default G2 base re F1')
# Select plot frequencies
fmin = np.ceil(min_frequency_hz)
fmax = sig_wf_sample_rate_hz/2.
# TFR SECTION
# Compute complex wavelet transform (cwt) from signal duration
if is_redshift:
mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input,
dictionary_type="tone",
index_shift=-1)
else:
mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input,
dictionary_type="tone")
mic_cwt_snr, mic_cwt_snr_bits, mic_cwt_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_cwt)
pltq.plot_wf_mesh_mesh_vert(redvox_id=station_id_str,
wf_panel_2_sig=sig_wf,
wf_panel_2_time=sig_wf_epoch_s,
mesh_time=mic_cwt_time_s,
mesh_frequency=mic_cwt_frequency_hz,
mesh_panel_1_trf=mic_cwt_bits,
mesh_panel_1_colormap_scaling="range",
mesh_panel_0_tfr=mic_cwt_snr_entropy,
wf_panel_2_units="Norm",
mesh_panel_1_cbar_units="bits",
mesh_panel_0_cbar_units="eSNR bits",
start_time_epoch=event_reference_time_epoch_s,
figure_title="CWT for " + EVENT_NAME,
frequency_hz_ymin=fmin,
frequency_hz_ymax=fmax)
# Compute short term Fourier transform (STFT) from segmented signal duration
mic_stft, mic_stft_bits, mic_stft_time_s, mic_stft_frequency_hz = \
spectra.stft_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input)
mic_stft_snr, mic_stft_snr_bits, mic_stft_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_stft)
# Log frequency is the default, for linear use frequency_scaling="linear",
pltq.plot_wf_mesh_mesh_vert(frequency_scaling="log",
redvox_id=station_id_str,
wf_panel_2_sig=sig_wf,
wf_panel_2_time=sig_wf_epoch_s,
mesh_time=mic_stft_time_s,
mesh_frequency=mic_stft_frequency_hz,
mesh_panel_1_trf=mic_stft_bits,
mesh_panel_1_colormap_scaling="range",
mesh_panel_0_tfr=mic_stft_snr_entropy,
wf_panel_2_units="Norm",
mesh_panel_1_cbar_units="bits",
mesh_panel_0_cbar_units="eSNR bits",
figure_title="STFT for " + EVENT_NAME,
frequency_hz_ymin=fmin,
frequency_hz_ymax=fmax)
plt.show()
| 44.13245 | 110 | 0.579232 | [
"Apache-2.0"
] | RedVoxInc/libquantum | examples/03_sweep_linear.py | 6,664 | Python |
import hashlib
from fastecdsa import keys, curve, ecdsa
from hashlib import sha256
from uuid import uuid4
class Transaction:
def __init__(self, from_address, to_address, amount):
self.from_address = from_address
self.to_address = to_address
self.amount = amount
self.id = str(uuid4()).replace('-', '')
self.signature = None
def calculate_hash(self):
return sha256((str(self.from_address) + str(self.to_address) + str(self.amount) + self.id).encode()).hexdigest()
def sign_tx(self, priv_key):
hash_tx = self.calculate_hash()
self.signature = ecdsa.sign(hash_tx, priv_key, hashfunc=sha256)
def is_valid(self):
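        # A transaction with no signature at all is accepted as-is, presumably to allow
        # system-generated transactions such as mining rewards.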
if self.signature is None:
return True
if len(self.signature) == 0 and self.to_address is None:
return False
hash_tx = self.calculate_hash()
pubkey = keys.get_public_keys_from_sig(self.signature, hash_tx, curve=curve.P256, hashfunc=sha256)
valid = ecdsa.verify(self.signature, hash_tx, pubkey[0], hashfunc=sha256)
return valid
def serialize(self):
return {
'id': self.id,
'from_address': self.from_address,
'to_address': self.to_address,
'amount': self.amount
}
| 30.928571 | 120 | 0.635874 | [
"MIT"
] | ephremdeme/voting | blockchain/Transaction.py | 1,299 | Python |
# -*- coding: utf-8 -*-
from app.libs.utils import data_decode
import socket, socketserver, threading
import traceback
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
ip = ""
port = 0
timeOut = 100
def __init__(self, request, client_address, server):
from app.service.device import Device
self.socket = None
self.addr = None
self.cloud_id = None
self.device = Device()
self.sign = None
self.device_id = None
self.timestamp = None
super().__init__(request, client_address, server)
def setup(self):
self.ip = self.client_address[0].strip()
self.port = self.client_address[1]
self.request.settimeout(self.timeOut)
self.addr = self.ip + str(self.port)
self.socket = self.request
print(self.ip)
def handle(self):
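        # Receive loop: keep reading frames from the device until a timeout or disconnect,
        # delegating protocol parsing to Device.parse_data().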
try:
while True:
try:
# time.sleep(1)
data = self.request.recv(1024)
except socket.timeout:
print(self.ip + ":" + str(self.port) + "接收超时")
break
if data:
data = data_decode(data)
self.device.parse_data(data, self)
else:
break
except Exception as e:
with open("err_log.log", "a+") as f:
f.write(traceback.format_exc()+'\r\r')
            print(self.client_address, "connection closed")
finally:
self.request.close()
def finish(self):
if self.cloud_id is None:
print(self.ip + ":" + str(self.port) + "断开连接!")
else:
get_instance().remove_client(self.cloud_id)
print(self.ip + ":" + str(self.port) + self.cloud_id + "断开连接!")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class TCPServer:
instance = None
@staticmethod
def get_instance():
print("start")
if TCPServer.instance is None:
TCPServer.instance = TCPServer()
return TCPServer.instance
def __init__(self):
self.clients = {}
self.server = None
try:
self.server = ThreadedTCPServer(("0.0.0.0", 5002), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
# server_thread.join()
except (KeyboardInterrupt, SystemExit, Exception) as e:
print(e)
print("end")
            if self.server is not None:
                self.server.shutdown()
                # socketserver.TCPServer exposes server_close(), not close()
                self.server.server_close()
def add_client(self, cloud, sock):
self.clients[cloud] = sock
print("this is clients", self.clients)
def remove_client(self, cloud):
if cloud in self.clients:
print("删除设备" + cloud)
from app.service.device import Device
Device.offline_alarm(self.clients[cloud])
self.clients.pop(cloud)
def get_instance():
return TCPServer.get_instance()
| 30.126214 | 89 | 0.565259 | [
"Apache-2.0"
] | mohansd/cyx-xElec-server | app/service/socketservice.py | 3,147 | Python |
from django.conf.urls import patterns, url
from django.contrib.contenttypes.models import ContentType
from kitsune.questions.feeds import (
QuestionsFeed, AnswersFeed, TaggedQuestionsFeed)
from kitsune.questions.models import Question, Answer
from kitsune.flagit import views as flagit_views
urlpatterns = patterns(
'kitsune.questions.views',
url(r'^$', 'product_list', name='questions.home'),
url(r'^/answer-preview-async$', 'answer_preview_async',
name='questions.answer_preview_async'),
url(r'^/dashboard/metrics$', 'metrics', name='questions.metrics'),
url(r'^/dashboard/metrics/(?P<locale_code>[^/]+)$', 'metrics',
name='questions.locale_metrics'),
# AAQ
url(r'^/new$', 'aaq', name='questions.aaq_step1'),
url(r'^/new/confirm$', 'aaq_confirm', name='questions.aaq_confirm'),
url(r'^/new/(?P<product_key>[\w\-]+)$',
'aaq_step2', name='questions.aaq_step2'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)$',
'aaq_step3', name='questions.aaq_step3'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/search$',
'aaq_step4', name='questions.aaq_step4'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/form$',
'aaq_step5', name='questions.aaq_step5'),
# AAQ flow for Marketplace
url(r'^/marketplace$', 'marketplace', name='questions.marketplace_aaq'),
url(r'^/marketplace/success$',
'marketplace_success', name='questions.marketplace_aaq_success'),
url(r'^/marketplace/refund$', 'marketplace_refund',
name='questions.marketplace_refund'),
url(r'^/marketplace/developer-request$', 'marketplace_developer_request',
name='questions.marketplace_developer_request'),
url(r'^/marketplace/(?P<category_slug>[\w\-]+)$',
'marketplace_category', name='questions.marketplace_aaq_category'),
# TODO: Factor out `/(?P<question_id>\d+)` below
url(r'^/(?P<question_id>\d+)$', 'question_details',
name='questions.details'),
url(r'^/(?P<question_id>\d+)/edit$',
'edit_question', name='questions.edit_question'),
url(r'^/(?P<question_id>\d+)/edit-details$',
'edit_details', name='questions.edit_details'),
url(r'^/(?P<question_id>\d+)/reply$', 'reply', name='questions.reply'),
url(r'^/(?P<question_id>\d+)/delete$', 'delete_question',
name='questions.delete'),
url(r'^/(?P<question_id>\d+)/lock$', 'lock_question',
name='questions.lock'),
url(r'^/(?P<question_id>\d+)/archive$', 'archive_question',
name='questions.archive'),
url(r'^/(?P<question_id>\d+)/delete/(?P<answer_id>\d+)$',
'delete_answer', name='questions.delete_answer'),
url(r'^/(?P<question_id>\d+)/edit/(?P<answer_id>\d+)$', 'edit_answer',
name='questions.edit_answer'),
url(r'^/(?P<question_id>\d+)/solve/(?P<answer_id>\d+)$', 'solve',
name='questions.solve'),
url(r'^/(?P<question_id>\d+)/unsolve/(?P<answer_id>\d+)$', 'unsolve',
name='questions.unsolve'),
url(r'^/(?P<question_id>\d+)/vote$', 'question_vote',
name='questions.vote'),
url(r'^/(?P<question_id>\d+)/vote/(?P<answer_id>\d+)$',
'answer_vote', name='questions.answer_vote'),
url(r'^/(?P<question_id>\d+)/add-tag$', 'add_tag',
name='questions.add_tag'),
url(r'^/(?P<question_id>\d+)/remove-tag$', 'remove_tag',
name='questions.remove_tag'),
url(r'^/(?P<question_id>\d+)/add-tag-async$', 'add_tag_async',
name='questions.add_tag_async'),
url(r'^/(?P<question_id>\d+)/remove-tag-async$', 'remove_tag_async',
name='questions.remove_tag_async'),
# Feeds
# Note: this needs to be above questions.list because "feed"
# matches the product slug regex.
url(r'^/feed$', QuestionsFeed(), name='questions.feed'),
url(r'^/(?P<question_id>\d+)/feed$', AnswersFeed(),
name='questions.answers.feed'),
url(r'^/tagged/(?P<tag_slug>[\w\-]+)/feed$', TaggedQuestionsFeed(),
name='questions.tagged_feed'),
# Mark as spam
url(r'^/mark_spam$', 'mark_spam', name='questions.mark_spam'),
url(r'^/unmark_spam$', 'unmark_spam', name='questions.unmark_spam'),
# Question lists
url(r'^/(?P<product_slug>[\w+\-\,]+)$', 'question_list',
name='questions.list'),
# Flag content ("Report this post")
url(r'^/(?P<object_id>\d+)/flag$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Question).id},
name='questions.flag'),
url(r'^/(?P<question_id>\d+)/flag/(?P<object_id>\d+)$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Answer).id},
name='questions.answer_flag'),
    # Subscribe by email
url(r'^/(?P<question_id>\d+)/watch$', 'watch_question',
name='questions.watch'),
url(r'^/(?P<question_id>\d+)/unwatch$', 'unwatch_question',
name='questions.unwatch'),
url(r'^/confirm/(?P<watch_id>\d+)/(?P<secret>\w+)$', 'activate_watch',
name='questions.activate_watch'),
url(r'^/unsubscribe/(?P<watch_id>\d+)/(?P<secret>\w+)$',
'unsubscribe_watch', name='questions.unsubscribe'),
)
| 45.778761 | 78 | 0.626329 | [
"BSD-3-Clause"
] | 983834572/kitsune | kitsune/questions/urls.py | 5,173 | Python |
# -*- coding: utf-8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
SEMLikeCommandLine,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
import os
class ExtractSkeletonInputSpec(CommandLineInputSpec):
InputImageFileName = File(position=-2, desc="Input image", exists=True, argstr="%s")
OutputImageFileName = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Skeleton of the input image",
argstr="%s",
)
type = traits.Enum(
"1D", "2D", desc="Type of skeleton to create", argstr="--type %s"
)
dontPrune = traits.Bool(
desc="Return the full skeleton, not just the maximal skeleton",
argstr="--dontPrune ",
)
numPoints = traits.Int(
desc="Number of points used to represent the skeleton", argstr="--numPoints %d"
)
pointsFile = traits.Str(
desc="Name of the file to store the coordinates of the central (1D) skeleton points",
argstr="--pointsFile %s",
)
class ExtractSkeletonOutputSpec(TraitedSpec):
OutputImageFileName = File(
position=-1, desc="Skeleton of the input image", exists=True
)
class ExtractSkeleton(SEMLikeCommandLine):
"""title: Extract Skeleton
category: Filtering
description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned.
version: 0.1.0.$Revision: 2104 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton
contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware)
acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc.
"""
input_spec = ExtractSkeletonInputSpec
output_spec = ExtractSkeletonOutputSpec
_cmd = "ExtractSkeleton "
_outputs_filenames = {"OutputImageFileName": "OutputImageFileName.nii"}
| 33.986486 | 310 | 0.702584 | [
"Apache-2.0"
] | AnnaD15/nipype | nipype/interfaces/slicer/filtering/extractskeleton.py | 2,515 | Python |
import subprocess
import time
import os
TEST_TYPE = os.getenv("TEST_TYPE", "bdd")
def before_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = subprocess.Popen(["make", "start"])
time.sleep(4)
context.proc = proc
context.root_url = "http://localhost:5000"
else:
context.root_url = os.getenv("ROOT_ENDPOINT")
def after_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = context.proc
proc.terminate()
| 21.73913 | 53 | 0.622 | [
"MIT"
] | abhisheksr01/zero-2-hero-python-flask-microservice | features/environment.py | 500 | Python |
class Solution:
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
        # amount+1 serves as "infinity": the answer never exceeds amount (worst case: all coins of denomination 1)
res = [amount+1]*(amount+1)
res[0] = 0
for i in range(1, amount+1):
for j in coins:
if j <= i:
res[i] = min(res[i], res[i-j] + 1)
if res[amount] > amount:
return -1
else:
return res[amount]
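
# Illustrative check, not part of the original solution; the coin sets and amounts
# are assumptions matching the classic examples for this problem.
if __name__ == "__main__":
    solver = Solution()
    print(solver.coinChange([1, 2, 5], 11))  # expected 3 (5 + 5 + 1)
    print(solver.coinChange([2], 3))         # expected -1 (amount cannot be formed)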
| 27.238095 | 101 | 0.461538 | [
"MIT"
] | vedantc6/LCode | Session1_2018/coinChange.py | 572 | Python |
import xml.etree.ElementTree as ET
from .exceptions import UnpopulatedPropertyError
from .property_decorators import property_not_nullable, property_is_boolean
from .tag_item import TagItem
from ..datetime_helpers import parse_datetime
import copy
class FlowItem(object):
def __init__(self, project_id, name=None):
self._webpage_url = None
self._created_at = None
self._id = None
self._initial_tags = set()
self._project_name = None
        self._updated_at = None
        # initialise here so the flow_type property cannot raise AttributeError
        self._flow_type = None
self.name = name
self.owner_id = None
self.project_id = project_id
self.tags = set()
self.description = None
self._connections = None
self._permissions = None
@property
def connections(self):
if self._connections is None:
error = 'Flow item must be populated with connections first.'
raise UnpopulatedPropertyError(error)
return self._connections()
@property
def permissions(self):
if self._permissions is None:
error = "Project item must be populated with permissions first."
raise UnpopulatedPropertyError(error)
return self._permissions()
@property
def webpage_url(self):
return self._webpage_url
@property
def created_at(self):
return self._created_at
@property
def id(self):
return self._id
@property
def project_id(self):
return self._project_id
@project_id.setter
@property_not_nullable
def project_id(self, value):
self._project_id = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def project_name(self):
return self._project_name
@property
def flow_type(self):
return self._flow_type
@property
def updated_at(self):
return self._updated_at
def _set_connections(self, connections):
self._connections = connections
def _set_permissions(self, permissions):
self._permissions = permissions
def _parse_common_elements(self, flow_xml, ns):
if not isinstance(flow_xml, ET.Element):
flow_xml = ET.fromstring(flow_xml).find('.//t:flow', namespaces=ns)
if flow_xml is not None:
(_, _, _, _, _, updated_at, _, project_id, project_name, owner_id) = self._parse_element(flow_xml, ns)
self._set_values(None, None, None, None, None, updated_at, None, project_id,
project_name, owner_id)
return self
def _set_values(self, id, name, description, webpage_url, created_at,
updated_at, tags, project_id, project_name, owner_id):
if id is not None:
self._id = id
if name:
self.name = name
if description:
self.description = description
if webpage_url:
self._webpage_url = webpage_url
if created_at:
self._created_at = created_at
if updated_at:
self._updated_at = updated_at
if tags:
self.tags = tags
self._initial_tags = copy.copy(tags)
if project_id:
self.project_id = project_id
if project_name:
self._project_name = project_name
if owner_id:
self.owner_id = owner_id
@classmethod
def from_response(cls, resp, ns):
all_flow_items = list()
parsed_response = ET.fromstring(resp)
all_flow_xml = parsed_response.findall('.//t:flow', namespaces=ns)
for flow_xml in all_flow_xml:
(id_, name, description, webpage_url, created_at, updated_at,
tags, project_id, project_name, owner_id) = cls._parse_element(flow_xml, ns)
flow_item = cls(project_id)
flow_item._set_values(id_, name, description, webpage_url, created_at, updated_at,
tags, None, project_name, owner_id)
all_flow_items.append(flow_item)
return all_flow_items
@staticmethod
def _parse_element(flow_xml, ns):
id_ = flow_xml.get('id', None)
name = flow_xml.get('name', None)
description = flow_xml.get('description', None)
webpage_url = flow_xml.get('webpageUrl', None)
created_at = parse_datetime(flow_xml.get('createdAt', None))
updated_at = parse_datetime(flow_xml.get('updatedAt', None))
tags = None
tags_elem = flow_xml.find('.//t:tags', namespaces=ns)
if tags_elem is not None:
tags = TagItem.from_xml_element(tags_elem, ns)
project_id = None
project_name = None
project_elem = flow_xml.find('.//t:project', namespaces=ns)
if project_elem is not None:
project_id = project_elem.get('id', None)
project_name = project_elem.get('name', None)
owner_id = None
owner_elem = flow_xml.find('.//t:owner', namespaces=ns)
if owner_elem is not None:
owner_id = owner_elem.get('id', None)
return (id_, name, description, webpage_url, created_at, updated_at, tags, project_id,
project_name, owner_id)
| 32.484663 | 114 | 0.628706 | [
"CC0-1.0",
"MIT"
] | BenevolentHighPriestess/server-client-python | tableauserverclient/models/flow_item.py | 5,295 | Python |
from interpolate import interpolate_doc
foo = """
hello
world
"""
bar = "foo bar\nbaz"
class Foo:
# cf matplotlib's kwdoc.
__kw__ = "the kw of foo"
@interpolate_doc
def func():
"""
this is a docstring
{interpolate_example.foo}
{bar}
{Foo!K}
"""
try:
@interpolate_doc
def bad_doc():
"""
fields {must} be preceded by whitespace
"""
except ValueError:
print("error correctly caught")
| 12.756757 | 47 | 0.576271 | [
"MIT"
] | anntzer/structured-docstrings | interpolate_example.py | 472 | Python |
import discord
from discord.ext import commands
# Set slash commands=True when constructing your bot to enable all slash commands
# if your bot is only for a couple of servers, you can use the parameter
# `slash_command_guilds=[list, of, guild, ids]` to specify this,
# then the commands will be much faster to upload.
bot = commands.Bot("!", intents=discord.Intents(guilds=True, messages=True), slash_commands=True)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user} (ID: {bot.user.id})")
print("------")
@bot.command()
# You can use commands.Option to define descriptions for your options, and converters will still work fine.
async def ping(
ctx: commands.Context, emoji: bool = commands.Option(description="whether to use an emoji when responding")
):
# This command can be used with slash commands or message commands
if emoji:
await ctx.send("\U0001f3d3")
else:
await ctx.send("Pong!")
@bot.command(message_command=False)
async def only_slash(ctx: commands.Context):
# This command can only be used with slash commands
await ctx.send("Hello from slash commands!")
@bot.command(slash_command=False)
async def only_message(ctx: commands.Context):
# This command can only be used with a message
await ctx.send("Hello from message commands!")
bot.run("token")
| 31.97619 | 111 | 0.724497 | [
"MIT"
] | Astrea49/enhanced-discord.py | examples/slash_commands.py | 1,343 | Python |
from io import BytesIO
from typing import Tuple, Sequence
import attr
from PIL import Image
from ebl.changelog import Changelog
from ebl.ebl_ai_client import EblAiClient
from ebl.files.application.file_repository import FileRepository
from ebl.fragmentarium.application.annotations_repository import AnnotationsRepository
from ebl.fragmentarium.application.annotations_schema import AnnotationsSchema
from ebl.fragmentarium.application.cropped_sign_image import CroppedSign
from ebl.fragmentarium.application.cropped_sign_images_repository import (
CroppedSignImage,
CroppedSignImagesRepository,
)
from ebl.fragmentarium.application.fragment_repository import FragmentRepository
from ebl.fragmentarium.domain.annotation import (
Annotations,
AnnotationValueType,
)
from ebl.transliteration.domain.line_label import LineLabel
from ebl.transliteration.domain.museum_number import MuseumNumber
from ebl.users.domain.user import User
@attr.attrs(auto_attribs=True, frozen=True)
class AnnotationsService:
_ebl_ai_client: EblAiClient
_annotations_repository: AnnotationsRepository
_photo_repository: FileRepository
_changelog: Changelog
_fragments_repository: FragmentRepository
_photos_repository: FileRepository
_cropped_sign_images_repository: CroppedSignImagesRepository
def generate_annotations(
self, number: MuseumNumber, threshold: float = 0.3
) -> Annotations:
fragment_image = self._photo_repository.query_by_file_name(f"{number}.jpg")
return self._ebl_ai_client.generate_annotations(
number, fragment_image, threshold
)
def find(self, number: MuseumNumber) -> Annotations:
return self._annotations_repository.query_by_museum_number(number)
def _label_by_line_number(
self, line_number_to_match: int, labels: Sequence[LineLabel]
) -> str:
matching_label = None
for label in labels:
label_line_number = label.line_number
if label_line_number and label_line_number.is_matching_number(
line_number_to_match
):
matching_label = label
return matching_label.formatted_label if matching_label else ""
def _cropped_image_from_annotations_helper(
self,
annotations: Annotations,
image: Image.Image,
script: str,
labels: Sequence[LineLabel],
) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
cropped_sign_images = []
updated_cropped_annotations = []
for annotation in annotations.annotations:
label = (
self._label_by_line_number(annotation.data.path[0], labels)
if annotation.data.type != AnnotationValueType.BLANK
else ""
)
cropped_image = annotation.crop_image(image)
cropped_sign_image = CroppedSignImage.create(cropped_image)
cropped_sign_images.append(cropped_sign_image)
updated_cropped_annotation = attr.evolve(
annotation,
cropped_sign=CroppedSign(
cropped_sign_image.image_id,
script,
label,
),
)
updated_cropped_annotations.append(updated_cropped_annotation)
return (
attr.evolve(annotations, annotations=updated_cropped_annotations),
cropped_sign_images,
)
def _cropped_image_from_annotations(
self, annotations: Annotations
) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
fragment = self._fragments_repository.query_by_museum_number(
annotations.fragment_number
)
fragment_image = self._photos_repository.query_by_file_name(
f"{annotations.fragment_number}.jpg"
)
image_bytes = fragment_image.read()
image = Image.open(BytesIO(image_bytes), mode="r")
return self._cropped_image_from_annotations_helper(
annotations, image, fragment.script, fragment.text.labels
)
def update(self, annotations: Annotations, user: User) -> Annotations:
old_annotations = self._annotations_repository.query_by_museum_number(
annotations.fragment_number
)
_id = str(annotations.fragment_number)
schema = AnnotationsSchema()
(
annotations_with_image_ids,
cropped_sign_images,
) = self._cropped_image_from_annotations(annotations)
self._annotations_repository.create_or_update(annotations_with_image_ids)
self._cropped_sign_images_repository.create_many(cropped_sign_images)
self._changelog.create(
"annotations",
user.profile,
{"_id": _id, **schema.dump(old_annotations)},
{"_id": _id, **schema.dump(annotations_with_image_ids)},
)
return annotations_with_image_ids
| 37.755725 | 86 | 0.697736 | [
"MIT"
] | ElectronicBabylonianLiterature/dictionary | ebl/fragmentarium/application/annotations_service.py | 4,946 | Python |
def number_of_occurrences(s, xs):
return xs.count(s) | 28 | 33 | 0.732143 | [
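
# Illustrative check, not part of the original solution:
# counts how many times s appears in the sequence xs.
assert number_of_occurrences("a", ["a", "b", "a"]) == 2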
"MIT"
] | anubhab-code/Competitive-Programming | CodeWars/7 Kyu/Number Of Occurrences.py | 56 | Python |
# Python program to get the size of an object in bytes
import sys
Object = input("Enter any object: ")
print(f'The size of the object {Object} is {sys.getsizeof(Object)} bytes')
| 29.666667 | 74 | 0.735955 | [
"MIT"
] | CodedLadiesInnovateTech/-python-challenge-solutions | GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 9.py | 178 | Python |
from .chain import *
from .element import *
| 14.666667 | 22 | 0.727273 | [
"MIT"
] | ArcletProject/Edoves | arclet/edoves/main/message/__init__.py | 44 | Python |
from importlib import import_module, reload
import pytest
import sys
from unittest.mock import patch
from rest_framework import status
from django.contrib.auth.models import Permission, Group
from django.conf import settings
from django.urls import clear_url_caches
from django.urls import reverse
from .factories import UserFactory
from groups.models import GroupInfo
from users.models import UserProfile
BLANK_CHOICE = ('', '---------')
USER_DETAILS = {
'username': 'test',
'email': '[email protected]',
'first_name': 'Foo',
'last_name': 'Bar',
}
USER_DETAILS_CREATE = USER_DETAILS.copy()
USER_DETAILS_CREATE.update(password1='pass', password2='pass')
USER_DETAILS_CHANGING = {
'username': 'johnsmith',
'email': '[email protected]',
'first_name': 'John',
'last_name': 'Smith',
}
@pytest.mark.django_db
def test_create_user_view_get(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_create_user_view(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.post(url, data=USER_DETAILS_CREATE)
assert response.context['message'] == 'User test created.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
@pytest.mark.django_db
def test_create_user_view_invalid_form(admin_client):
url = reverse('wagtailusers_users:add')
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be created due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_edit_user_view(admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.context['can_delete'] is True
@pytest.mark.django_db
def test_edit_user_view(team_leaders_group, admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
# We'll add the user to a group, as well as changing their details
post_data = USER_DETAILS_CHANGING.copy()
post_data['groups'] = [team_leaders_group.pk]
response = admin_client.post(url, data=post_data)
assert response.context['message'] == 'User johnsmith updated.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The user's details should have changed to reflect the posted values
user.refresh_from_db()
for field_name, changed_value in USER_DETAILS_CHANGING.items():
assert getattr(user, field_name) == changed_value
# And they should have been added to a group
group_ids = set(user.groups.values_list('id', flat=True))
assert group_ids == {team_leaders_group.pk}
@pytest.mark.django_db
def test_edit_user_view_invalid_form(admin_client, approved_user):
url = reverse('wagtailusers_users:edit', kwargs={'pk': approved_user.pk})
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be saved due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_edit_user_view_cannot_change_personal_details_when_sso_enforced(
admin_client
):
# Set this flag to True and repeat previous test
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = UserFactory(**USER_DETAILS)
# Post changes to the view
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=USER_DETAILS_CHANGING)
    # The user's details should remain unchanged, because the
# personal detail fields should all be disabled
user.refresh_from_db()
for field_name, original_value in USER_DETAILS.items():
assert getattr(user, field_name) == original_value
# Change this back to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_preserves_ability_to_update_is_active(admin_client):
    # Set this flag to True and repeat the actions of the previous test
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
# Create an 'inactive' user to test with
user = UserFactory(**USER_DETAILS)
user.is_active = False
user.save()
# Post using the same details + 'is_active=on'
post_data = USER_DETAILS.copy()
post_data.update(is_active='on')
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=post_data)
# The change to 'is_active' should have been applied, because that field
# is not disabled along with the personal detail ones
user.refresh_from_db()
assert user.is_active is True
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_warns_administrator_if_user_is_awaiting_approval(
admin_client, user_awaiting_approval
):
# This flag must be set for the warning to show
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
message = response.context['message']
assert "This user is awaiting approval" in message
assert "requested to be added to the 'Moderators' group" in message
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_marks_user_as_approved_if_added_to_group(
admin_client, admin_user, user_awaiting_approval
):
# This flag must be set for the warning to show
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
profile = user_awaiting_approval.userprofile
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
group = Group.objects.get(pk=profile.self_assigned_group_id)
group.permissions.add(Permission.objects.get(codename='access_admin'))
with patch('users.views.notify_user_of_access_request_approval', autospec=True) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
'groups': [group.pk],
})
# Ensure the post was successful
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The UserProfile should have been updated
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_APPROVED
assert profile.approved_by_id == admin_user.id
assert profile.approved_at is not None
# A notification should have been triggered for the user
expected_call_args = dict(
request=response.wsgi_request,
user_email=user.email,
user_name=user.first_name,
reviewer_name=admin_user.get_full_name(),
)
mocked_method.assert_called_with(**expected_call_args)
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_does_not_mark_user_as_approved_if_not_added_to_a_group(admin_client, groups_with_info):
user = UserFactory(username='some-user')
profile = user.userprofile
profile.assignment_status = UserProfile.STATUS_AWAITING_APPROVAL
profile.self_assigned_group_id = groups_with_info[0].id
profile.save()
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
with patch(
'users.views.notify_user_of_access_request_approval'
) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
})
# Ensure the post was successful
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The UserProfile should NOT have been updated
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL
assert profile.approved_by_id is None
assert profile.approved_at is None
# no notification should have been triggered
mocked_method.assert_not_called()
def reload_urlconf(urlconf=None):
clear_url_caches()
if urlconf is None:
urlconf = settings.ROOT_URLCONF
if urlconf in sys.modules:
reload(sys.modules[urlconf])
else:
import_module(urlconf)
@pytest.mark.django_db
def test_force_staff_sso(client):
"""Test that URLs and redirects are in place."""
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
settings.AUTHBROKER_CLIENT_ID = 'debug'
settings.AUTHBROKER_CLIENT_SECRET = 'debug'
settings.AUTHBROKER_URL = 'https://test.com'
reload_urlconf()
assert reverse('authbroker_client:login') == '/auth/login/'
assert reverse('authbroker_client:callback') == '/auth/callback/'
response = client.get('/admin/login/')
assert response.status_code == 302
assert response.url == '/auth/login/'
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.parametrize('assignment_status, expected_status_code', (
(UserProfile.STATUS_CREATED, 200),
(UserProfile.STATUS_AWAITING_APPROVAL, 302),
(UserProfile.STATUS_APPROVED, 302)
))
@pytest.mark.django_db
def test_ssorequestaccessview_responds_based_on_assignment_status(
admin_client, admin_user, assignment_status, expected_status_code
):
url = reverse('sso:request_access')
profile = admin_user.userprofile
profile.assignment_status = assignment_status
profile.save()
response = admin_client.get(url)
assert response.status_code == expected_status_code
@pytest.mark.django_db
def test_ssorequestaccessview_shows_unlimited_visibilty_groups_only(
admin_client, groups_with_info
):
url = reverse('sso:request_access')
    # Visibility is set to 'unrestricted' for all groups in `groups_with_info`,
# so choices should reflect that by default
expected_choices = tuple(
(g.id, g.info.name_singular) for g in groups_with_info
)
# Confirm the choices in the form are as expected
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
actual_choices = tuple(group_field.choices)
assert actual_choices == expected_choices
# Change the visibility of groups and try again
GroupInfo.objects.all().update(
visibility=GroupInfo.VISIBILITY_MANAGERS_ONLY)
# Choices should be empty now
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
assert tuple(group_field.choices) == ()
@pytest.mark.django_db
def test_ssorequestaccessview_with_no_team_leaders_group(admin_client):
# If no 'team leaders group' has been designated, the 'team_leaders'
# field should only have a 'blank' option
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders_group_but_no_members(
admin_client, team_leaders_group
):
# If the designated 'team leaders group' has no members, the 'team_leaders'
# field should only have a 'blank' option
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert team_leaders_group.user_set.all().exists() is False
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders(
admin_client, team_leaders_group, team_leaders
):
url = reverse('sso:request_access')
# When team leaders are defined, they will appear as choices
# for the 'team_leaders' field
expected_choices = [BLANK_CHOICE]
expected_choices.extend(list(
(tl.id, "{} <{}>".format(tl.get_full_name(), tl.email))
for tl in team_leaders
))
# Confirm the choices in the form are as expected
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
actual_choices = list(team_leader_field.choices)
assert actual_choices == expected_choices
@pytest.mark.django_db
def test_ssorequestaccessview_fails_validation_if_form_incomplete(
admin_client, groups_with_info, team_leaders
):
url = reverse('sso:request_access')
response = admin_client.post(url, data={})
# Should still be on the same view
assert response.status_code == 200
# Both form fields should have errors
assert 'self_assigned_group' in response.context['form'].errors
assert 'team_leader' in response.context['form'].errors
@pytest.mark.django_db
def test_ssorequestaccessview_post_with_complete_data(
admin_client, admin_user, groups_with_info, team_leaders
):
group = groups_with_info[0]
team_leader = team_leaders[0]
with patch(
'users.views.notify_team_leader_of_pending_access_request',
autospec=True
) as mocked_method:
response = admin_client.post(
reverse('sso:request_access'),
data={
'self_assigned_group': group.id,
'team_leader': team_leader.id,
}
)
# Should be redirected to the success url
success_url = reverse('sso:request_access_success')
assert response.url == success_url
# The UserProfile for `admin_user` should have been updated
profile = admin_user.userprofile
assert profile.self_assigned_group_id == group.id
assert profile.team_leader_id == team_leader.id
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL # noqa
# A notification should have been triggered for the user
expected_call_args = dict(
request=response.wsgi_request,
team_leader_email=team_leader.email,
team_leader_name=team_leader.first_name,
user_id=admin_user.id,
user_name=admin_user.get_full_name(),
user_email=admin_user.email,
user_role=group.info.name_singular,
)
mocked_method.assert_called_with(**expected_call_args)
@pytest.mark.django_db
@pytest.mark.parametrize('url', (
reverse('sso:request_access'),
reverse('sso:request_access_success'),
))
def test_ssorequestaccess_views_only_available_to_authenticated_users(
client, admin_client, url
):
# When not authenticated, the user is redirected to the login page
response = client.get(url)
assert response.status_code == 302
assert response.url.startswith(settings.LOGIN_URL)
# When authenticated, things work fine
response = admin_client.get(url)
assert response.status_code == 200
| 34.45 | 111 | 0.730927 | [
"MIT"
] | uktrade/directory-cms | tests/users/test_views.py | 15,847 | Python |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: DbClient.py
   Description :  DB factory class
Author : JHao
date: 2016/12/2
-------------------------------------------------
Change Activity:
                   2016/12/02: DB factory class
                   2020/07/03: dropped raw_proxy storage
-------------------------------------------------
"""
__author__ = 'JHao'
import os
import sys
from util.six import urlparse, withMetaclass
from util.singleton import Singleton
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class DbClient(withMetaclass(Singleton)):
"""
    DbClient is the DB factory class. It exposes the
    get/put/update/pop/delete/exists/getAll/clear/getCount/changeTable methods.
    Abstract method definitions:
        get(): return a random proxy;
        put(proxy): store a proxy;
        pop(): return and remove a proxy in order;
        update(proxy): update the given proxy's info;
        delete(proxy): delete the given proxy;
        exists(proxy): check whether the given proxy exists;
        getAll(): return all proxies;
        clear(): remove all proxy info;
        getCount(): return proxy statistics;
        changeTable(name): switch the table being operated on
    Each method must be implemented by the matching client class:
        ssdb: ssdbClient.py
        redis: redisClient.py
        postgresql: postgresqlClient.py
"""
def __init__(self, db_conn):
"""
init
:return:
"""
self.parseDbConn(db_conn)
self.__initDbClient()
@classmethod
def parseDbConn(cls, db_conn):
db_conf = urlparse(db_conn)
cls.db_type = db_conf.scheme.upper().strip()
cls.db_host = db_conf.hostname
cls.db_port = db_conf.port
cls.db_user = db_conf.username
cls.db_pwd = db_conf.password
cls.db_name = db_conf.path[1:]
return cls
def __initDbClient(self):
"""
init DB Client
:return:
"""
__type = None
if "SSDB" == self.db_type:
__type = "ssdbClient"
elif "REDIS" == self.db_type:
__type = "redisClient"
elif "POSTGRESQL" == self.db_type:
__type = "postgresqlClient"
else:
pass
assert __type, 'type error, Not support DB type: {}'.format(self.db_type)
self.client = getattr(__import__(__type), "%sClient" % self.db_type.title())(host=self.db_host,
port=self.db_port,
username=self.db_user,
password=self.db_pwd,
db=self.db_name)
def get(self, **kwargs):
return self.client.get(**kwargs)
def put(self, key, **kwargs):
return self.client.put(key, **kwargs)
def update(self, key, value, **kwargs):
return self.client.update(key, value, **kwargs)
def delete(self, key, **kwargs):
return self.client.delete(key, **kwargs)
def exists(self, key, **kwargs):
return self.client.exists(key, **kwargs)
def pop(self, **kwargs):
return self.client.pop(**kwargs)
def getAll(self):
return self.client.getAll()
def clear(self):
return self.client.clear()
def changeTable(self, name):
self.client.changeTable(name)
def getCount(self):
return self.client.getCount()
def test(self):
return self.client.test()
| 28.487805 | 107 | 0.50742 | [
"MIT"
] | dota2heqiuzhi/proxy_pool | db/dbClient.py | 3,708 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_priority_class import V1alpha1PriorityClass
class TestV1alpha1PriorityClass(unittest.TestCase):
""" V1alpha1PriorityClass unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1PriorityClass(self):
"""
Test V1alpha1PriorityClass
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1alpha1_priority_class.V1alpha1PriorityClass()
pass
if __name__ == '__main__':
unittest.main()
| 22.266667 | 105 | 0.720559 | [
"Apache-2.0"
] | MiaoRachelYu/python | kubernetes/test/test_v1alpha1_priority_class.py | 1,002 | Python |
"""Forms related to import operations."""
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy
class ImportDataForm(forms.Form):
"""Base form to import objects."""
sourcefile = forms.FileField(label=ugettext_lazy("Select a file"))
sepchar = forms.CharField(
label=ugettext_lazy("Separator"),
max_length=1,
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
continue_if_exists = forms.BooleanField(
label=ugettext_lazy("Continue on error"), required=False,
help_text=ugettext_lazy("Don't treat duplicated objects as error")
)
def __init__(self, *args, **kwargs):
super(ImportDataForm, self).__init__(*args, **kwargs)
self.fields["sepchar"].widget.attrs = {"class": "col-md-1 form-control"}
def clean_sepchar(self):
if self.cleaned_data["sepchar"] == "":
return ";"
return self.cleaned_data["sepchar"]
class ImportIdentitiesForm(ImportDataForm):
"""A form to import identities."""
crypt_password = forms.BooleanField(
label=ugettext_lazy("Crypt passwords"), required=False,
help_text=ugettext_lazy(
"Check this option if passwords contained in your file "
"are not crypted"
)
)
| 29.521739 | 80 | 0.662003 | [
"ISC"
] | antoniotrento/modoboa | modoboa/admin/forms/import_.py | 1,358 | Python |
from fork.util.ints import uint64
from .constants import ConsensusConstants
testnet_kwargs = {
"SLOT_BLOCKS_TARGET": 32,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16, # Must be less than half of SLOT_BLOCKS_TARGET
"MAX_SUB_SLOT_BLOCKS": 128, # Must be less than half of SUB_EPOCH_BLOCKS
"NUM_SPS_SUB_SLOT": 64, # Must be a power of 2
"SUB_SLOT_ITERS_STARTING": 2 ** 27,
# DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
# multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
"DIFFICULTY_CONSTANT_FACTOR": 2 ** 64,
"DIFFICULTY_STARTING": 13,
"DIFFICULTY_CHANGE_MAX_FACTOR": 3, # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
# These 3 constants must be changed at the same time
"SUB_EPOCH_BLOCKS": 384, # The number of blocks per sub-epoch, mainnet 384
"EPOCH_BLOCKS": 4608, # The number of blocks per epoch, mainnet 4608. Must be multiple of SUB_EPOCH_SB
"SIGNIFICANT_BITS": 8, # The number of bits to look at in difficulty and min iters. The rest are zeroed
"DISCRIMINANT_SIZE_BITS": 1024, # Max is 1024 (based on ClassGroupElement int size)
"NUMBER_ZERO_BITS_PLOT_FILTER": 9, # H(plot signature of the challenge) must start with these many zeroes
"MIN_PLOT_SIZE": 32, # 32 for mainnet
"MAX_PLOT_SIZE": 50,
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"NUM_SP_INTERVALS_EXTRA": 3, # The number of sp intervals to add to the signage point
"MAX_FUTURE_TIME": 5 * 60, # The next block can have a timestamp of at most these many seconds in the future
"NUMBER_OF_TIMESTAMPS": 11, # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
# Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
# We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
# Default used for tests is std_hash(b'')
"GENESIS_CHALLENGE": bytes.fromhex("be6bbdf83a789fd2b7e5ac8e2954f510e92115bb9e1c84591f6adb4055a3b845"),
    # Forks of fork should change this value to provide replay attack protection. This is set to the mainnet genesis challenge.
"AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("3800a9169891c0554775b12cbf5d79f6eb50ccb5f95630536a4cecd7a18aa34b"),
"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
"75e5849b1a27d71e74de1390a4fc81c38b4ed8ce24d4efb2c9a5807d0e82106c"
),
"GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
"75e5849b1a27d71e74de1390a4fc81c38b4ed8ce24d4efb2c9a5807d0e82106c"
),
"MAX_VDF_WITNESS_SIZE": 64,
# Size of mempool = 50x the size of block
"MEMPOOL_BLOCK_BUFFER": 50,
# Max coin amount, fits into 64 bits
"MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
# Max block cost in clvm cost units
"MAX_BLOCK_COST_CLVM": 11000000000,
# The cost per byte of generator program
"COST_PER_BYTE": 12000,
"WEIGHT_PROOF_THRESHOLD": 2,
"BLOCKS_CACHE_SIZE": 4608 + (128 * 4),
"WEIGHT_PROOF_RECENT_BLOCKS": 1000,
"MAX_BLOCK_COUNT_PER_REQUESTS": 32, # Allow up to 32 blocks per request
"INITIAL_FREEZE_END_TIMESTAMP": 1620061200, # Mon May 03 2021 17:00:00 GMT+0000
"NETWORK_TYPE": 0,
"MAX_GENERATOR_SIZE": 1000000,
"MAX_GENERATOR_REF_LIST_SIZE": 512, # Number of references allowed in the block generator ref list
"POOL_SUB_SLOT_ITERS": 37600000000, # iters limit * NUM_SPS
}
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs) # type: ignore
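
# Illustrative sketch, not part of the original file: tests and simulators typically derive
# tweaked constants via replace(). Shown commented out because it assumes ConsensusConstants
# exposes a dataclasses-style replace() method, as in the upstream codebase this fork tracks.
# TEST_CONSTANTS = DEFAULT_CONSTANTS.replace(DIFFICULTY_STARTING=1, SUB_SLOT_ITERS_STARTING=2 ** 10)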
| 57.967742 | 118 | 0.741514 | [
"Apache-2.0"
] | Fork-Network/fork-blockchain | fork/consensus/default_constants.py | 3,594 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import os.path
import sys
import io
import logging
import datetime
from pprint import pprint
import yaml
import tzlocal
class ConfigFileNotFound(Exception):
pass
def _set_default_config():
config = {
"url": "",
"title": "",
"keywords": "",
"description": "",
"author": "",
"root": "/",
"source": "content",
"destination": "output",
"attach": "attach",
"themes_dir": "themes",
"theme": "simple2",
"default_ext": "md",
"pygments": True,
"debug": False,
"time": datetime.datetime.now(tzlocal.get_localzone()),
}
return config
def _post_process(config):
for k, v in config.items():
if v is None:
config[k] = ""
if config["url"].endswith("/"):
config["url"] = config["url"][:-1]
return config
def get_default_config():
return _post_process(_set_default_config())
def parse_config(config_file):
if not os.path.exists(config_file):
raise ConfigFileNotFound("{0} not exists".format(config_file))
default_config = _set_default_config()
with io.open(config_file, "rt", encoding="utf-8") as fd:
config = yaml.load(fd, Loader=yaml.FullLoader)
default_config.update(config)
config = _post_process(default_config)
return config
if __name__ == "__main__":
# pylint: disable=pointless-string-statement
"""
Usage:
python -m simiki.config : to test config template
python -m simiki.config _config.yml : to test _config.yml file in \
curren dir
"""
if len(sys.argv) == 1:
base_dir = os.path.dirname(__file__)
_config_file = os.path.join(base_dir, "conf_templates",
"_config.yml.in")
elif len(sys.argv) == 2:
base_dir = os.getcwd()
_config_file = os.path.join(base_dir, sys.argv[1])
else:
logging.error("Use the template config file by default, "
"you can specify the config file to parse. \n"
"Usage: `python -m simiki.config [_config.yml]'")
sys.exit(1)
pprint(parse_config(_config_file))
| 24.989362 | 75 | 0.585355 | [
"MIT"
] | 418sec/simiki | simiki/config.py | 2,349 | Python |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class ZipfTest(test_util.TestCase):
def assertBetween(self, x, minimum, maximum):
self.assertGreaterEqual(x, minimum)
self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.mean())
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power_dtype = tf.float32
event_dtype = tf.int32
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype, validate_args=True)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(
zipf.dtype, zipf.sample(10, seed=test_util.test_seed()).dtype)
self.assertEqual(
zipf.dtype, zipf.sample(1, seed=test_util.test_seed()).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample(seed=test_util.test_seed()))
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
# Non-integer samples are rejected if validate_args is True and
# interpolate_nondiscrete is False.
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
non_integer_samples = [0.99, 4.5, 5.001, 1e-5]
for x in non_integer_samples:
with self.assertRaisesOpError("cannot contain fractional components"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("cannot contain fractional components"):
self.evaluate(zipf.prob(x))
# Negative samples are rejected if validate_args is True.
zipf = tfd.Zipf(power=power, validate_args=True)
negative_samples = [-3, -2, -1]
for x in negative_samples:
with self.assertRaisesOpError("must be non-negative"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("must be non-negative"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
# Check that log_pmf(x) of tfd.Zipf is between the values of
# stats.zipf.logpmf for ceil(x) and floor(x).
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
# Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for
# ceil(x) and floor(x).
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power, validate_args=True)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=True)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power, validate_args=True)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power, validate_args=True)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v, validate_args=True)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
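    # Note: the Zipf pmf is proportional to k**(-power) over k = 1, 2, ..., so it
    # is strictly decreasing in k and the mode is always 1, whatever the power.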
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v, validate_args=False)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
zipf = tfd.Zipf(power=power_v, validate_args=True)
n = int(100e3)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
# stats.zipf wants float64 params.
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5
zipf = tfd.Zipf(power=power_v, validate_args=True)
n = int(100e4)
samples = zipf.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
# stats.zipf wants float64 params.
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
# Test that sampling with the same seed twice gives the same results.
def testZipfSampleMultipleTimes(self):
n = 1000
seed = test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1", validate_args=True)
tf.random.set_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2", validate_args=True)
tf.random.set_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1., validate_args=False)
n = 1000
self.evaluate(zipf.sample(n, seed=test_util.test_seed()))
if __name__ == "__main__":
tf.test.main()
| 37.299304 | 79 | 0.669818 | [
"Apache-2.0"
] | OrenBochman/probability | tensorflow_probability/python/distributions/zipf_test.py | 16,076 | Python |
"""
Tensorflow implementation of DeepFM
"""
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
from sklearn import metrics
# from yellowfin import YFOptimizer
import os
import sys
import json
"""
关于 X_i 和 X_v
为什么要把训练数据分成两个矩阵?
FM模型需要为每个特征训练一个embedding vector,
在模型计算过程中使用 embedding_lookup + index matrix 可以方便计算。
首先把特征分成两种,一种是不需要one hot(数值类),一种是需要one hot(枚举类)。
然后定义,one hot 之前的特征称为 field,one hot 之后的特征为 feature。
- X_i 表示 feat_index
- X_v 表示 feat_value
**feat_index**
feat_index 存储的是样本的 field 的"feature索引",shape=(N,field_size)。
feat_index[i,j]表示的是第i个样本第j个field的 feature_index。
如果当前 field 不需要 one hot,此 field 就只会映射成一个 feature;
如果当前 field 需要 one hot,此 field 就会被映射成多个 feature ,
每个枚举值是一个 feature,其实就是进行 one hot 编码。
比如 feat_index[i,j]=c,表示 第i个样本第j个 field 的对应着第c个feature,
c是 feature_index。
当然如果 field_j 是数值 field,所有样本的j列都是一样的值,因为 field_j 不需要onehot。
如果 field_j 需要one hot,c的值就是其原来的枚举值onehot后映射对应的 feature_index。
feat_index 是给 embedding_lookup是用的。
**feat_value**
feat_value 存储的是样本field的"值",shape=(N,field_size)。
feat_value[i,j]表示的是第i个样本第j个field的值。
如果当前field 不需要 one hot,feat_value[i,j]就是原始数据值;
如果当前field 需要 one hot,feat_value[i,j]就是常量1;
注意:这里有一个前提条件,就是 one_hot 的 field 变量只能取一个值,一个变量可以有多个取值的情况是不支持的。
"""
class DeepFM(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size,
embedding_size=8, dropout_fm=[1.0, 1.0],
deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5],
deep_layers_activation=tf.nn.relu,
epoch=10, batch_size=256,
learning_rate=0.001, optimizer_type="adam",
batch_norm=0, batch_norm_decay=0.995,
verbose=False, random_seed=2016,
use_fm=True, use_deep=True,
loss_type="logloss", eval_metric=roc_auc_score,
l2_reg=0.0, greater_is_better=True, threshold=0.5
):
assert (use_fm or use_deep)
assert loss_type in ["logloss", "mse"], \
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
self.feature_size = feature_size # 259 denote as M, size of the feature dictionary
self.field_size = field_size # 39 denote as F, size of the feature fields
self.embedding_size = embedding_size # 8 denote as K, size of the feature embedding
self.dropout_fm = dropout_fm
self.deep_layers = deep_layers
self.dropout_deep = dropout_deep
self.deep_layers_activation = deep_layers_activation
self.use_fm = use_fm
self.use_deep = use_deep
self.l2_reg = l2_reg
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer_type
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
        self.verbose = verbose  # whether to print the total number of parameters
self.random_seed = random_seed
self.loss_type = loss_type
self.eval_metric = eval_metric
        self.greater_is_better = greater_is_better  # whether a larger metric value is better
self.train_result, self.valid_result = [], []
self.sess = None
self.graph = None
self._config = None
self.threshold = threshold
def _make_config_pack(self):
self._config = {
"feature_size": self.feature_size, # 259 denote as M, size of the feature dictionary
"field_size ": self.field_size, # 39 denote as F, size of the feature fields
"embedding_size ": self.embedding_size, # 8 denote as K, size of the feature embedding
"dropout_fm ": self.dropout_fm,
"deep_layers ": self.deep_layers,
"dropout_deep ": self.dropout_deep,
"deep_layers_activation ": self.deep_layers_activation,
"use_fm ": self.use_fm,
"use_deep ": self.use_deep,
"l2_reg ": self.l2_reg,
"epoch ": self.epoch,
"batch_size ": self.batch_size,
"learning_rate ": self.learning_rate,
"optimizer_type ": self.optimizer_type,
"batch_norm ": self.batch_norm,
"batch_norm_decay ": self.batch_norm_decay,
"verbose ": self.verbose, # 是否打印参数总量
"random_seed ": self.random_seed,
"loss_type": self.loss_type,
"eval_metric ": self.eval_metric,
"greater_is_better ": self.greater_is_better, # 是否值越大越好
}
# self.model_path = '%s/deepfm' % (save_path)
# self._init_graph()
def init_graph(self):
if self.sess is not None:
return
self.graph = tf.Graph()
with self.graph.as_default():
tf1.set_random_seed(self.random_seed)
self.feat_index = tf1.placeholder(tf.int32, shape=[None, None],
name="feat_index") # None * F
self.feat_value = tf1.placeholder(tf.float32, shape=[None, None],
name="feat_value") # None * F
self.label = tf1.placeholder(tf.float32, shape=[None, 1], name="label") # None * 1
self.dropout_keep_fm = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")
self.dropout_keep_deep = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_deep")
self.train_phase = tf1.placeholder(tf.bool, name="train_phase")
self.weights = self._initialize_weights()
            # Every feature has one embedding.
            # feature_embeddings.shape = (self.feature_size, self.embedding_size)
            # feat_index[i, j] holds the feature_index of field j of sample i.
            # 1. If field_j is a non-one-hot feature, it is not split into multiple
            #    features: feat_index[:, j] has the same value in every row, i.e. the
            #    same feature_index.
            # 2. If field_j is a one-hot feature, it is split into multiple features
            #    (one per enum value). In that case feat_index[:, j] differs per row,
            #    and its value is the feature_index that Value(field_j) maps to.
            # E.g. for sample i=3, field j=5 is "color" with value "red"; if red is
            # one-hot mapped to feature_index=13, then feat_index[3, 5] = 13.
            # shape = (N samples * field_size * K)
            # N is the number of samples.
            # K is the embedding size.
            # Look up the embedding vector of every feature of every sample.
            # For a one-hot field this only fetches the embedding of the enum value's
            # feature_index, i.e. one embedding per field, so each sample still ends
            # up with field_size embedding vectors.
self.embeddings = tf.nn.embedding_lookup(
self.weights["feature_embeddings"], # shape=(self.feature_size, self.embedding_size)
self.feat_index # N * field_size
)
# shape=(None * F * 1)
#
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1]) # None * F * 1
            # The FM pairwise term is x_i * x_j * <v_i, v_j> = <x_i*v_i, x_j*v_j>,
            # so each feature's embedding is first scaled by its feature value.
self.embeddings = tf.multiply(self.embeddings, feat_value) # None * F * K
# ---------- first order term ----------
            # For a k-dim tensor, tf.reduce_sum(x, axis=k-1) sums over the innermost dimension.
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index) # None * F * 1
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2) # None * F
self.y_first_order = tf.nn.dropout(self.y_first_order, rate=1 - self.dropout_keep_fm[0]) # None * F
# ---------- second order term ---------------
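            # The FM second-order term relies on the identity
            #   sum_{i<j} x_i x_j <v_i, v_j>
            #     = 0.5 * [ (sum_i x_i v_i)^2 - sum_i (x_i v_i)^2 ]
            # applied element-wise over the K embedding dimensions; the
            # "sum_square" and "square_sum" parts below compute the two halves.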
# sum_square part
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1) # None * K
self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K
# square_sum part
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K
# second order
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square,
self.squared_sum_features_emb) # None * K
self.y_second_order = tf.nn.dropout(self.y_second_order, rate=1 - self.dropout_keep_fm[1]) # None * K
# ---------- Deep component ----------
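            # The deep part is a plain MLP over the concatenation of all field
            # embeddings (input size F * K), with optional batch norm and dropout
            # after every hidden layer.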
self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size]) # None * (F*K)
self.y_deep = tf.nn.dropout(self.y_deep, rate=1 - self.dropout_keep_deep[0])
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]),
self.weights["bias_%d" % i]) # None * layer[i] * 1
if self.batch_norm:
self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase,
scope_bn="bn_%d" % i) # None * layer[i] * 1
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep,
rate=1 - self.dropout_keep_deep[1 + i]) # dropout at each Deep layer
# ---------- DeepFM ----------
if self.use_fm and self.use_deep:
concat_input = tf.concat([self.y_first_order, self.y_second_order, self.y_deep], axis=1)
elif self.use_fm:
concat_input = tf.concat([self.y_first_order, self.y_second_order], axis=1)
elif self.use_deep:
concat_input = self.y_deep
self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
# loss
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out, name='out')
self.loss = tf1.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # L2 regularization on weights
if self.l2_reg > 0:
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["concat_projection"])
if self.use_deep:
for i in range(len(self.deep_layers)):
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["layer_%d" % i])
# optimizer
            # An off-the-shelf FTRL optimizer could also be used to minimize the loss:
# optimizer = tf.train.FtrlOptimizer(lr) # lr: learningRate
# gradients = optimizer.compute_gradients(loss) # cost
# train_op = optimizer.apply_gradients(gradients, global_step=global_step)
if self.optimizer_type == "adam":
self.optimizer = tf1.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf1.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf1.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(
self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf1.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
self.loss)
# elif self.optimizer_type == "yellowfin":
# self.optimizer = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0).minimize(
# self.loss)
# init
self.saver = tf1.train.Saver()
init = tf1.global_variables_initializer()
self.sess = self._init_session()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _init_session(self):
config = tf1.ConfigProto(device_count={"gpu": 0})
        config.gpu_options.allow_growth = True  # allocate GPU memory on demand
return tf1.Session(config=config)
def _initialize_weights(self):
        weights = dict()  # dictionary of all model parameters
# embeddings
weights["feature_embeddings"] = tf.Variable(
tf.random.normal([self.feature_size, self.embedding_size], 0.0, 0.01),
# tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
name="feature_embeddings") # feature_size * K
weights["feature_bias"] = tf.Variable(
# tf.random_uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
tf.random.uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
# deep layers
        num_layer = len(self.deep_layers)  # number of deep layers
input_size = self.field_size * self.embedding_size
        glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))  # std of the normal init (Glorot)
weights["layer_0"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
dtype=np.float32) # 1 * layers[0]
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
if self.use_fm and self.use_deep:
input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
elif self.use_fm:
input_size = self.field_size + self.embedding_size
elif self.use_deep:
input_size = self.deep_layers[-1]
glorot = np.sqrt(2.0 / (input_size + 1))
weights["concat_projection"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
dtype=np.float32) # layers[i-1]*layers[i]
weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
def batch_norm_layer(self, x, train_phase, scope_bn):
bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=True, reuse=None, trainable=True, scope=scope_bn)
bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=False, reuse=True, trainable=True, scope=scope_bn)
z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
    # shuffle three lists simultaneously
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def fit_on_batch(self, Xi, Xv, y):
feed_dict = {self.feat_index: Xi,
self.feat_value: Xv,
self.label: y,
self.dropout_keep_fm: self.dropout_fm,
self.dropout_keep_deep: self.dropout_deep,
self.train_phase: True}
out, loss, opt = self.sess.run((self.out, self.loss, self.optimizer), feed_dict=feed_dict)
return out, loss
def fit(self, Xi_train, Xv_train, y_train,
Xi_valid=None, Xv_valid=None, y_valid=None,
early_stopping=False, refit=False):
"""
:param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)
:param y_train: label of each sample in the training set
:param Xi_valid: list of list of feature indices of each sample in the validation set
:param Xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = Xv_valid is not None
Xi_train = Xi_train.copy()
Xv_train = Xv_train.copy()
y_train = y_train.copy()
for epoch in range(self.epoch):
t1 = time()
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
                train_out, train_loss = self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
                # print(train_out, file=sys.stderr)
                if i % 1000 == 0:
                    # print(train_out, file=sys.stderr)
                    print("epoch:%d batch:%d train_loss=%.4f" % (epoch, i, train_loss), file=sys.stderr)
# evaluate training and validation datasets
train_me = self.evaluate(Xi_train, Xv_train, y_train)
self.train_result.append(train_me)
if has_valid:
valid_me = self.evaluate(Xi_valid, Xv_valid, y_valid)
self.valid_result.append(valid_me)
if self.verbose > 0 and epoch % self.verbose == 0:
print("[%d] [train] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
train_me['auc'],
train_me['acc'],
train_me['mse'],
train_me['precision_1'],
train_me['recall_1'],
time() - t1))
if has_valid:
print(
"[%d] [valid] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
valid_me['auc'],
valid_me['acc'],
valid_me['mse'],
valid_me['precision_1'],
valid_me['recall_1'],
time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
# fit a few more epoch on train+valid until result reaches the best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
Xi_train = Xi_train + Xi_valid
Xv_train = Xv_train + Xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train,
self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# check
train_result = self.evaluate(Xi_train, Xv_train, y_train)
if abs(train_result - best_train_score) < 0.001 or \
(self.greater_is_better and train_result > best_train_score) or \
((not self.greater_is_better) and train_result < best_train_score):
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] \
and valid_result[-2] > valid_result[-3] \
and valid_result[-3] > valid_result[-4] \
and valid_result[-4] > valid_result[-5]:
return True
return False
def predict(self, Xi, Xv):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:return: predicted probability of each sample
"""
dummy_y = [1] * len(Xi)
batch_index = 0
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
y_pred = None
while len(Xi_batch) > 0:
num_batch = len(y_batch)
feed_dict = {self.feat_index: Xi_batch,
self.feat_value: Xv_batch,
# self.label: y_batch,
self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
self.train_phase: False}
batch_out = self.sess.run(self.out, feed_dict=feed_dict)
if batch_index == 0:
y_pred = np.reshape(batch_out, (num_batch,))
else:
y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
batch_index += 1
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
return y_pred
def evaluate(self, Xi, Xv, y_true):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:param y: label of each sample in the dataset
:return: metric of the evaluation
"""
size = y_true.shape[0]
y_pred = self.predict(Xi, Xv)
error = y_true - y_pred
mse = (error * error).sum() / size
y_pred_m = y_pred.copy()
y_pred_m[y_pred_m >= self.threshold] = 1
y_pred_m[y_pred_m < self.threshold] = 0
# accuracy = metrics.accuracy_score(y_true, y_pred_m)
cm = metrics.confusion_matrix(y_true, y_pred_m, labels=[1, 0])
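        # With labels=[1, 0] the confusion matrix is laid out as
        #   cm = [[TP, FN],
        #         [FP, TN]]
        # (rows: true label 1 then 0; columns: predicted label 1 then 0).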
        # number of actual positives
        real_1_count = cm[0, :].sum()
        # number of predicted positives
        predict_1_count = cm[:, 0].sum()
        # number of correctly predicted positives
        right_1_count = cm[0, 0]
if predict_1_count == 0:
precision_1 = 0
else:
            # precision on the positive class
precision_1 = right_1_count / predict_1_count
if real_1_count == 0:
recall_1 = 0
else:
            # recall on the positive class
recall_1 = right_1_count / real_1_count
return {
"size": size,
"acc": (cm[0, 0] + cm[1, 1]) / size,
# "实际退费人次": cm[0, :].sum(),
# "预测退费人次": cm[:, 0].sum(),
# "预测正确人次": cm[0, 0],
# "预测错误人次": cm[1, 0],
"precision_1": precision_1,
"recall_1": recall_1,
"auc": self.eval_metric(y_true, y_pred),
"mse": mse
}
def save(self, save_path):
model_prefix = os.path.join(save_path, 'deepfm')
print("Save model...", save_path, file=sys.stderr)
self.saver.save(self.sess, model_prefix)
if self._config is not None:
config_path = os.path.join(save_path, "config.json")
with open(config_path, 'w') as fp:
                json.dump(self._config, fp)
print("Save model done.", save_path, file=sys.stderr)
def load(self, model_path):
if self.sess is not None:
self.sess.close()
if self.graph is not None:
self.graph = None
model_prefix = os.path.join(model_path, 'deepfm')
# self.sess = tf.Session()
# with tf.Session() as sess:
# print('load model', file=sys.stderr)
# t1 = time()
print("Load model...", model_path, file=sys.stderr)
self.sess = tf1.Session()
saver = tf1.train.import_meta_graph(model_prefix + '.meta', clear_devices=True)
saver.restore(self.sess, model_prefix)
self.feat_index = tf1.get_default_graph().get_tensor_by_name('feat_index:0')
self.feat_value = tf1.get_default_graph().get_tensor_by_name('feat_value:0')
self.dropout_keep_fm = tf1.get_default_graph().get_tensor_by_name('dropout_keep_fm:0')
self.dropout_keep_deep = tf1.get_default_graph().get_tensor_by_name('dropout_keep_deep:0')
self.train_phase = tf1.get_default_graph().get_tensor_by_name('train_phase:0')
self.out = tf1.get_default_graph().get_tensor_by_name('out:0')
config_path = os.path.join(model_path, "config.json")
if os.path.exists(config_path):
with open(config_path) as fp:
self._config = json.load(fp)
else:
self._config = None
print("Load model done", model_path, file=sys.stderr)
| 45.732323 | 146 | 0.57817 | [
"MIT"
] | zhangzhenhu/zzh | zzh/mllib/model/_deep_fm.py | 28,639 | Python |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
def create_user(**params):
return get_user_model().objects.create_user(**params)
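# These tests assume the user app registers URL patterns named 'user:create' and
# 'user:token' (resolved via reverse() above); payloads only need email, password
# and, for registration, a name.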
class PublicUserApiTests(TestCase):
"""The the user API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_successful(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'John Doe'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
payload = {
'email': '[email protected]',
'password': 'testpass',
"name": 'John Doe'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""tests that the password must be more than 5 characters"""
payload = {
'email': '[email protected]',
'password': 'pass',
"name": 'John Doe'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for a user"""
payload = {'email': '[email protected]', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('token', res.data)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='[email protected]', password='testpass')
payload = {'email': '[email protected]', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_create_token_no_user(self):
"""Test that token is not created if user does not exist"""
payload = {'email': '[email protected]', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
    def test_create_token_missing_field(self):
"""Test that token is not created if email/password not given"""
res = self.client.post(
TOKEN_URL,
{'email': '[email protected]', 'password': ''})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
| 37.577778 | 77 | 0.63631 | [
"MIT"
] | reallyusefulengine/django_rest_recipe | app/user/tests/test_user_api.py | 3,382 | Python |
# Copyright 2019 Grabtaxi Holdings PTE LTE (GRAB), All rights reserved.
# Use of this source code is governed by an MIT-style license that can be found in the LICENSE file
import subprocess
import re
import os
import glob
from utils.fileutils import FileUtils
from utils.ziputils import ZipUtils
from functools import wraps
from utils.logger import logger
from utils.step import step
# PREBUILD POD LIBS FLOW
# On a normal build it automatically:
# 1. Fetches the binary cache from a separate repo and unzips it into the pod-binary folder.
# 2. pod-binary hooks `pod install`, which then uses those cached libraries.
# When upgrading a library, you need to:
# 1. Run this script to prebuild/delete the libs that have changed.
# 2. Commit the binary changes to the cache repo and tag them.
# 3. Update the tag in this file, then submit a new MR.
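# Hypothetical usage sketch (repo URLs and paths below are placeholders, not this
# project's real configuration):
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(
#       cache_repo="[email protected]:org/pod-cache.git", cache_path="pod-cache/",
#       prebuild_path="Pods/_Prebuild/", generated_dir_name="GeneratedFrameworks",
#       delta_path="Pods/_Prebuild/delta.json", manifest_file="Manifest.lock",
#       devpod_cache_repo="[email protected]:org/devpod-cache.git",
#       devpod_cache_path="devpod-cache/", devpod_prebuild_output="devpod-out/",
#       generated_path="Pods/_Prebuild/GeneratedFrameworks/",
#       cache_libs_path="pod-cache/libs", devpod_cache_libs_path="devpod-cache/libs")
#   PrebuildLib(config).prebuild_if_needed()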
def print_func_name(func):
@wraps(func)
def echo_func(*func_args, **func_kwargs):
logger.info('🚀 Start func: {}'.format(func.__name__))
return func(*func_args, **func_kwargs)
return echo_func
class PrebuildLib:
def __init__(self, config):
self.cache_repo = config.cache_repo
self.cache_path = config.cache_path
self.prebuild_path = config.prebuild_path
self.generated_dir_name = config.generated_dir_name
self.delta_path = config.delta_path
self.manifest_file = config.manifest_file
self.devpod_cache_repo = config.devpod_cache_repo
self.devpod_cache_path = config.devpod_cache_path
self.devpod_prebuild_output = config.devpod_prebuild_output
self.generated_path = config.generated_path
self.cache_libs_path = config.cache_libs_path
self.devpod_cache_libs_path = config.devpod_cache_libs_path
@print_func_name
def zip_to_cache(self, libName):
if os.path.exists(self.cache_libs_path + libName + '.zip'):
            logger.info('Warning: lib {} already exists'.format(libName))
else:
ZipUtils.zip_dir(
'{}/{}'.format(self.generated_path, libName),
'{}/{}.zip'.format(self.cache_libs_path, libName)
)
@print_func_name
def clean_cache(self, libName):
FileUtils.remove_file(self.cache_libs_path + libName + ".zip")
@print_func_name
def zip_all_libs_to_cache(self):
os.system('rm -rf ' + self.cache_libs_path + '/*')
FileUtils.create_dir(self.cache_libs_path)
for dir in FileUtils.listdir_nohidden(self.generated_path):
ZipUtils.zip_dir(self.generated_path + '/' + dir, self.cache_libs_path + '/' + dir + '.zip')
FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)
def clean_and_pull(self, git_repo_dir):
subprocess.run(['git', '-C', git_repo_dir, 'reset', '--hard'])
subprocess.run(['git', '-C', git_repo_dir, 'clean', '-df'])
subprocess.run(['git', '-C', git_repo_dir, 'checkout', 'master'])
subprocess.run(['git', '-C', git_repo_dir, 'pull', '-X', 'theirs'])
@print_func_name
def fetch_cache(self):
with step('fetch_prebuild_libs'):
if not os.path.exists(self.cache_path):
subprocess.run(['git', 'clone', '--depth=1', self.cache_repo, self.cache_path])
else:
self.clean_and_pull(self.cache_path)
@print_func_name
def unzip_cache(self):
with step('unzip_prebuild_libs'):
FileUtils.remove_dir(self.prebuild_path)
FileUtils.create_dir(self.generated_path)
FileUtils.copy_file_or_dir(self.cache_path + self.manifest_file, self.prebuild_path)
# Unzip libs to pod-binary folder
for zipPath in glob.iglob(self.cache_libs_path + '/*.zip'):
ZipUtils.unzip(zipPath, self.generated_path)
@print_func_name
def fetch_and_apply_cache(self):
self.fetch_cache()
self.unzip_cache()
@print_func_name
def fetch_and_apply_devpod_cache(self):
with step('fetch_and_apply_devpod_cache'):
logger.info('Fetching devpod cache to {}'.format(self.devpod_cache_path))
if not os.path.exists(self.devpod_cache_path):
subprocess.run(['git', 'clone', '--depth=1', self.devpod_cache_repo, self.devpod_cache_path])
else:
self.clean_and_pull(self.devpod_cache_path)
# Unzip devpod libs
devpod_temp_dir = self.prebuild_path + 'devpod/'
logger.info('Unzip from: {} to: {}'.format(self.devpod_cache_libs_path, devpod_temp_dir))
for zip_path in glob.iglob(self.devpod_cache_libs_path + '/*.zip'):
ZipUtils.unzip(zip_path, devpod_temp_dir)
@print_func_name
def has_libs_change(self):
if os.path.exists(self.delta_path):
return True
return False
def push_all_to_git(self, git_dir):
        git_input_path = 'git -C ' + git_dir
os.system('{} add .'.format(git_input_path))
os.system('{} commit -m "Prebuild pod libs"'.format(git_input_path))
os.system('{} push'.format(git_input_path))
@print_func_name
def prebuild_if_needed(self):
self.fetch_and_apply_cache()
subprocess.run(['bundle', 'exec', 'pod', 'install'], check=True)
# Sync with cache directory
if not os.path.isfile(self.delta_path):
logger.info('No change in prebuilt frameworks')
return
try:
with open(self.delta_path) as f:
FileUtils.create_dir(self.cache_path)
data = f.read()
data = re.sub('"', '', data)
updatedMatches = re.findall(r'Updated: \[(.*)\]', data)
if updatedMatches:
updated = updatedMatches[0].strip()
logger.info("Updated frameworks: {}".format(updated))
if len(updated):
libs = updated.split(',')
for lib in libs:
libName = lib.strip()
self.clean_cache(libName)
self.zip_to_cache(libName)
deletedMatches = re.findall(r'Deleted: \[(.*)\]', data)
if deletedMatches:
deleted = deletedMatches[0].strip()
logger.info('Deleted frameworks: {}'.format(deleted))
if len(deleted):
libs = deleted.split(',')
for lib in libs:
self.clean_cache(lib.strip())
# Copy manifest file
FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)
self.push_all_to_git(self.cache_path)
except Exception as e:
raise e
@print_func_name
def prebuild_devpod(self):
self.fetch_and_apply_cache()
self.fetch_and_apply_devpod_cache()
subprocess.run(['bundle', 'exec', 'fastlane', 'run', 'cocoapods', 'try_repo_update_on_error:true'], check=True)
| 41.711765 | 119 | 0.624313 | [
"MIT"
] | qalandarov/cocoapods-binary-cache | lib/command/PythonScripts/prebuild_lib.py | 7,094 | Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.tools.prepare_node_join"""
import unittest
import shutil
import tempfile
import os.path
from ganeti import errors
from ganeti import constants
from ganeti import pathutils
from ganeti import compat
from ganeti import utils
from ganeti.tools import prepare_node_join
from ganeti.tools import common
import testutils
_JoinError = prepare_node_join.JoinError
_DATA_CHECK = prepare_node_join._DATA_CHECK
class TestVerifyCertificate(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def testNoCert(self):
common.VerifyCertificateSoft({}, error_fn=prepare_node_join.JoinError,
_verify_fn=NotImplemented)
def testGivenPrivateKey(self):
cert_filename = testutils.TestDataFilename("cert2.pem")
cert_pem = utils.ReadFile(cert_filename)
self.assertRaises(_JoinError, common._VerifyCertificateSoft,
cert_pem, _JoinError, _check_fn=NotImplemented)
def testInvalidCertificate(self):
self.assertRaises(errors.X509CertError,
common._VerifyCertificateSoft,
"Something that's not a certificate",
_JoinError, _check_fn=NotImplemented)
@staticmethod
def _Check(cert):
assert cert.get_subject()
def testSuccessfulCheck(self):
cert_filename = testutils.TestDataFilename("cert1.pem")
cert_pem = utils.ReadFile(cert_filename)
common._VerifyCertificateSoft(cert_pem, _JoinError,
_check_fn=self._Check)
class TestUpdateSshDaemon(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.keyfiles = {
constants.SSHK_RSA:
(utils.PathJoin(self.tmpdir, "rsa.private"),
utils.PathJoin(self.tmpdir, "rsa.public")),
constants.SSHK_DSA:
(utils.PathJoin(self.tmpdir, "dsa.private"),
utils.PathJoin(self.tmpdir, "dsa.public")),
constants.SSHK_ECDSA:
(utils.PathJoin(self.tmpdir, "ecdsa.private"),
utils.PathJoin(self.tmpdir, "ecdsa.public")),
}
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def testNoKeys(self):
data_empty_keys = {
constants.SSHS_SSH_HOST_KEY: [],
}
for data in [{}, data_empty_keys]:
for dry_run in [False, True]:
prepare_node_join.UpdateSshDaemon(data, dry_run,
_runcmd_fn=NotImplemented,
_keyfiles=NotImplemented)
self.assertEqual(os.listdir(self.tmpdir), [])
def _TestDryRun(self, data):
prepare_node_join.UpdateSshDaemon(data, True, _runcmd_fn=NotImplemented,
_keyfiles=self.keyfiles)
self.assertEqual(os.listdir(self.tmpdir), [])
def testDryRunRsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
})
def testDryRunDsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
],
})
def testDryRunEcdsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
],
})
def _RunCmd(self, fail, cmd, interactive=NotImplemented):
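    # Stub for the command runner: asserts that the daemon-util
    # "reload-ssh-keys" command is what gets invoked and fakes a success or
    # failure exit code depending on `fail`.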
self.assertTrue(interactive)
self.assertEqual(cmd, [pathutils.DAEMON_UTIL, "reload-ssh-keys"])
if fail:
exit_code = constants.EXIT_FAILURE
else:
exit_code = constants.EXIT_SUCCESS
return utils.RunResult(exit_code, None, "stdout", "stderr",
utils.ShellQuoteArgs(cmd),
NotImplemented, NotImplemented)
def _TestUpdate(self, failcmd):
data = {
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
constants.SSHS_SSH_KEY_TYPE: "dsa",
constants.SSHS_SSH_KEY_BITS: 1024,
}
runcmd_fn = compat.partial(self._RunCmd, failcmd)
if failcmd:
self.assertRaises(_JoinError, prepare_node_join.UpdateSshDaemon,
data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
else:
prepare_node_join.UpdateSshDaemon(data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted([
"rsa.public", "rsa.private",
"dsa.public", "dsa.private",
"ecdsa.public", "ecdsa.private",
]))
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
"rsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
"rsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
"dsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
"dsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.public")), "ecdsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.private")), "ecdsapriv")
def testSuccess(self):
self._TestUpdate(False)
def testFailure(self):
self._TestUpdate(True)
class TestUpdateSshRoot(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.sshdir = utils.PathJoin(self.tmpdir, ".ssh")
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def _GetHomeDir(self, user):
self.assertEqual(user, constants.SSH_LOGIN_USER)
return self.tmpdir
def testDryRun(self):
data = {
constants.SSHS_SSH_ROOT_KEY: [
(constants.SSHK_RSA, "aaa", "bbb"),
]
}
prepare_node_join.UpdateSshRoot(data, True,
_homedir_fn=self._GetHomeDir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.listdir(self.sshdir), [])
def testUpdate(self):
data = {
constants.SSHS_SSH_ROOT_KEY: [
(constants.SSHK_DSA, "privatedsa", "ssh-dss pubdsa"),
],
constants.SSHS_SSH_KEY_TYPE: "dsa",
constants.SSHS_SSH_KEY_BITS: 1024,
}
prepare_node_join.UpdateSshRoot(data, False,
_homedir_fn=self._GetHomeDir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(sorted(os.listdir(self.sshdir)),
sorted(["authorized_keys", "id_dsa", "id_dsa.pub"]))
self.assertTrue(utils.ReadFile(utils.PathJoin(self.sshdir, "id_dsa"))
is not None)
pub_key = utils.ReadFile(utils.PathJoin(self.sshdir, "id_dsa.pub"))
self.assertTrue(pub_key is not None)
self.assertEquals(utils.ReadFile(utils.PathJoin(self.sshdir,
"authorized_keys")),
pub_key)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| 34.177165 | 80 | 0.665592 | [
"BSD-2-Clause"
] | YSelfTool/ganeti | test/py/ganeti.tools.prepare_node_join_unittest.py | 8,681 | Python |
from ewah.hooks.base import EWAHBaseHook
import requests
import time
class EWAHAircallHook(EWAHBaseHook):
_ATTR_RELABEL = {
"api_id": "login",
"api_token": "password",
}
conn_name_attr = "ewah_aircall_conn_id"
default_conn_name = "ewah_aircall_default"
conn_type = "ewah_aircall"
hook_name = "EWAH Aircall Connection"
_RESOURCES = {
"users": {"incremental": True},
"teams": {},
"calls": {"incremental": True},
"numbers": {"incremental": True},
"contacts": {"incremental": True},
"tags": {},
}
_BASE_URL = "https://api.aircall.io/v1/{0}"
@staticmethod
def get_ui_field_behaviour():
return {
"hidden_fields": ["port", "schema", "extra", "host"],
"relabeling": {
"login": "Basic Auth API ID",
"password": "Baisc Auth API Token",
},
}
def get_data_in_batches(
self,
resource,
data_from=None,
data_until=None,
batch_size=10000,
batch_call_pause_seconds=1,
):
_msg = "batch_size param must be a positive integer <= 10k "
assert isinstance(batch_size, int), _msg
assert batch_size > 0, _msg
assert batch_size <= 10000, _msg
page_size = 50 # maximum page size is 50
auth = requests.auth.HTTPBasicAuth(
self.conn.api_id,
self.conn.api_token,
)
url = self._BASE_URL.format(resource)
params = {
"per_page": page_size,
}
if data_from:
params["from"] = int(time.mktime(data_from.timetuple()))
if data_until:
params["to"] = int(time.mktime((data_until).timetuple()))
data = []
while url:
time.sleep(batch_call_pause_seconds)
request = requests.get(url, params=params, auth=auth)
assert request.status_code == 200, request.text
response = request.json()
url = response.get("meta", {}).get("next_page_link")
data += response.get(resource, [])
if (not url) or (len(data) + page_size > batch_size):
yield data
data = []
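# Hedged usage sketch (exact hook construction depends on EWAHBaseHook and the
# configured Airflow connection; shown only to illustrate the generator contract
# of get_data_in_batches):
#
#   # for batch in hook.get_data_in_batches("calls", batch_size=5000):
#   #     ...  # each batch is a list of at most batch_size call dicts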
| 28.846154 | 69 | 0.549778 | [
"MIT"
] | Gemma-Analytics/ewah | ewah/hooks/aircall.py | 2,250 | Python |
# python 3.7
"""Utility functions to invert a given image back to a latent code."""
from tqdm import tqdm
import cv2
import numpy as np
import torch
from models.stylegan_generator import StyleGANGenerator
from models.stylegan_encoder import StyleGANEncoder
from models.perceptual_model import PerceptualModel
__all__ = ['StyleGANInverter']
def _softplus(x):
"""Implements the softplus function."""
return torch.nn.functional.softplus(x, beta=1, threshold=10000)
def _get_tensor_value(tensor):
"""Gets the value of a torch Tensor."""
return tensor.cpu().detach().numpy()
class StyleGANInverter(object):
"""Defines the class for StyleGAN inversion.
Even having the encoder, the output latent code is not good enough to recover
the target image satisfyingly. To this end, this class optimize the latent
code based on gradient descent algorithm. In the optimization process,
following loss functions will be considered:
(1) Pixel-wise reconstruction loss. (required)
(2) Perceptual loss. (optional, but recommended)
(3) Regularization loss from encoder. (optional, but recommended for in-domain
inversion)
NOTE: The encoder can be missing for inversion, in which case the latent code
will be randomly initialized and the regularization loss will be ignored.
"""
def __init__(self,
model_name,
learning_rate=1e-2,
iteration=100,
reconstruction_loss_weight=1.0,
perceptual_loss_weight=5e-5,
regularization_loss_weight=2.0,
logger=None):
"""Initializes the inverter.
NOTE: Only Adam optimizer is supported in the optimization process.
Args:
model_name: Name of the model on which the inverted is based. The model
should be first registered in `models/model_settings.py`.
logger: Logger to record the log message.
learning_rate: Learning rate for optimization. (default: 1e-2)
iteration: Number of iterations for optimization. (default: 100)
reconstruction_loss_weight: Weight for reconstruction loss. Should always
be a positive number. (default: 1.0)
perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual
loss. (default: 5e-5)
regularization_loss_weight: Weight for regularization loss from encoder.
This is essential for in-domain inversion. However, this loss will
automatically ignored if the generative model does not include a valid
encoder. 0 disables regularization loss. (default: 2.0)
"""
self.logger = logger
self.model_name = model_name
self.gan_type = 'stylegan'
self.G = StyleGANGenerator(self.model_name, self.logger)
self.E = StyleGANEncoder(self.model_name, self.logger)
self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val)
self.encode_dim = [self.G.num_layers, self.G.w_space_dim]
self.run_device = self.G.run_device
assert list(self.encode_dim) == list(self.E.encode_dim)
assert self.G.gan_type == self.gan_type
assert self.E.gan_type == self.gan_type
self.learning_rate = learning_rate
self.iteration = iteration
self.loss_pix_weight = reconstruction_loss_weight
self.loss_feat_weight = perceptual_loss_weight
self.loss_reg_weight = regularization_loss_weight
assert self.loss_pix_weight > 0
def preprocess(self, image):
"""Preprocesses a single image.
This function assumes the input numpy array is with shape [height, width,
channel], channel order `RGB`, and pixel range [0, 255].
The returned image is with shape [channel, new_height, new_width], where
`new_height` and `new_width` are specified by the given generative model.
The channel order of returned image is also specified by the generative
model. The pixel range is shifted to [min_val, max_val], where `min_val` and
`max_val` are also specified by the generative model.
"""
if not isinstance(image, np.ndarray):
raise ValueError(f'Input image should be with type `numpy.ndarray`!')
if image.dtype != np.uint8:
raise ValueError(f'Input image should be with dtype `numpy.uint8`!')
if image.ndim != 3 or image.shape[2] not in [1, 3]:
raise ValueError(f'Input should be with shape [height, width, channel], '
f'where channel equals to 1 or 3!\n'
f'But {image.shape} is received!')
if image.shape[2] == 1 and self.G.image_channels == 3:
image = np.tile(image, (1, 1, 3))
if image.shape[2] != self.G.image_channels:
raise ValueError(f'Number of channels of input image, which is '
f'{image.shape[2]}, is not supported by the current '
f'inverter, which requires {self.G.image_channels} '
f'channels!')
if self.G.image_channels == 3 and self.G.channel_order == 'BGR':
image = image[:, :, ::-1]
if image.shape[1:3] != [self.G.resolution, self.G.resolution]:
image = cv2.resize(image, (self.G.resolution, self.G.resolution))
image = image.astype(np.float32)
image = image / 255.0 * (self.G.max_val - self.G.min_val) + self.G.min_val
image = image.astype(np.float32).transpose(2, 0, 1)
return image
def get_init_code(self, image):
"""Gets initial latent codes as the start point for optimization.
The input image is assumed to have already been preprocessed, meaning to
have shape [self.G.image_channels, self.G.resolution, self.G.resolution],
channel order `self.G.channel_order`, and pixel range [self.G.min_val,
self.G.max_val].
"""
x = image[np.newaxis]
x = self.G.to_tensor(x.astype(np.float32))
z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim))
return z.astype(np.float32)
def invert(self, image, num_viz=0):
"""Inverts the given image to a latent code.
Basically, this function is based on gradient descent algorithm.
Args:
image: Target image to invert, which is assumed to have already been
preprocessed.
num_viz: Number of intermediate outputs to visualize. (default: 0)
Returns:
A two-element tuple. First one is the inverted code. Second one is a list
of intermediate results, where first image is the input image, second
one is the reconstructed result from the initial latent code, remainings
are from the optimization process every `self.iteration // num_viz`
steps.
"""
x = image[np.newaxis]
x = self.G.to_tensor(x.astype(np.float32))
x.requires_grad = False
init_z = self.get_init_code(image)
z = torch.Tensor(init_z).to(self.run_device)
z.requires_grad = True
optimizer = torch.optim.Adam([z], lr=self.learning_rate)
viz_results = []
viz_results.append(self.G.postprocess(_get_tensor_value(x))[0])
x_init_inv = self.G.net.synthesis(z)
viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0])
pbar = tqdm(range(1, self.iteration + 1), leave=True)
for step in pbar:
loss = 0.0
# Reconstruction loss.
x_rec = self.G.net.synthesis(z)
loss_pix = torch.mean((x - x_rec) ** 2)
loss = loss + loss_pix * self.loss_pix_weight
log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}'
# Perceptual loss.
if self.loss_feat_weight:
x_feat = self.F.net(x)
x_rec_feat = self.F.net(x_rec)
loss_feat = torch.mean((x_feat - x_rec_feat) ** 2)
loss = loss + loss_feat * self.loss_feat_weight
log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}'
# Regularization loss.
if self.loss_reg_weight:
z_rec = self.E.net(x_rec).view(1, *self.encode_dim)
loss_reg = torch.mean((z - z_rec) ** 2)
loss = loss + loss_reg * self.loss_reg_weight
log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}'
log_message += f', loss: {_get_tensor_value(loss):.3f}'
pbar.set_description_str(log_message)
if self.logger:
self.logger.debug(f'Step: {step:05d}, '
f'lr: {self.learning_rate:.2e}, '
f'{log_message}')
# Do optimization.
optimizer.zero_grad()
loss.backward()
optimizer.step()
if num_viz > 0 and step % (self.iteration // num_viz) == 0:
viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0])
return _get_tensor_value(z), viz_results
def easy_invert(self, image, num_viz=0):
"""Wraps functions `preprocess()` and `invert()` together."""
return self.invert(self.preprocess(image), num_viz)
def diffuse(self,
target,
context,
center_x,
center_y,
crop_x,
crop_y,
num_viz=0):
"""Diffuses the target image to a context image.
    Basically, this function is a modified version of `self.invert()`. More
concretely, the encoder regularizer is removed from the objectives and the
reconstruction loss is computed from the masked region.
Args:
target: Target image (foreground).
context: Context image (background).
center_x: The x-coordinate of the crop center.
center_y: The y-coordinate of the crop center.
crop_x: The crop size along the x-axis.
crop_y: The crop size along the y-axis.
num_viz: Number of intermediate outputs to visualize. (default: 0)
Returns:
A two-element tuple. First one is the inverted code. Second one is a list
of intermediate results, where first image is the direct copy-paste
image, second one is the reconstructed result from the initial latent
code, remainings are from the optimization process every
`self.iteration // num_viz` steps.
"""
image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution)
mask = np.zeros((1, *image_shape), dtype=np.float32)
xx = center_x - crop_x // 2
yy = center_y - crop_y // 2
mask[:, :, yy:yy + crop_y, xx:xx + crop_x] = 1.0
target = target[np.newaxis]
context = context[np.newaxis]
x = target * mask + context * (1 - mask)
x = self.G.to_tensor(x.astype(np.float32))
x.requires_grad = False
mask = self.G.to_tensor(mask.astype(np.float32))
mask.requires_grad = False
init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim))
init_z = init_z.astype(np.float32)
z = torch.Tensor(init_z).to(self.run_device)
z.requires_grad = True
optimizer = torch.optim.Adam([z], lr=self.learning_rate)
viz_results = []
viz_results.append(self.G.postprocess(_get_tensor_value(x))[0])
x_init_inv = self.G.net.synthesis(z)
viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0])
pbar = tqdm(range(1, self.iteration + 1), leave=True)
for step in pbar:
loss = 0.0
# Reconstruction loss.
x_rec = self.G.net.synthesis(z)
loss_pix = torch.mean(((x - x_rec) * mask) ** 2)
loss = loss + loss_pix * self.loss_pix_weight
log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}'
# Perceptual loss.
if self.loss_feat_weight:
x_feat = self.F.net(x * mask)
x_rec_feat = self.F.net(x_rec * mask)
loss_feat = torch.mean((x_feat - x_rec_feat) ** 2)
loss = loss + loss_feat * self.loss_feat_weight
log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}'
log_message += f', loss: {_get_tensor_value(loss):.3f}'
pbar.set_description_str(log_message)
if self.logger:
self.logger.debug(f'Step: {step:05d}, '
f'lr: {self.learning_rate:.2e}, '
f'{log_message}')
# Do optimization.
optimizer.zero_grad()
loss.backward()
optimizer.step()
if num_viz > 0 and step % (self.iteration // num_viz) == 0:
viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0])
return _get_tensor_value(z), viz_results
def easy_diffuse(self, target, context, *args, **kwargs):
"""Wraps functions `preprocess()` and `diffuse()` together."""
return self.diffuse(self.preprocess(target),
self.preprocess(context),
*args, **kwargs)
| 39.209524 | 80 | 0.6644 | [
"MIT"
] | Twizwei/idinvert_pytorch | utils/inverter.py | 12,351 | Python |
"""
Copyright (c) 2017 Robbin Bouwmeester
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
__author__ = "Robbin Bouwmeester"
__copyright__ = "Copyright 2017"
__credits__ = ["Robbin Bouwmeester"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Robbin Bouwmeester"
__email__ = "[email protected]"
__status__ = "nightly funzies"
import pandas as pd
from itertools import groupby
import logging
class LipidBLAST_entry():
def __init__(self,
name="",
ion="",
mw=0.0,
chem_form="",
num_ms2_peaks=0,
f_acyl_lengths=[],
unsats=[],
ms2=[]):
self.name = name
self.ion = ion
self.mw = mw
self.chem_form = chem_form
self.num_ms2_peaks = num_ms2_peaks
self.ms2 = ms2
self.f_acyl_lengths = f_acyl_lengths
self.unsats = unsats
def __str__(self):
ret_string = []
ret_string.append("================")
ret_string.append("")
ret_string.append("Lipid: %s" % (self.name))
ret_string.append("MW: %s" % (self.mw))
ret_string.append("Formula: %s" % (self.chem_form))
ret_string.append ("")
for f in self.ms2:
ret_string.append("%s\t%s\t%s" % (f[0],f[1],f[2]))
ret_string.append("")
ret_string.append("================")
return("\n".join(ret_string))
class LipidBLAST():
def __init__(self,
f_names=["LipidBlast-pos.msp","LipidBlast-neg.msp"],
min_acyl_length=10,
exclude_lyso=False,
include_ions=["[M-H]-"], #,"[M+]","[M+H]+","[M+NH4]+","[M-H]-","[M-2H](2-)","[M-Ac-H]-","[M+Na2-H]+","[M+]","[M+NH4]+","[M+Na]+","[M-2H](2-)","[M-Ac-H]-" "[M+]","[M+H]+","[M+NH4]+","[M-H]-","[M-2H](2-)","[M-Ac-H]-","[M+Na2-H]+","[M+]","[M+NH4]+","[M+Na]+","[M-2H](2-)","[M-Ac-H]-"
include_class=["PE","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro"], #,"SM","TG","CL", #,"SM","TG","CL","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro
aggregate_acyls=False,
use_simplified_names=True,
dalt_diff_lookup_bin=1):
self.f_names = f_names
self.min_acyl_length = min_acyl_length
self.exclude_lyso = exclude_lyso
self.include_ions = include_ions
self.include_class = include_class
self.use_simplified_names = use_simplified_names
self.dalt_diff_lookup_bin = dalt_diff_lookup_bin
self.aggregate_acyls = aggregate_acyls
self.lpb_dict = {}
self.ms1_dict = {}
self.ms1_dict_lookup = {}
self.tot_entr_read = 0
if len(self.f_names) > 0:
for f_name in f_names:
self.read_lpb(f_name)
def __str__(self):
ret_string = []
ret_string.append("Filenames: %s" % (self.f_names))
ret_string.append("Min acyl length: %s" % (self.min_acyl_length))
ret_string.append("Exclude lyso: %s" % (self.exclude_lyso))
ret_string.append("Include ions: %s" % (self.include_ions))
ret_string.append("Include lipid classes: %s" % (self.include_class))
ret_string.append("Use simplified names: %s" % (self.use_simplified_names))
ret_string.append("Lookup diff: %s Da" % (self.dalt_diff_lookup_bin))
ret_string.append("Total entries read: %s" % (self.tot_entr_read))
return("\n".join(ret_string))
def read_lpb(self,f_name):
def _get_general_info(name):
# Currently limited to max 9 unsats
unsats = [n[0] for n in name.split(":")[1:]]
class_name = name.split("(")[0]
if "-" in class_name:
name_split = name.split("(")
name_split[0] = name.split("(")[0].replace("-","")
name = "(".join(name_split)
acyl_lengths = name.split(":")
acyl_lengths.pop()
f_acyl_lengths = []
for acl in acyl_lengths:
try:
if "/" in acl:
f_acyl_lengths.append(acl.split("/")[1].replace("d","").replace("methyl-",""))
elif "-" in acl:
f_acyl_lengths.append(acl.split("-")[1].replace("d","").replace("methyl-",""))
else:
f_acyl_lengths.append(acl.split("(")[1].replace("d","").replace("methyl-",""))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
try:
f_acyl_lengths = list(map(int,f_acyl_lengths))
unsats = list(map(int,unsats))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
return(f_acyl_lengths,unsats,class_name)
def _simplify_name(class_name,acyls,unsats):
simplified_name = ""
simplified_name += class_name
simplified_name += "("
if not self.aggregate_acyls:
for f,u in zip(f_acyl_lengths,unsats):
simplified_name += str(f)
simplified_name += ":"
simplified_name += str(u)
simplified_name += "/"
simplified_name = simplified_name[:-1]
else:
simplified_name += str(sum(f_acyl_lengths))
simplified_name += ":"
simplified_name += str(sum(unsats))
simplified_name += ")"
return(simplified_name)
def _get_chem_form(chem_form_native,ion):
chem_form_ion = ""
for i,c in enumerate(chem_form_native):
if i+1 >= len(chem_form_native):
if c.isdigit(): chem_form_ion += c
else:
chem_form_ion += c
chem_form_ion += "1"
elif c.isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isupper():
chem_form_ion += c
chem_form_ion += "1"
elif chem_form_native[i+1].isdigit(): chem_form_ion += c
list_chem= [''.join(g) for _, g in groupby(chem_form_ion, str.isalpha)]
chem_form_ion = dict(zip(list_chem[::2],map(int,list_chem[1::2])))
if "+" not in ion:
if "[M-H]-" in ion:
try: chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-2H](2-)" in ion:
try: chem_form_ion["H"] -= 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-Ac-H]-" in ion:
try:
chem_form_ion["C"] += 2
chem_form_ion["H"] += 3
chem_form_ion["O"] += 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
else:
if "[M+H]+" in ion:
try: chem_form_ion["H"] += 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+NH4]+" in ion:
try:
                        if "N" in chem_form_ion: chem_form_ion["N"] += 1
else: chem_form_ion["N"] = 1
chem_form_ion["H"] += 4
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na]+" in ion:
try:
                        if "Na" in chem_form_ion: chem_form_ion["Na"] += 1
else: chem_form_ion["Na"] = 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na2-H]+" in ion:
try:
                        if "Na" in chem_form_ion: chem_form_ion["Na"] += 2
else: chem_form_ion["Na"] = 2
chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
return("".join([atom+str(num_atom) for atom,num_atom in sorted(chem_form_ion.items())]))
with open(f_name) as infile:
fragments = []
pre_c_mass = 0.0
name = ""
            ion_type = ""
for line in infile:
line = line.strip()
#print(line)
if len(line) == 0:
f_acyl_lengths,unsats,class_name = _get_general_info(name)
f_acyl_lengths_error = [a for a in f_acyl_lengths if a < self.min_acyl_length and a != 0]
if (len(class_name) == 0) or \
(ion_type not in self.include_ions) or \
(len([c for c in self.include_class if c in name]) == 0) or \
(self.exclude_lyso and "/0:0" in name) or \
(len(f_acyl_lengths_error) > 0):
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
continue
simplified_name = _simplify_name(class_name,f_acyl_lengths,unsats)
new_entry = LipidBLAST_entry(name=name,
ion=ion_type,
mw=pre_c_mass,
chem_form=chem_form_ion,
num_ms2_peaks=num_peaks,
ms2=fragments,
f_acyl_lengths=f_acyl_lengths,
unsats=unsats)
self.lpb_dict["%s|%s" % (simplified_name,ion_type)] = new_entry
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.dalt_diff_lookup_bin
if loc_dict in self.ms1_dict_lookup.keys():
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
else:
self.ms1_dict_lookup[loc_dict] = {}
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
self.tot_entr_read += 1
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
elif ":" in line:
if line.startswith("PRECURSORMZ"):
pre_c_mass = float(line.split(": ")[1])
if line.startswith("Name: "):
name = line.split("; ")[-1]
ion_type = line.split("; ")[1]
if line.startswith("Comment: "):
# Some of the chemical formulas contain a ";" at the end; remove
chem_form_native = line.split("; ")[-1].replace(";","")
#print(chem_form_native)
chem_form_ion = _get_chem_form(chem_form_native,ion_type)
if line.startswith("Num Peaks:"):
num_peaks = int(line.split(": ")[-1])
else:
if line=="\x1a": #EOF
continue
fragments.append([float(line.split(" ")[0]),float(line.split(" ")[1]),line.split(" ")[2].replace("\"","")])
class PrecursorFilter():
def __init__(self,db,ppm=10):
self.db = db
self.ppm = ppm
def retrieve_entry_pre_c_mass(self,pre_c_mass):
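        # Convert the ppm tolerance into an absolute mass window in Da,
        # e.g. 10 ppm at a precursor mass of 760.585 Da -> ~0.0076 Da.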
mass_error_threshold = (pre_c_mass*self.ppm)/1000000
ret_entries = []
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.db.dalt_diff_lookup_bin
loc_dict_lower = (int(pre_c_mass-mass_error_threshold)) - (int(pre_c_mass-mass_error_threshold)) % self.db.dalt_diff_lookup_bin
loc_dict_upper = (int(pre_c_mass+mass_error_threshold)) - (int(pre_c_mass+mass_error_threshold)) % self.db.dalt_diff_lookup_bin
# TODO set does not have to be list
locs_to_search = list(set([loc_dict,loc_dict_lower,loc_dict_upper]))
for loc in locs_to_search:
try:
for name,entr in self.db.ms1_dict_lookup[loc].items():
mass_error = abs(entr.mw-pre_c_mass)
if mass_error < mass_error_threshold:
ret_entries.append([name,mass_error,entr])
except KeyError:
logging.warning("Could not find an entry in the database for prec mass: %s" % (pre_c_mass))
continue
return(ret_entries)
if __name__ == "__main__":
logging.basicConfig(filename="prec_filter.log",
level=logging.DEBUG,
filemode="w",
format="%(levelname)s:%(created)f:%(asctime)s:%(message)s")
logging.info("Reading the LPB database ...")
lpb = LipidBLAST()
logging.info("Done reading the LPB database ...")
logging.info(lpb)
step_three_df = pd.read_csv("stepone_new.csv")
    precf = PrecursorFilter(lpb)
prec_filt_result = []
for index,row in step_three_df.iterrows():
if (index % 10000==0):
logging.info("Analyzing row number and m/z: %s - %s" % (index,row["mz"]))
prec_hits = precf.retrieve_entry_pre_c_mass(row["mz"])
for hit in prec_hits:
prec_filt_result.append([row["mz"],hit[2].mw,hit[1],hit[0].split("|")[0],hit[2].chem_form,hit[0].split("|")[1]])
prec_filt_result = pd.DataFrame(prec_filt_result)
prec_filt_result.columns = ["Input Mass","Matched Mass","Delta","Abbreviation","Formula","Ion"]
prec_filt_result.to_excel("batch_results.xlsx",index=False)
| 36.162319 | 303 | 0.655579 | [
"Apache-2.0"
] | RobbinBouwmeester/LIT | src/lpb.py | 12,476 | Python |
"""
Support for MQTT discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/#discovery
"""
import asyncio
import json
import logging
import re
from homeassistant.components import mqtt
from homeassistant.components.mqtt import CONF_STATE_TOPIC, ATTR_DISCOVERY_HASH
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
TOPIC_MATCHER = re.compile(
r'(?P<prefix_topic>\w+)/(?P<component>\w+)/'
r'(?:(?P<node_id>[a-zA-Z0-9_-]+)/)?(?P<object_id>[a-zA-Z0-9_-]+)/config')
SUPPORTED_COMPONENTS = [
'binary_sensor', 'camera', 'cover', 'fan',
'light', 'sensor', 'switch', 'lock', 'climate',
'alarm_control_panel', 'vacuum']
CONFIG_ENTRY_COMPONENTS = [
'binary_sensor',
'camera',
'cover',
'light',
'lock',
'sensor',
'switch',
'climate',
'alarm_control_panel',
'fan',
'vacuum',
]
DEPRECATED_PLATFORM_TO_SCHEMA = {
'light': {
'mqtt_json': 'json',
'mqtt_template': 'template',
}
}
ALREADY_DISCOVERED = 'mqtt_discovered_components'
DATA_CONFIG_ENTRY_LOCK = 'mqtt_config_entry_lock'
CONFIG_ENTRY_IS_SETUP = 'mqtt_config_entry_is_setup'
MQTT_DISCOVERY_UPDATED = 'mqtt_discovery_updated_{}'
MQTT_DISCOVERY_NEW = 'mqtt_discovery_new_{}_{}'
TOPIC_BASE = '~'
ABBREVIATIONS = {
'aux_cmd_t': 'aux_command_topic',
'aux_stat_tpl': 'aux_state_template',
'aux_stat_t': 'aux_state_topic',
'avty_t': 'availability_topic',
'away_mode_cmd_t': 'away_mode_command_topic',
'away_mode_stat_tpl': 'away_mode_state_template',
'away_mode_stat_t': 'away_mode_state_topic',
'bri_cmd_t': 'brightness_command_topic',
'bri_scl': 'brightness_scale',
'bri_stat_t': 'brightness_state_topic',
'bri_val_tpl': 'brightness_value_template',
'clr_temp_cmd_tpl': 'color_temp_command_template',
'bat_lev_t': 'battery_level_topic',
'bat_lev_tpl': 'battery_level_template',
'chrg_t': 'charging_topic',
'chrg_tpl': 'charging_template',
'clr_temp_cmd_t': 'color_temp_command_topic',
'clr_temp_stat_t': 'color_temp_state_topic',
'clr_temp_val_tpl': 'color_temp_value_template',
'cln_t': 'cleaning_topic',
'cln_tpl': 'cleaning_template',
'cmd_t': 'command_topic',
'curr_temp_t': 'current_temperature_topic',
'dev_cla': 'device_class',
'dock_t': 'docked_topic',
'dock_tpl': 'docked_template',
'err_t': 'error_topic',
'err_tpl': 'error_template',
'fanspd_t': 'fan_speed_topic',
'fanspd_tpl': 'fan_speed_template',
'fanspd_lst': 'fan_speed_list',
'fx_cmd_t': 'effect_command_topic',
'fx_list': 'effect_list',
'fx_stat_t': 'effect_state_topic',
'fx_val_tpl': 'effect_value_template',
'exp_aft': 'expire_after',
'fan_mode_cmd_t': 'fan_mode_command_topic',
'fan_mode_stat_tpl': 'fan_mode_state_template',
'fan_mode_stat_t': 'fan_mode_state_topic',
'frc_upd': 'force_update',
'hold_cmd_t': 'hold_command_topic',
'hold_stat_tpl': 'hold_state_template',
'hold_stat_t': 'hold_state_topic',
'ic': 'icon',
'init': 'initial',
'json_attr': 'json_attributes',
'max_temp': 'max_temp',
'min_temp': 'min_temp',
'mode_cmd_t': 'mode_command_topic',
'mode_stat_tpl': 'mode_state_template',
'mode_stat_t': 'mode_state_topic',
'name': 'name',
'on_cmd_type': 'on_command_type',
'opt': 'optimistic',
'osc_cmd_t': 'oscillation_command_topic',
'osc_stat_t': 'oscillation_state_topic',
'osc_val_tpl': 'oscillation_value_template',
'pl_arm_away': 'payload_arm_away',
'pl_arm_home': 'payload_arm_home',
'pl_avail': 'payload_available',
'pl_cls': 'payload_close',
'pl_disarm': 'payload_disarm',
'pl_hi_spd': 'payload_high_speed',
'pl_lock': 'payload_lock',
'pl_lo_spd': 'payload_low_speed',
'pl_med_spd': 'payload_medium_speed',
'pl_not_avail': 'payload_not_available',
'pl_off': 'payload_off',
'pl_on': 'payload_on',
'pl_open': 'payload_open',
'pl_osc_off': 'payload_oscillation_off',
'pl_osc_on': 'payload_oscillation_on',
'pl_stop': 'payload_stop',
'pl_unlk': 'payload_unlock',
'pow_cmd_t': 'power_command_topic',
'ret': 'retain',
'rgb_cmd_tpl': 'rgb_command_template',
'rgb_cmd_t': 'rgb_command_topic',
'rgb_stat_t': 'rgb_state_topic',
'rgb_val_tpl': 'rgb_value_template',
'send_cmd_t': 'send_command_topic',
'send_if_off': 'send_if_off',
'set_pos_tpl': 'set_position_template',
'set_pos_t': 'set_position_topic',
'spd_cmd_t': 'speed_command_topic',
'spd_stat_t': 'speed_state_topic',
'spd_val_tpl': 'speed_value_template',
'spds': 'speeds',
'stat_clsd': 'state_closed',
'stat_off': 'state_off',
'stat_on': 'state_on',
'stat_open': 'state_open',
'stat_t': 'state_topic',
'stat_val_tpl': 'state_value_template',
'sup_feat': 'supported_features',
'swing_mode_cmd_t': 'swing_mode_command_topic',
'swing_mode_stat_tpl': 'swing_mode_state_template',
'swing_mode_stat_t': 'swing_mode_state_topic',
'temp_cmd_t': 'temperature_command_topic',
'temp_stat_tpl': 'temperature_state_template',
'temp_stat_t': 'temperature_state_topic',
'tilt_clsd_val': 'tilt_closed_value',
'tilt_cmd_t': 'tilt_command_topic',
'tilt_inv_stat': 'tilt_invert_state',
'tilt_max': 'tilt_max',
'tilt_min': 'tilt_min',
'tilt_opnd_val': 'tilt_opened_value',
'tilt_status_opt': 'tilt_status_optimistic',
'tilt_status_t': 'tilt_status_topic',
't': 'topic',
'uniq_id': 'unique_id',
'unit_of_meas': 'unit_of_measurement',
'val_tpl': 'value_template',
'whit_val_cmd_t': 'white_value_command_topic',
'whit_val_stat_t': 'white_value_state_topic',
'whit_val_tpl': 'white_value_template',
'xy_cmd_t': 'xy_command_topic',
'xy_stat_t': 'xy_state_topic',
'xy_val_tpl': 'xy_value_template',
}
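# The discovery handler below expands these abbreviations and the '~' base-topic
# placeholder, e.g. a (hypothetical) payload
#   {"~": "homeassistant/switch/irrigation", "cmd_t": "~/set", "stat_t": "~/state"}
# is turned into
#   {"command_topic": "homeassistant/switch/irrigation/set",
#    "state_topic": "homeassistant/switch/irrigation/state", ...}.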
async def async_start(hass: HomeAssistantType, discovery_topic, hass_config,
config_entry=None) -> bool:
"""Initialize of MQTT Discovery."""
async def async_device_message_received(topic, payload, qos):
"""Process the received message."""
match = TOPIC_MATCHER.match(topic)
if not match:
return
_prefix_topic, component, node_id, object_id = match.groups()
if component not in SUPPORTED_COMPONENTS:
_LOGGER.warning("Component %s is not supported", component)
return
if payload:
try:
payload = json.loads(payload)
except ValueError:
_LOGGER.warning("Unable to parse JSON %s: '%s'",
object_id, payload)
return
payload = dict(payload)
for key in list(payload.keys()):
abbreviated_key = key
key = ABBREVIATIONS.get(key, key)
payload[key] = payload.pop(abbreviated_key)
if TOPIC_BASE in payload:
base = payload[TOPIC_BASE]
for key, value in payload.items():
if isinstance(value, str) and value:
if value[0] == TOPIC_BASE and key.endswith('_topic'):
payload[key] = "{}{}".format(base, value[1:])
if value[-1] == TOPIC_BASE and key.endswith('_topic'):
payload[key] = "{}{}".format(value[:-1], base)
# If present, the node_id will be included in the discovered object id
discovery_id = ' '.join((node_id, object_id)) if node_id else object_id
discovery_hash = (component, discovery_id)
if payload:
if CONF_PLATFORM in payload and 'schema' not in payload:
platform = payload[CONF_PLATFORM]
if (component in DEPRECATED_PLATFORM_TO_SCHEMA and
platform in DEPRECATED_PLATFORM_TO_SCHEMA[component]):
schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform]
payload['schema'] = schema
_LOGGER.warning('"platform": "%s" is deprecated, '
'replace with "schema":"%s"',
platform, schema)
payload[CONF_PLATFORM] = 'mqtt'
if CONF_STATE_TOPIC not in payload:
payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(
discovery_topic, component,
'%s/' % node_id if node_id else '', object_id)
payload[ATTR_DISCOVERY_HASH] = discovery_hash
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
# Dispatch update
_LOGGER.info(
"Component has already been discovered: %s %s, sending update",
component, discovery_id)
async_dispatcher_send(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload)
elif payload:
# Add component
_LOGGER.info("Found new component: %s %s", component, discovery_id)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
if component not in CONFIG_ENTRY_COMPONENTS:
await async_load_platform(
hass, component, 'mqtt', payload, hass_config)
return
config_entries_key = '{}.{}'.format(component, 'mqtt')
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
await hass.config_entries.async_forward_entry_setup(
config_entry, component)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(
component, 'mqtt'), payload)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
await mqtt.async_subscribe(
hass, discovery_topic + '/#', async_device_message_received, 0)
return True
| 36.734043 | 79 | 0.646491 | [
"Apache-2.0"
] | arnisoph/home-assistant | homeassistant/components/mqtt/discovery.py | 10,359 | Python |
from lingloss import Lingloss
| 15 | 29 | 0.866667 | [
"MIT"
] | captainalan/linglosspy | __init__.py | 30 | Python |
#!/usr/bin/python
import os
import json
def get_db_config():
# read config file and return data
data = {}
with open('config.json', 'r') as infile:
data = json.loads(infile.read())
return data
| 19.727273 | 44 | 0.631336 | [
"MIT"
] | sjsafranek/pomegranate | Config.py | 217 | Python |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
from datetime import datetime
from uuid import uuid4
from .utils import normalize_package_name
from ..utils import (
create_file,
dir_exists,
ensure_parent_dir_exists,
path_join,
read_file,
read_file_binary,
write_file,
write_file_binary
)
TEMPLATES_DIR = path_join(os.path.dirname(os.path.abspath(__file__)), 'templates')
BINARY_EXTENSIONS = ('.png', )
SIMPLE_NAME = r'^\w+$'
def get_valid_templates():
return sorted(os.listdir(TEMPLATES_DIR))
def construct_template_fields(integration_name, repo_choice, **kwargs):
normalized_integration_name = normalize_package_name(integration_name)
check_name_cap = (
integration_name.capitalize()
if re.match(SIMPLE_NAME, integration_name)
else integration_name
)
if repo_choice == 'core':
author = 'Datadog'
email = '[email protected]'
email_packages = '[email protected]'
install_info = (
'The {check_name_cap} check is included in the [Datadog Agent][2] package, so you do not\n'
'need to install anything else on your server.'.format(check_name_cap=check_name_cap)
)
license_header = (
'# (C) Datadog, Inc. {year}\n'
'# All rights reserved\n'
'# Licensed under a 3-clause BSD style license (see LICENSE)\n'
.format(year=str(datetime.now().year))
)
support_type = 'core'
tox_base_dep = '../datadog_checks_base[deps]'
else:
author = 'U.N. Owen'
email = email_packages = '[email protected]'
install_info = (
'The {} check is not included in the [Datadog Agent][2] package, so you will\n'
'need to install it yourself.'.format(check_name_cap)
)
license_header = ''
support_type = 'contrib'
tox_base_dep = 'datadog-checks-base[deps]'
config = {
'author': author,
'check_class': '{}Check'.format(
''.join(part.capitalize() for part in normalized_integration_name.split('_'))
),
'check_name': normalized_integration_name,
'check_name_cap': check_name_cap,
'email': email,
'email_packages': email_packages,
'guid': uuid4(),
'license_header': license_header,
'install_info': install_info,
'repo_choice': repo_choice,
'support_type': support_type,
'tox_base_dep': tox_base_dep,
}
config.update(kwargs)
return config
def create_template_files(template_name, new_root, config, read=False):
files = []
template_root = path_join(TEMPLATES_DIR, template_name)
if not dir_exists(template_root):
return files
for root, _, template_files in os.walk(template_root):
for template_file in template_files:
template_path = path_join(root, template_file)
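            # Template file paths may themselves contain format fields (e.g. a
            # "{check_name}" directory), so render them with the config as well.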
file_path = template_path.replace(template_root, '')
file_path = '{}{}'.format(new_root, file_path.format(**config))
files.append(
File(
file_path,
template_path,
config,
read=read
)
)
return files
class File(object):
def __init__(self, file_path, template_path, config, read=False):
self.file_path = file_path
self.template_path = template_path
self.config = config
self.binary = template_path.endswith(BINARY_EXTENSIONS)
self._read = read_file_binary if self.binary else read_file
self._write = write_file_binary if self.binary else write_file
self.contents = None
if read:
self.read()
def read(self):
contents = self._read(self.template_path)
if self.binary:
self.contents = contents
else:
self.contents = contents.format(**self.config)
def write(self):
if self.contents is None:
create_file(self.file_path)
else:
ensure_parent_dir_exists(self.file_path)
self._write(self.file_path, self.contents)
| 30.719424 | 103 | 0.622951 | [
"BSD-3-Clause"
] | zaquaz/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/create.py | 4,270 | Python |
import numpy as np
import math
from chefboost.training import Training
#from training import Training
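# processContinuousFeatures picks a binary split threshold for a numeric column:
# candidate thresholds (all unique values, or mean +/- k*stdev summaries when the
# column has many distinct values) are scored with the metric of the configured
# algorithm (gain ratio, gain, Gini, chi-square or stdev reduction), and the column
# is rewritten as "<=threshold" / ">threshold" so it can be split like a nominal one.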
def processContinuousFeatures(algorithm, df, column_name, entropy, config):
#if True:
if df[column_name].nunique() <= 20:
unique_values = sorted(df[column_name].unique())
else:
unique_values = []
df_mean = df[column_name].mean()
df_std = df[column_name].std(ddof=0)
df_min = df[column_name].min()
df_max = df[column_name].max()
unique_values.append(df[column_name].min())
unique_values.append(df[column_name].max())
unique_values.append(df[column_name].mean())
scales = list(range(-3,+4, 1))
for scale in scales:
if df_mean + scale * df_std > df_min and df_mean + scale * df_std < df_max:
unique_values.append(df_mean + scale * df_std)
unique_values.sort()
#print(column_name,"->",unique_values)
subset_gainratios = []; subset_gains = []; subset_ginis = []; subset_red_stdevs = []; subset_chi_squares = []
if len(unique_values) == 1:
winner_threshold = unique_values[0]
df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
return df
for i in range(0, len(unique_values)-1):
threshold = unique_values[i]
subset1 = df[df[column_name] <= threshold]
subset2 = df[df[column_name] > threshold]
subset1_rows = subset1.shape[0]; subset2_rows = subset2.shape[0]
total_instances = df.shape[0] #subset1_rows+subset2_rows
subset1_probability = subset1_rows / total_instances
subset2_probability = subset2_rows / total_instances
if algorithm == 'ID3' or algorithm == 'C4.5':
threshold_gain = entropy - subset1_probability*Training.calculateEntropy(subset1, config) - subset2_probability*Training.calculateEntropy(subset2, config)
subset_gains.append(threshold_gain)
if algorithm == 'C4.5': #C4.5 also need gain in the block above. That's why, instead of else if we used direct if condition here
threshold_splitinfo = -subset1_probability * math.log(subset1_probability, 2)-subset2_probability*math.log(subset2_probability, 2)
gainratio = threshold_gain / threshold_splitinfo
subset_gainratios.append(gainratio)
elif algorithm == 'CART':
decision_for_subset1 = subset1['Decision'].value_counts().tolist()
decision_for_subset2 = subset2['Decision'].value_counts().tolist()
gini_subset1 = 1; gini_subset2 = 1
for j in range(0, len(decision_for_subset1)):
gini_subset1 = gini_subset1 - math.pow((decision_for_subset1[j]/subset1_rows),2)
for j in range(0, len(decision_for_subset2)):
gini_subset2 = gini_subset2 - math.pow((decision_for_subset2[j]/subset2_rows),2)
gini = (subset1_rows/total_instances)*gini_subset1 + (subset2_rows/total_instances) * gini_subset2
subset_ginis.append(gini)
elif algorithm == "CHAID":
#subset1 = high, subset2 = normal
unique_decisions = df['Decision'].unique() #Yes, No
num_of_decisions = len(unique_decisions) #2
subset1_expected = subset1.shape[0] / num_of_decisions
subset2_expected = subset2.shape[0] / num_of_decisions
chi_square = 0
for d in unique_decisions: #Yes, No
#decision = Yes
subset1_d = subset1[subset1["Decision"] == d] #high, yes
subset2_d = subset2[subset2["Decision"] == d] #normal, yes
subset1_d_chi_square = math.sqrt(((subset1_d.shape[0] - subset1_expected) * (subset1_d.shape[0] - subset1_expected))/subset1_expected)
subset2_d_chi_square = math.sqrt(((subset2_d.shape[0] - subset2_expected) * (subset2_d.shape[0] - subset2_expected))/subset2_expected)
chi_square = chi_square + subset1_d_chi_square + subset2_d_chi_square
subset_chi_squares.append(chi_square)
#----------------------------------
elif algorithm == 'Regression':
superset_stdev = df['Decision'].std(ddof=0)
subset1_stdev = subset1['Decision'].std(ddof=0)
subset2_stdev = subset2['Decision'].std(ddof=0)
threshold_weighted_stdev = (subset1_rows/total_instances)*subset1_stdev + (subset2_rows/total_instances)*subset2_stdev
threshold_reducted_stdev = superset_stdev - threshold_weighted_stdev
subset_red_stdevs.append(threshold_reducted_stdev)
#----------------------------------
if algorithm == "C4.5":
winner_one = subset_gainratios.index(max(subset_gainratios))
elif algorithm == "ID3": #actually, ID3 does not support for continuous features but we can still do it
winner_one = subset_gains.index(max(subset_gains))
elif algorithm == "CART":
winner_one = subset_ginis.index(min(subset_ginis))
elif algorithm == "CHAID":
winner_one = subset_chi_squares.index(max(subset_chi_squares))
elif algorithm == "Regression":
winner_one = subset_red_stdevs.index(max(subset_red_stdevs))
winner_threshold = unique_values[winner_one]
#print(column_name,": ", winner_threshold," in ", unique_values)
#print("theshold is ",winner_threshold," for ",column_name)
df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
return df
| 39.18797 | 158 | 0.700691 | [
"MIT"
] | ARTeriosclerosis/chefboost | chefboost/training/Preprocess.py | 5,212 | Python |
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
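        # The parent Bottleneck's single 3x3 conv2/norm2 are replaced by the
        # scale-wise convs/bns built above, so drop the unused modules.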
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
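            # Split the channels into `scales` groups of `width` channels and feed
            # them through the hierarchical 3x3 convs (Res2Net multi-scale scheme).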
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Res2Layer(nn.Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch', deep_stem=True, avg_down=True, **kwargs)
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottle2neck):
# dcn in Res2Net bottle2neck is in ModuleList
for n in m.convs:
if hasattr(n, 'conv_offset'):
constant_init(n.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottle2neck):
constant_init(m.norm3, 0)
else:
raise TypeError('pretrained must be a str or None')
| 36.008523 | 79 | 0.527732 | [
"MIT"
] | 007gzs/insightface | detection/scrfd/mmdet/models/backbones/res2net.py | 12,675 | Python |
"""
eveparser.parsers.assets
~~~~~~~~~~~~~~~~~~~~~~~
Parse eve online asset lists. This also invludes inventory listings.
"""
import re
from eveparser.utils import regex_match_lines, f_int
ASSET_LIST_RE = re.compile(r"""^([\S ]*) # name
\t([\d,'\.]*) # quantity
(\t([\S ]*))? # group
(\t([\S ]*))? # category
(\t(XLarge|Large|Medium|Small|))? # size
(\t(High|Medium|Low|Rigs|[\d ]*))? # slot
(\t([\d ,\.]* m3))? # volume
(\t([\d]+|))? # meta level
(\t([\d]+|))?$ # tech level
""", re.X)
def parse_assets(lines):
""" Parse asset list
    :param lines: an iterable of asset list lines (one row per line)
"""
matches, bad_lines = regex_match_lines(ASSET_LIST_RE, lines)
result = [{'name': name,
'quantity': f_int(quantity) or 1,
'group': group,
'category': category,
'size': size,
'slot': slot,
'volume': volume,
'meta_level': meta_level,
'tech_level': tech_level}
for (name,
quantity,
_, group,
_, category,
_, size,
_, slot,
_, volume,
_, meta_level,
_, tech_level) in matches]
return result, bad_lines
| 35.346939 | 79 | 0.363164 | [
"MIT"
] | Nothing4You/eveparser | eveparser/parsers/assets.py | 1,732 | Python |
import os
import platform
import threading
from asyncio import current_task, iscoroutinefunction
from collections.abc import Coroutine
from contextlib import AsyncExitStack
from datetime import datetime, timezone
from functools import partial
from logging import Logger, getLogger
from traceback import format_exc
from typing import Any, Callable, Dict, Optional, Set
from anyio import create_event, create_task_group, open_cancel_scope, run_sync_in_worker_thread
from anyio.abc import CancelScope, Event, TaskGroup
from ..abc import DataStore, Job
from ..events import EventHub, JobDeadlineMissed, JobFailed, JobSuccessful, JobUpdated
class AsyncWorker(EventHub):
"""Runs jobs locally in a task group."""
_task_group: Optional[TaskGroup] = None
_stop_event: Optional[Event] = None
_running: bool = False
_running_jobs: int = 0
_acquire_cancel_scope: Optional[CancelScope] = None
def __init__(self, data_store: DataStore, *, max_concurrent_jobs: int = 100,
identity: Optional[str] = None, logger: Optional[Logger] = None,
run_sync_functions_in_event_loop: bool = True):
super().__init__()
self.data_store = data_store
self.max_concurrent_jobs = max_concurrent_jobs
self.identity = identity or f'{platform.node()}-{os.getpid()}-{threading.get_ident()}'
self.logger = logger or getLogger(__name__)
self.run_sync_functions_in_event_loop = run_sync_functions_in_event_loop
self._acquired_jobs: Set[Job] = set()
self._exit_stack = AsyncExitStack()
if self.max_concurrent_jobs < 1:
raise ValueError('max_concurrent_jobs must be at least 1')
async def __aenter__(self):
await self._exit_stack.__aenter__()
# Initialize the data store
await self._exit_stack.enter_async_context(self.data_store)
# Start the actual worker
self._task_group = create_task_group()
await self._exit_stack.enter_async_context(self._task_group)
start_event = create_event()
await self._task_group.spawn(self.run, start_event)
await start_event.wait()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.stop(force=exc_type is not None)
await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)
async def run(self, start_event: Optional[Event] = None) -> None:
self._stop_event = create_event()
self._running = True
if start_event:
await start_event.set()
while self._running:
limit = self.max_concurrent_jobs - self._running_jobs
jobs = []
async with open_cancel_scope() as self._acquire_cancel_scope:
try:
jobs = await self.data_store.acquire_jobs(self.identity, limit)
finally:
del self._acquire_cancel_scope
for job in jobs:
await self._task_group.spawn(self._run_job, job)
await self._stop_event.set()
del self._stop_event
del self._task_group
async def _run_job(self, job: Job) -> None:
# Check if the job started before the deadline
now = datetime.now(timezone.utc)
if job.start_deadline is not None:
if now.timestamp() > job.start_deadline.timestamp():
self.logger.info('Missed the deadline of job %r', job.id)
event = JobDeadlineMissed(
now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,
scheduled_fire_time=job.scheduled_fire_time, start_time=now,
start_deadline=job.start_deadline
)
await self.publish(event)
return
# Set the job as running and publish a job update event
self.logger.info('Started job %r', job.id)
job.started_at = now
event = JobUpdated(
timestamp=now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id
)
await self.publish(event)
self._running_jobs += 1
try:
return_value = await self._call_job_func(job.func, job.args, job.kwargs)
except BaseException as exc:
self.logger.exception('Job %r raised an exception', job.id)
event = JobFailed(
timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
start_time=now, start_deadline=job.start_deadline,
traceback=format_exc(), exception=exc
)
else:
self.logger.info('Job %r completed successfully', job.id)
event = JobSuccessful(
timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
start_time=now, start_deadline=job.start_deadline, return_value=return_value
)
self._running_jobs -= 1
await self.data_store.release_jobs(self.identity, [job])
await self.publish(event)
async def _call_job_func(self, func: Callable, args: tuple, kwargs: Dict[str, Any]):
if not self.run_sync_functions_in_event_loop and not iscoroutinefunction(func):
wrapped = partial(func, *args, **kwargs)
return await run_sync_in_worker_thread(wrapped)
return_value = func(*args, **kwargs)
if isinstance(return_value, Coroutine):
return_value = await return_value
return return_value
async def stop(self, force: bool = False) -> None:
self._running = False
if self._acquire_cancel_scope:
await self._acquire_cancel_scope.cancel()
if force and self._task_group:
await self._task_group.cancel_scope.cancel()
async def wait_until_stopped(self) -> None:
if self._stop_event:
await self._stop_event.wait()
| 40.278146 | 95 | 0.65735 | [
"MIT"
] | Ghosque/apscheduler | apscheduler/workers/async_.py | 6,082 | Python |
#!/usr/bin/env python3
import json
import os, sys, os.path
import string
from configparser import ConfigParser
J2_CONF_PATH='autobuild/configs/'
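# Build one <ticker>.base.j2 template per coin in manifest.json: top-level defaults
# come from the latest chain version's xbridge/wallet confs, while the "versions"
# map records the conf files and quirks of every listed wallet version.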
def write_file(filename, data):
with open(filename, "w") as fname:
json.dump(data, fname, indent = 4)
return
def get_wallet_conf(path):
wallet_conf_parser = ConfigParser()
with open(path) as wallet_stream:
wallet_conf_parser.read_string("[top]\n" + wallet_stream.read())
return dict(wallet_conf_parser.items('top'))
def get_xbridge_conf(path, ticker):
xbridge_conf_parser = ConfigParser()
xbridge_conf_parser.read(path)
return dict(xbridge_conf_parser.items(ticker))
with open('manifest.json') as json_file:
data = json.load(json_file)
tickers = list(set([chain['ticker'] for chain in data]))
tickers.sort(key = lambda t:t, reverse = False)
for ticker in tickers:
chains = [chain for chain in data if chain['ticker'] == ticker]
chains.sort(key = lambda c:c['ver_id'], reverse = False)
template_data = {}
# get latest version
latest_version_chain = chains[-1]
xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + latest_version_chain['xbridge_conf'], latest_version_chain['ticker'])
wallet_conf_data = get_wallet_conf('wallet-confs/' + latest_version_chain['wallet_conf'])
template_data['Title'] = xbridge_conf_data['title']
template_data['Address'] = xbridge_conf_data['address']
template_data['Ip'] = xbridge_conf_data['ip']
template_data['rpcPort'] = '{{ rpcPort|default(' + wallet_conf_data['rpcport'] + ')}}'
template_data['p2pPort'] = '{{ p2pPort|default(' + wallet_conf_data['port'] + ')}}'
template_data['Username'] = '{{ rpcusername }}'
template_data['Password'] = '{{ rpcpassword }}'
if 'addressprefix' in xbridge_conf_data:
template_data['AddressPrefix'] = xbridge_conf_data['addressprefix']
if 'scriptprefix' in xbridge_conf_data:
template_data['ScriptPrefix'] = xbridge_conf_data['scriptprefix']
if 'secretprefix' in xbridge_conf_data:
template_data['SecretPrefix'] = xbridge_conf_data['secretprefix']
if 'coin' in xbridge_conf_data:
template_data['COIN'] = xbridge_conf_data['coin']
if 'minimumamount' in xbridge_conf_data:
template_data['MinimumAmount'] = xbridge_conf_data['minimumamount']
if 'dustamount' in xbridge_conf_data:
template_data['DustAmount'] = xbridge_conf_data['dustamount']
if 'createtxmethod' in xbridge_conf_data:
template_data['CreateTxMethod'] = xbridge_conf_data['createtxmethod']
if 'getnewkeysupported' in xbridge_conf_data:
template_data['GetNewKeySupported'] = xbridge_conf_data['getnewkeysupported']
if 'importwithnoscansupported' in xbridge_conf_data:
template_data['ImportWithNoScanSupported'] = xbridge_conf_data['importwithnoscansupported']
if 'mintxfee' in xbridge_conf_data:
template_data['MinTxFee'] = xbridge_conf_data['mintxfee']
if 'blocktime' in xbridge_conf_data:
template_data['BlockTime'] = xbridge_conf_data['blocktime']
if 'txversion' in xbridge_conf_data:
template_data['TxVersion'] = xbridge_conf_data['txversion']
if 'feeperbyte' in xbridge_conf_data:
template_data['FeePerByte'] = xbridge_conf_data['feeperbyte']
if 'confirmations' in xbridge_conf_data:
template_data['Confirmations'] = xbridge_conf_data['confirmations']
coin_base_j2_data_versions = {}
for chain in chains:
wallet_conf_data = get_wallet_conf('wallet-confs/' + chain['wallet_conf'])
xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + chain['xbridge_conf'], chain['ticker'])
# get first of versions list of chain
version = chain['versions'][0]
coin_base_j2_data_versions[version] = {
'legacy': 'addresstype' in wallet_conf_data,
'deprecatedrpc': 'deprecatedrpc' in wallet_conf_data,
'xbridge_conf': chain['xbridge_conf'],
'wallet_conf': chain['wallet_conf'],
'GetNewKeySupported': 'GetNewKeySupported' in xbridge_conf_data
}
template_data['versions'] = coin_base_j2_data_versions
template = {}
template[ticker] = template_data
        write_file(J2_CONF_PATH + ticker.lower() + '.base.j2', template)
print(','.join(tickers)) | 45.460784 | 133 | 0.659694 | [
"MIT"
] | mastrip2/blockchain-configuration-files | autobuild/create-j2-confs.py | 4,637 | Python |
from electrum.plugins import hook
from .digitalbitbox import DigitalBitboxPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(DigitalBitboxPlugin):
handler = CmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
| 29.416667 | 57 | 0.736544 | [
"MIT"
] | AlecZadikian9001/electrum | plugins/digitalbitbox/cmdline.py | 353 | Python |
"""
interface.py
DNAC parsers for the following show commands:
* /dna/intent/api/v1/interface
"""
import os
import logging
import pprint
import re
import unittest
from genie import parsergen
from collections import defaultdict
from ats.log.utils import banner
from genie.metaparser import MetaParser
from genie.metaparser.util import merge_dict, keynames_convert
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
logger = logging.getLogger(__name__)
# ============================================
# Schema for '/dna/intent/api/v1/interface'
# ============================================
class InterfaceSchema(MetaParser):
"""schema for /dna/intent/api/v1/interface, /dna/intent/api/v1/interface/{interface}"""
schema = {
Any(): {
"adminStatus": str,
Optional("className"): str,
Optional("description"): str,
"deviceId": str,
Optional("duplex"): str,
Optional("id"): str,
"ifIndex": str,
Optional("instanceTenantId"): str,
Optional("instanceUuid"): str,
"interfaceType": str,
Optional("ipv4Address"): str,
Optional("ipv4Mask"): str,
"isisSupport": str,
"lastUpdated": str,
Optional("macAddress"): str,
Optional("mappedPhysicalInterfaceId"): str,
Optional("mappedPhysicalInterfaceName"): str,
Optional("mediaType"): str,
Optional("nativeVlanId"): str,
"ospfSupport": str,
"pid": str,
"portMode": str,
"portName": str,
Optional("portType"): str,
"serialNo": str,
"series": str,
Optional("speed"): str,
"status": str,
Optional("vlanId"): str,
Optional("voiceVlan"): str
}
}
# ============================================
# Parser for '/dna/intent/api/v1/interface'
# ============================================
class Interface(InterfaceSchema):
"""parser for /dna/intent/api/v1/interface, /dna/intent/api/v1/interface/{interface}"""
cli_command = ['/dna/intent/api/v1/interface', '/dna/intent/api/v1/interface/{interface}']
def cli(self,interface="", output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.get(cmd).json()['response']
else:
out = output
result_dict={}
for intf_dict in out:
# remove None values
result_dict[intf_dict['portName']] = {k: v for k, v in intf_dict.items() if v is not None}
return result_dict
| 35.806122 | 102 | 0.455685 | [
"Apache-2.0"
] | devbollinger/genieparser | src/genie/libs/parser/dnac/interface.py | 3,509 | Python |
###############################################################################
# Name: __init__.py #
# Purpose: Import the required base modules needed for launching Editra into #
# into the namespace. #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# Licence: wxWindows Licence #
###############################################################################
"""Main package module"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: __init__.py 49807 2007-11-10 07:08:33Z CJP $"
__revision__ = "$Revision: 49807 $"
| 58.357143 | 79 | 0.383109 | [
"MIT"
] | CrankySupertoon01/Toontown-2 | dependencies/panda/Panda3D-1.10.0-x64/python/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/__init__.py | 817 | Python |
import pytest
from temp import (download_to_file, ensure_datafile, records_from_lines, make_record, make_value, min_spread_record,
min_spread_day_num, parse_header)
def test_download_to_file(tmpdir):
file = tmpdir.join('test.txt')
download_to_file(file.strpath, 'https://httpbin.org/get?testParam=1')
assert 'testParam' in file.read()
def test_ensure_datafile_downloads(tmpdir):
file = tmpdir.join('test.txt')
ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
assert 'testParam' in file.read()
def test_ensure_datafile_uses_existing(tmpdir):
file = tmpdir.join('test.txt')
file.write('content')
ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
assert file.read() == 'content'
def test_make_record():
header = {'One': (0, 1), 'Two': (3, 3), 'Three': (7, 2), 'Four': (10, 4)}
line = "1 2.2 3* FOUR"
rv = make_record(header, line)
assert set(rv.keys()) == set(header)
assert all(list(rv.values()))
def test_parse_header_rv():
rv = parse_header(" One Two Three")
assert len(rv) == 3
assert rv['One'] == (1, 4)
assert rv['Two'] == (5, 4)
assert rv['Three'] == (9, 6)
def test_make_value():
assert make_value('123') == 123
assert make_value('ASDF') == 'ASDF'
assert make_value('123.45') == 123.45
assert make_value('123*') == 123
assert make_value(' ') == None
def test_records_from_lines_skips_empty():
lines = iter(['One', ' ', 'Two'])
assert len(list(records_from_lines(lines))) == 1
def test_min_spread_record_rv():
records = [
{'max': 10, 'min': 0},
{'max': 1, 'min': 0}, # this one should be returned
{'max': None, 'min': None}
]
assert min_spread_record(records, 'max', 'min') == {'max': 1, 'min': 0}
def test_min_spread_day_num_rv():
records = [
{'Dy': 1, 'MxT': 10, 'MnT': 0},
{'Dy': 2, 'MxT': 5, 'MnT': 0},
]
rv = min_spread_day_num(records)
assert rv == 2
| 28.366197 | 116 | 0.619662 | [
"MIT"
] | notapresent/codekata | python/kata04/test_temp.py | 2,014 | Python |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_splash import SplashRequest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
import requests
class A55CrawlSpider(CrawlSpider):
name = '55_crawl'
allowed_domains = ['fsx.sxxz.gov.cn']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/xxgkzn/']
start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/wj/']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/jgsz_6342/']
rules = (
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul//li'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[1]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[5]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[10]/dl//dd'), follow=True),
Rule(LinkExtractor(allow=r'/\d+/t\d+_\d+\.html'), callback='parse_item', follow=True),
Rule(LinkExtractor(restrict_xpaths='//*[@id="searchsection"]/div[2]/a[3]'), follow=True),
Rule(LinkExtractor(allow=r'index_\d+\.html/'), follow=True),
)
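    # The rules above walk the left-hand navigation menus and pagination links and
    # hand article pages matching /<digits>/t<digits>_<digits>.html to parse_item.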
# def start_requests(self):
# for url in self.start_urls:
# yield scrapy.Request(url)
def _build_request(self, rule, link):
r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5})
r.meta.update(rule=rule, link_text=link.text)
return r
def _requests_to_follow(self, response):
# if not isinstance(response, HtmlResponse):
# return
seen = set()
for n, rule in enumerate(self._rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = self._build_request(n, link)
yield rule.process_request(r)
def parse_item(self, response):
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[1]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[1]/h2[1]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[2]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[2]/h2/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[8]').extract_first()
date = re.search(r"(\d{4}年\d{2}月\d{2}日)", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[6]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-body"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
pass
| 41.701923 | 121 | 0.572516 | [
"Apache-2.0"
] | senlyu163/crawler | spiders/a55_crawl.py | 4,343 | Python |
from sys import argv
from requests import get
import re
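# Query the NVD search page for the given application name, walk the matched CVEs
# in reverse order, and print the first CPE identifier found as "<app>.tb:<cpe>".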
app_name = argv[1]
result = get('https://nvd.nist.gov/view/vuln/search-results?query={0}'.format(app_name))
cves = re.findall(r"CVE-\d{4}-\d+", result.text)
for cve in reversed(cves):
result = get('https://nvd.nist.gov/vuln/detail/' + cve)
cpes = re.findall(r">(cpe.*?)</a>", result.text)
if cpes:
print("{0}.tb:{1}".format(app_name, cpes[0]))
break
| 27.0625 | 88 | 0.642032 | [
"Apache-2.0"
] | FCG-LLC/aucote | scripts/what_web_fetch_cpes.py | 433 | Python |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmHistoryEndHostHistoryGridRemote(RemoteModel):
"""
    This table lists the end host history within the user-specified period of time for a given end host.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this end host.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this end host.
| ``attribute type:`` datetime
| ``HostIPNumeric:`` The numerical value of the end host IP address.
| ``attribute type:`` number
| ``HostIPAddress:`` The management IP address of the end host, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``HostName:`` The NetMRI name of the end host; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceID:`` The NetMRI internal identifier for the switch.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type of the switch.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the switch; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``InterfaceID:`` The internal NetMRI identifier for the interface on the switch configured with this address.
| ``attribute type:`` number
| ``ifIndex:`` The SNMP interface index of the interface on the switch configured with this address.
| ``attribute type:`` string
| ``Interface:`` The interface on the switch configured with this address.
| ``attribute type:`` string
| ``ifMAC:`` The interface Media Access Controller (MAC) address of this interface.
| ``attribute type:`` string
| ``ifOperStatus:`` The operational status (up/down) of this interface.
| ``attribute type:`` string
| ``VlanIndex:`` The numerical VLAN number (VLAN ID).
| ``attribute type:`` number
| ``VlanName:`` The name of the VLAN on the root bridge.
| ``attribute type:`` string
| ``VlanID:`` The internal NetMRI identifier of the VLAN.
| ``attribute type:`` number
| ``VTPDomain:`` Management domain name if VLAN is VTP managed.
| ``attribute type:`` string
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``VirtualNetworkID:`` Internal identifier for the network view.
| ``attribute type:`` number
| ``HostMAC:`` The MAC Address of the end host.
| ``attribute type:`` string
"""
properties = ("id",
"FirstSeen",
"LastSeen",
"HostIPNumeric",
"HostIPAddress",
"HostName",
"DeviceID",
"DeviceType",
"DeviceName",
"InterfaceID",
"ifIndex",
"Interface",
"ifMAC",
"ifOperStatus",
"VlanIndex",
"VlanName",
"VlanID",
"VTPDomain",
"Network",
"VirtualNetworkID",
"HostMAC",
)
| 30.857143 | 157 | 0.566176 | [
"Apache-2.0"
] | IngmarVG-IB/infoblox-netmri | infoblox_netmri/api/remote/models/spm_history_end_host_history_grid_remote.py | 3,672 | Python |
# -*- Python -*-
import os
import platform
import re
import lit.formats
# Get shlex.quote if available (added in 3.3), and fall back to pipes.quote if
# it's not available.
try:
import shlex
sh_quote = shlex.quote
except:
import pipes
sh_quote = pipes.quote
def get_required_attr(config, attr_name):
attr_value = getattr(config, attr_name, None)
if attr_value == None:
lit_config.fatal(
"No attribute %r in test configuration! You may need to run "
"tests from your build directory or add this attribute "
"to lit.site.cfg.py " % attr_name)
return attr_value
# Setup config name.
config.name = 'MemProfiler' + config.name_suffix
# Platform-specific default MEMPROF_OPTIONS for lit tests.
default_memprof_opts = list(config.default_sanitizer_opts)
default_memprof_opts_str = ':'.join(default_memprof_opts)
if default_memprof_opts_str:
config.environment['MEMPROF_OPTIONS'] = default_memprof_opts_str
config.substitutions.append(('%env_memprof_opts=',
'env MEMPROF_OPTIONS=' + default_memprof_opts_str))
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
libdl_flag = '-ldl'
# Setup default compiler flags used with -fmemory-profile option.
# FIXME: Review the set of required flags and check if it can be reduced.
target_cflags = [get_required_attr(config, 'target_cflags')]
target_cxxflags = config.cxx_mode_flags + target_cflags
clang_memprof_static_cflags = (['-fmemory-profile',
'-mno-omit-leaf-frame-pointer',
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls'] +
config.debug_info_flags + target_cflags)
clang_memprof_static_cxxflags = config.cxx_mode_flags + clang_memprof_static_cflags
memprof_dynamic_flags = []
if config.memprof_dynamic:
memprof_dynamic_flags = ['-shared-libsan']
config.available_features.add('memprof-dynamic-runtime')
else:
config.available_features.add('memprof-static-runtime')
clang_memprof_cflags = clang_memprof_static_cflags + memprof_dynamic_flags
clang_memprof_cxxflags = clang_memprof_static_cxxflags + memprof_dynamic_flags
def build_invocation(compile_flags):
return ' ' + ' '.join([config.clang] + compile_flags) + ' '
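
# Editor's note (hedged sketch): with a hypothetical config.clang of "/usr/bin/clang",
# build_invocation(['-O1']) returns " /usr/bin/clang -O1 ", which is what the
# %clang-style substitutions registered below expand to in test RUN lines.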
config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
config.substitutions.append( ("%clang_memprof ", build_invocation(clang_memprof_cflags)) )
config.substitutions.append( ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags)) )
if config.memprof_dynamic:
shared_libmemprof_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.memprof{}.so'.format(config.target_suffix))
config.substitutions.append( ("%shared_libmemprof", shared_libmemprof_path) )
config.substitutions.append( ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags)) )
config.substitutions.append( ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags)) )
# Some tests use C++11 features such as lambdas and need to pass -std=c++11.
config.substitutions.append(("%stdcxx11 ", '-std=c++11 '))
config.substitutions.append( ("%libdl", libdl_flag) )
config.available_features.add('memprof-' + config.bits + '-bits')
config.available_features.add('fast-unwinder-works')
# Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
new_ld_library_path = os.path.pathsep.join(
(config.compiler_rt_libdir, config.environment.get('LD_LIBRARY_PATH', '')))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
# Default test suffixes.
config.suffixes = ['.c', '.cpp']
config.substitutions.append(('%fPIC', '-fPIC'))
config.substitutions.append(('%fPIE', '-fPIE'))
config.substitutions.append(('%pie', '-pie'))
# Only run the tests on supported OSs.
if config.host_os not in ['Linux']:
config.unsupported = True
if not config.parallelism_group:
config.parallelism_group = 'shadow-memory'
| 38.653846 | 123 | 0.746766 | [
"Apache-2.0"
] | AaronBallman/llvm | compiler-rt/test/memprof/lit.cfg.py | 4,020 | Python |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Rhilip <[email protected]>
import re
import time
from flask import Blueprint, request, jsonify
from app import mysql, app, cache
from pymysql import escape_string
ptboard_blueprint = Blueprint('ptboard', __name__)
search_default = app.config.get("PTBOARD_SEARCH", "")
site_default = app.config.get("PTBOARD_SITE", "")
no_site_default = app.config.get("PTBOARD_NO_SITE", "")
order_default = app.config.get("PTBOARD_ORDER", "desc")
limit_default = app.config.get("PTBOARD_LIMIT", 100)
offset_default = app.config.get("PTBOARD_OFFSET", 0)
start_time_default = app.config.get("PTBOARD_START_TIME", 0)
end_time_default = app.config.get("PTBOARD_END_TIME", "CURRENT_TIMESTAMP")
predb_prefix = "https://trace.corrupt-net.org/?q="
def recover_int_to_default(value, default):
try:
ret = int(value)
except(ValueError, TypeError):
ret = default
return ret
def warp_str(string):
return "({})".format(string)
@ptboard_blueprint.route("/ptboard", methods=["GET"])
def ptboard():
t0 = time.time()
ret = {
"success": False,
"error": None
}
token = request.args.get("token") or ""
@cache.memoize(timeout=86400)
def token_valid(token_):
if len(token_) != 32:
return False
row, data = mysql.exec("SELECT * FROM `api`.`ptboard_token` WHERE token = %s", token_, ret_row=True)
if row > 0:
return True
else:
return False
if not token_valid(token):
ret["error"] = "Token is not exist."
return jsonify(ret)
mysql.exec('UPDATE `api`.`ptboard_token` set `useage_count` = `useage_count` + 1 WHERE token = %s', (token,))
# 1. Get user requests
search_raw = request.args.get("search") or search_default
order_raw = request.args.get("order") or order_default
site_raw = request.args.get("site") or site_default
no_site_raw = request.args.get("no_site") or no_site_default
limit = request.args.get("limit") or limit_default
offset = request.args.get("offset") or offset_default
start_time = request.args.get("start_time") or start_time_default
end_time = request.args.get("end_time") or end_time_default
# 2. Clean user requests
search = re.sub(r"[ _\-,.+]", " ", search_raw)
search = search.split()
    search = list(filter(lambda l: len(l) > 1, search))  # Remove tokens that are too short
search = search[:10]
search_opt = site_opt = no_site_opt = "1=1"
if search:
search_opt = warp_str(" AND ".join(map(lambda i: "title LIKE '%{}%'".format(escape_string(i)), search)))
start_time = recover_int_to_default(start_time, start_time_default)
end_time = recover_int_to_default(end_time, end_time_default)
time_opt = warp_str("ptboard_record.pubDate BETWEEN {start} AND {end}".format(start=start_time, end=end_time))
@cache.cached(timeout=86400)
def get_site_list():
return [i[0] for i in mysql.exec("SELECT `site` FROM `api`.`ptboard_site`", fetch_all=True)]
site_list = get_site_list()
site = list(filter(lambda i: i in site_list, site_raw.split(",")))
no_site = list(filter(lambda i: i in site_list, no_site_raw.split(",")))
if site:
site_opt = warp_str(" OR ".join(["ptboard_record.site = '{site}'".format(site=s) for s in site]))
if no_site:
no_site_opt = warp_str(" AND ".join(["ptboard_record.site != '{site}'".format(site=s) for s in no_site]))
limit = recover_int_to_default(limit, limit_default)
offset = recover_int_to_default(offset, offset_default)
if limit > 200:
limit = 200
order = "desc" if order_raw.lower() not in ["desc", "asc"] else order_raw
# 3. Get response data from Database
opt = " AND ".join([time_opt, site_opt, no_site_opt, search_opt])
sql = ("SELECT ptboard_record.sid, ptboard_site.site, ptboard_record.title, "
"concat(ptboard_site.torrent_prefix,ptboard_record.sid, ptboard_site.torrent_suffix) AS link, "
"ptboard_record.pubDate FROM api.ptboard_record "
"INNER JOIN api.ptboard_site ON api.ptboard_site.site = api.ptboard_record.site "
"WHERE {opt} ORDER BY `pubDate` {_da} "
"LIMIT {_offset}, {_limit}".format(opt=opt, _da=order.upper(), _offset=offset, _limit=limit)
)
record_count, rows_data = mysql.exec(sql=sql, r_dict=True, fetch_all=True, ret_row=True)
# 4. Sort Response data
if app.config.get("DEBUG"):
ret["sql"] = sql
def fix_predb(d: dict):
if d["site"] == "PreDB":
d["link"] = predb_prefix + d["title"].split(" | ")[1]
return d
ret.update({
"success": True,
"rows": list(map(fix_predb, rows_data)),
"total": record_count if search else mysql.exec("SELECT count(*) FROM `api`.`ptboard_record`")[0],
})
ret["cost"] = time.time() - t0
return jsonify(ret)
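

# --- Editor's addition: a hedged, self-contained sketch (not part of the original view) ---
# The search_opt assembled above turns free text into a chain of LIKE clauses.  The same
# transformation in isolation, reusing re, escape_string and warp_str from this module;
# the helper name and the sample input are invented for illustration only.
def _example_build_search_opt(search_raw):
    terms = re.sub(r"[ _\-,.+]", " ", search_raw).split()
    terms = [t for t in terms if len(t) > 1][:10]
    if not terms:
        return "1=1"
    return warp_str(" AND ".join("title LIKE '%{}%'".format(escape_string(t)) for t in terms))

# _example_build_search_opt("blade runner")
# -> "(title LIKE '%blade%' AND title LIKE '%runner%')"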
| 35.669065 | 114 | 0.656313 | [
"MIT"
] | Rhilip/PT-help | modules/ptboard/__init__.py | 4,958 | Python |
import os
import unittest
from unit import Unit
from unit.models.authorization import *
class AuthorizationsE2eTests(unittest.TestCase):
token = os.environ.get("token")
client = Unit("https://api.s.unit.sh", token)
def test_list_and_get_authorization(self):
authorizations = self.client.authorizations.list()
for authorization in authorizations.data:
response = self.client.authorizations.get(authorization.id)
self.assertTrue(response.data.type == "authorization")
def test_list_with_parameters(self):
params = AuthorizationListParams(10, 0, "", "49423")
authorizations = self.client.authorizations.list(params)
for authorization in authorizations.data:
response = self.client.authorizations.get(authorization.id)
self.assertTrue(response.data.type == "authorization")
def test_list_with_wrong_parameters(self):
params = AuthorizationListParams(10, 0, "", "-1")
response = self.client.authorizations.list(params)
self.assertTrue(response.data == [])
if __name__ == '__main__':
unittest.main()
| 34.484848 | 71 | 0.695958 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | unit-finance/unit-python-sdk | e2e_tests/authorization_test.py | 1,138 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for zones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core.console import console_io
class ZoneResourceFetcher(object):
"""A (small) collection of utils for working with zones."""
def __init__(self, compute_client):
"""Instantiate ZoneResourceFetcher and embed all required data into it.
    ZoneResourceFetcher is a class that depends on the "base_classes"
    class layout (its properties are derived from one of the base_classes
    classes). This constructor can be used to avoid unworkable inheritance
    and to use composition instead when refactoring away from base_classes
    into a stateless style.
This constructor embeds following properties into ZoneResourceFetcher
instance:
- compute
- messages
- http
- batch_url
Example:
compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = compute_holder.client
zone_resource_fetcher = ZoneResourceFetcher(client)
or
zone_resource_fetcher = ZoneResourceFetcher(self.compute_client)
to use in a class derived from some of base_classes
zone_resource_fetcher.WarnForZonalCreation(...)
Args:
compute_client: compute_holder.client
"""
self._compute = compute_client.apitools_client
self._messages = compute_client.messages
self._http = compute_client.apitools_client.http
self._batch_url = compute_client.batch_url
def GetZones(self, resource_refs):
"""Fetches zone resources."""
errors = []
requests = []
zone_names = set()
for resource_ref in resource_refs:
if resource_ref.zone not in zone_names:
zone_names.add(resource_ref.zone)
requests.append((
self._compute.zones,
'Get',
self._messages.ComputeZonesGetRequest(
project=resource_ref.project,
zone=resource_ref.zone)))
res = list(request_helper.MakeRequests(
requests=requests,
http=self._http,
batch_url=self._batch_url,
errors=errors))
if errors:
return None
else:
return res
def WarnForZonalCreation(self, resource_refs):
"""Warns the user if a zone has upcoming deprecation."""
zones = self.GetZones(resource_refs)
if not zones:
return
prompts = []
zones_with_deprecated = []
for zone in zones:
if zone.deprecated:
zones_with_deprecated.append(zone)
if not zones_with_deprecated:
return
if zones_with_deprecated:
phrases = []
if len(zones_with_deprecated) == 1:
phrases = ('zone is', 'this zone', 'the')
else:
phrases = ('zones are', 'these zones', 'their')
title = ('\n'
'WARNING: The following selected {0} deprecated.'
' All resources in {1} will be deleted after'
' {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]))
printable_deprecated_zones = []
for zone in zones_with_deprecated:
if zone.deprecated.deleted:
printable_deprecated_zones.append(('[{0}] {1}').format(zone.name,
zone.deprecated
.deleted))
else:
printable_deprecated_zones.append('[{0}]'.format(zone.name))
prompts.append(utils.ConstructList(title, printable_deprecated_zones))
final_message = ' '.join(prompts)
if not console_io.PromptContinue(message=final_message):
raise calliope_exceptions.ToolException('Creation aborted by user.')
| 35.0625 | 80 | 0.679367 | [
"MIT"
] | bopopescu/JobSniperRails | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/zone_utils.py | 4,488 | Python |
"""
Implementation of gaussian filter algorithm
"""
from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imshow, waitKey
from numpy import pi, mgrid, exp, square, zeros, ravel, dot, uint8
from itertools import product
def gen_gaussian_kernel(k_size, sigma):
center = k_size // 2
x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
return g
def gaussian_filter(image, k_size, sigma):
height, width = image.shape[0], image.shape[1]
# dst image height and width
dst_height = height - k_size + 1
dst_width = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
image_array = zeros((dst_height * dst_width, k_size * k_size))
row = 0
for i, j in product(range(dst_height), range(dst_width)):
window = ravel(image[i : i + k_size, j : j + k_size])
image_array[row, :] = window
row += 1
# turn the kernel into shape(k*k, 1)
gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
filter_array = ravel(gaussian_kernel)
# reshape and get the dst image
dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
return dst
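
# --- Editor's addition: a hedged sanity check (not part of the original algorithm) ---
# gen_gaussian_kernel() can be verified in isolation: the kernel should peak at the
# centre cell and be symmetric under a 180-degree flip.  Only numpy names already
# imported above are used; the default arguments are arbitrary.
def _example_kernel_properties(k_size=3, sigma=1):
    kernel = gen_gaussian_kernel(k_size, sigma)
    center = k_size // 2
    assert kernel[center, center] == kernel.max()  # maximum sits at the centre
    assert (kernel == kernel[::-1, ::-1]).all()  # point-symmetric about the centre
    return kernel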
if __name__ == "__main__":
# read original image
img = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussian3x3)
imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 32.537037 | 85 | 0.669892 | [
"MIT"
] | 13261922481/Python | digital_image_processing/filters/gaussian_filter.py | 1,757 | Python |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import re
import tensorflow as tf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_graph(model_file, output_nodes_for_freeze=None):
is_meta = os.path.splitext(model_file)[-1] == ".meta"
tf.compat.v1.reset_default_graph()
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef() if not is_meta else tf.compat.v1.MetaGraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node
for node in nodes_to_clear_device:
node.device = ""
if is_meta:
with tf.compat.v1.Session() as sess:
restorer = tf.compat.v1.train.import_meta_graph(graph_def)
            restorer.restore(sess, re.sub(r'\.meta$', '', model_file))
graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph_def.graph_def, output_nodes_for_freeze)
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
def collect_tf_references(model_path, feed_dict, out_layer, output_nodes_for_freeze=None):
_feed_dict = dict()
graph = load_graph(model_path, output_nodes_for_freeze)
output_tensors_list = list()
outputs_list = list()
for input in feed_dict:
input_node = [node for node in graph.as_graph_def().node if node.name == input][0]
if input_node.op == "Placeholder":
tensor = graph.get_tensor_by_name(input + ":0")
_feed_dict[tensor] = feed_dict[input]
else:
            for parent_input in input_node.input:
                in_node = [node for node in graph.as_graph_def().node if node.name == parent_input][0]
                if in_node.op in ['Const', 'Assign', 'NoOp', 'Assert']:
                    continue
                else:
                    tensor = graph.get_tensor_by_name(parent_input + ":0")
_feed_dict[tensor] = feed_dict[input]
for output in out_layer:
tensor = graph.get_tensor_by_name(output + ":0")
output_tensors_list.append(tensor)
outputs_list.append(output)
with graph.as_default():
with tf.compat.v1.Session(graph=graph) as sess:
outputs = sess.run(output_tensors_list, feed_dict=_feed_dict)
out_dict = dict(zip(outputs_list, outputs))
return out_dict
def children(op, graph):
op = graph.get_operation_by_name(op)
return set(op for out in op.outputs for op in out.consumers())
def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
placeholders = dict()
variables = list()
outputs = list()
graph = load_graph(model_path, output_nodes_for_freeze)
unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']
for node in graph.as_graph_def().node:
if node.op == 'Placeholder':
node_dict = dict()
node_dict['type'] = tf.DType(node.attr['dtype'].type).name
node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\n', '').replace(' ', '').replace(
'size:', '').replace('[', '').replace(']', '')
node_dict['shape'] = tuple(map(lambda x: int(x), node_dict['shape'].split(',')))
placeholders[node.name] = node_dict
if node.op == "Variable" or node.op == "VariableV2":
variables.append(node.name)
if len(children(node.name, graph)) == 0:
if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:
outputs.append(node.name)
result = dict()
result['inputs'] = placeholders
result['outputs'] = outputs
if reshape_net:
out_layer = list(result['inputs'].keys()) + result['outputs']
feed_dict = {}
for inputl in reshape_net:
feed_dict.update({inputl: np.ones(shape=reshape_net[inputl])})
scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)
for layer in scoring_res:
if layer in result['inputs']:
result['inputs'][layer]['shape'] = scoring_res[layer].shape
return result
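
# --- Editor's addition (hedged usage sketch) ---
# Typical call pattern for the helpers above; the model path, input name and shape
# below are invented for illustration only.
#
#     info = summarize_graph("/tmp/frozen_model.pb", reshape_net={"input": [1, 224, 224, 3]})
#     refs = collect_tf_references("/tmp/frozen_model.pb",
#                                  feed_dict={"input": np.ones(shape=[1, 224, 224, 3])},
#                                  out_layer=list(info["inputs"].keys()) + info["outputs"])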
| 39.336364 | 130 | 0.645251 | [
"Apache-2.0"
] | AkillesAILimited/openvino | tests/layer_tests/common/utils/tf_utils.py | 4,327 | Python |
# content of test_electricity.py
from premise import DATA_DIR
from premise.electricity import Electricity
from premise.data_collection import IAMDataCollection
REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv")
PRODUCTION_PER_TECH = (DATA_DIR / "electricity" / "electricity_production_volumes_per_tech.csv")
LOSS_PER_COUNTRY = (DATA_DIR / "electricity" / "losses_per_country.csv")
LHV_FUELS = (DATA_DIR / "fuels_lower_heating_value.txt")
def get_db():
dummy_db = [{
'name': 'fake activity',
'reference product': 'fake product',
'location': 'IAI Area, Africa',
'unit': 'kilogram',
'exchanges': [
{'name': 'fake activity',
'product': 'fake product',
'amount': 1,
'type': 'production',
'unit': 'kilogram',
'input': ('dummy_db', '6543541'), },
{'name': '1,4-Butanediol',
'categories': ('air', 'urban air close to ground'),
'amount': 1,
'type': 'biosphere',
'unit': 'kilogram',
'input': ('dummy_bio', '123'),
},
]
}]
version = 3.5
return dummy_db, version
rdc = IAMDataCollection(model="remind", pathway='SSP2-Base', year=2012, filepath_iam_files=DATA_DIR / "iam_output_files")
db, _ = get_db()
el = Electricity(db=db, iam_data=rdc, model="remind", pathway='SSP2-Base', year=2012)
def test_losses():
assert len(el.losses) == 174
assert el.losses['AL']['Production volume'] == 7630
def test_fuels_lhv():
assert float(el.fuels_lhv['hard coal']) == 20.1
def test_powerplant_map():
s = el.powerplant_map['Biomass IGCC CCS']
assert isinstance(s, set)
def test_emissions_map():
s = el.emissions_map['Sulfur dioxide']
assert isinstance(s, str)
| 30.25 | 121 | 0.616529 | [
"BSD-3-Clause"
] | kais-siala/premise | tests/test_electricity.py | 1,815 | Python |
#!/usr/bin/python2.6
'''
Creates a MyU3 class that adds higher-level functionality to the base
LabJack U3 class.
'''
from __future__ import division
import u3
from time import sleep
import math
def getU3(**kargs):
'''Returns an open MyU3 object but retries until successful if errors occur.'''
while True:
try:
return MyU3(**kargs)
except:
sleep(2)
print('Trying to Open U3...')
class MyU3(u3.U3):
'''
Class that adds some functionality to the base u3.U3 class, which
operates a U3 data acquisition device.
'''
def __init__(self, **kargs):
# call the constructor in the base class
u3.U3.__init__(self, **kargs)
def getRMS(self, ch, signalFreq=60, numCycles=4):
'''
Returns the RMS voltage of a stream of readings on a channel.
'ch' is the channel to sample.
'signalFreq' is the fundamental frequency of the signal being sampled.
'numCycles' is the number of full cycles of the signal that you want to
sample for purposes of calculating the RMS value.
I found that for 60 Hz signals, sampling 4 cycles produces stable
readings.
NOTE: there are limits to the frequency calculation below. Getting
a packet from the U3 in streaming mode is limited to 1 second I think,
        and it will reduce the # of samples if the frequency is set so that
        fewer than 1200 samples arrive in 1 second.
'''
# There are 1200 samples in one streaming request of the U3. Calculate
# the required streaming frequency from that and the other input parameters.
freq = int(signalFreq / numCycles * 1200.0)
freq = min(50000, freq) # cap at 50 kHz
# the U3 must operate at lower resolution if the streaming is very fast.
if freq < 2500:
resolution = 0
elif freq < 10000:
resolution = 1
elif freq < 20000:
resolution = 2
else:
resolution = 3
self.streamConfig( NumChannels = 1,
PChannels = [ ch ],
NChannels = [ 31 ], # 31 indicates single-ended read
Resolution = resolution,
SampleFrequency = freq )
try:
self.streamStart()
for r in self.streamData():
# calculate the sum of the squares, average that, and take square root
# to determine RMS
vals = r['AIN' + str(ch)]
sum_sqr = reduce(lambda x,y: x + y*y, vals, 0.0)
return math.sqrt(sum_sqr / len(vals))
finally:
self.streamStop()
def getAvg(self, ch, reads=8, specialRange=False, longSettle=True):
'''
Returns an analog reading of channel 'ch', but samples
multiple times = 'reads' and then averages. If 'specialRange'
is True, uses the higher voltage range for the channel.
If 'longSettle' is True, a higher source impedance can be tolerated.
'''
if specialRange:
negCh = 32
else:
negCh = 31
tot = 0.0
for i in range(reads):
tot += self.getAIN(ch, negCh, longSettle=longSettle)
return tot / reads
# Could add a routine to average an analog reading across
    # four 60 Hz cycles using the stream function as in getRMS().
def getDutyCycle(self, timer, reads=8):
'''
Returns the duty cycle measured by a timer. Assumes that the timer is already
set to Mode = 4 for reading duty cycles.
timer - the timer number, either 0 or 1.
reads - the number of times to read the duty cycle and average
'''
tot = 0.0 # used to average the duty cycle readings
for i in range(reads):
val = self.getFeedback(u3.Timer(timer=timer))[0]
hi = float(val % 2**16)
lo = float(val / 2**16)
tot += hi / (lo + hi)
return tot / reads
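
# --- Editor's addition: a hedged, hardware-free sketch (not part of the original class) ---
# getRMS() reduces one streamed packet to sqrt(mean(v**2)).  The same arithmetic on a
# plain list of samples, so the reduction can be checked without a U3 attached.
def _example_rms(vals):
    sum_sqr = reduce(lambda x, y: x + y * y, vals, 0.0)
    return math.sqrt(sum_sqr / len(vals))

# _example_rms([1.0, -1.0, 1.0, -1.0]) == 1.0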
if __name__=='__main__':
# create the device object
d = MyU3()
# Set all possible inputs to Analog
# Create a bit mask indicating which channels are analog:
FIOEIOAnalog = ( 2 ** 16 ) - 1;
fios = FIOEIOAnalog & (0xFF) # the bottom 8 bits
eios = FIOEIOAnalog/256 # shift 8 places to get top 8 bits
d.configIO( FIOAnalog = fios, EIOAnalog = int(eios) )
try:
while True:
#print '%.3f' % d.getAvg(6)
#print '%.2f' % ( (d.getAvg(30) - 273.15)*1.8 + 32.0 )
print '%.3f' % d.getRMS(6)
sleep(0.5)
finally:
d.close() | 32.29078 | 84 | 0.597408 | [
"Apache-2.0"
] | dayne/mini-monitor | readers/myU3.py | 4,553 | Python |
import os
import threading
import uuid
from ipaddress import ip_address, ip_interface, ip_network
import yaml
from django.db import models
from ansible_api.models.mixins import AbstractExecutionModel
from cloud_provider import get_cloud_client
from common import models as common_models
from fit2ansible import settings
from django.utils.translation import ugettext_lazy as _
from kubeops_api.models.host import Host
class CloudProviderTemplate(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = common_models.JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template_dir = os.path.join(settings.BASE_DIR, 'resource', 'clouds')
@property
def path(self):
return os.path.join(self.template_dir, self.name)
@classmethod
def lookup(cls):
for d in os.listdir(cls.template_dir):
full_path = os.path.join(cls.template_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
metadata = yaml.load(f)
defaults = {'name': d, 'meta': metadata}
cls.objects.update_or_create(defaults=defaults, name=d)
class Region(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template = models.ForeignKey('CloudProviderTemplate', on_delete=models.SET_NULL, null=True)
cloud_region = models.CharField(max_length=128, null=True, default=None)
vars = common_models.JsonDictTextField(default={})
comment = models.CharField(max_length=128, blank=True, null=True, verbose_name=_("Comment"))
@property
def zone_size(self):
zones = Zone.objects.filter(region=self)
return len(zones)
@property
def cluster_size(self):
clusters = []
plans = Plan.objects.filter(region=self)
for plan in plans:
from kubeops_api.models.cluster import Cluster
cs = Cluster.objects.filter(plan=plan)
for c in cs:
clusters.append(c)
return len(clusters)
@property
def image_ovf_path(self):
return self.vars['image_ovf_path']
@property
def image_vmdk_path(self):
return self.vars['image_vmdk_path']
@property
def image_name(self):
return self.vars['image_name']
def set_vars(self):
meta = self.template.meta.get('region', None)
if meta:
_vars = meta.get('vars', {})
self.vars.update(_vars)
self.save()
def on_region_create(self):
self.set_vars()
def to_dict(self):
dic = {
"region": self.cloud_region
}
dic.update(self.vars)
return dic
class Zone(models.Model):
ZONE_STATUS_READY = "READY"
ZONE_STATUS_INITIALIZING = "INITIALIZING"
ZONE_STATUS_ERROR = "ERROR"
ZONE_STATUS_CHOICES = (
(ZONE_STATUS_READY, 'READY'),
(ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
(ZONE_STATUS_ERROR, 'ERROR'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
vars = common_models.JsonDictTextField(default={})
region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
cloud_zone = models.CharField(max_length=128, null=True, default=None)
ip_used = common_models.JsonListTextField(null=True, default=[])
status = models.CharField(max_length=64, choices=ZONE_STATUS_CHOICES, null=True)
@property
def host_size(self):
hosts = Host.objects.filter(zone=self)
return len(hosts)
def change_status(self, status):
self.status = status
self.save()
def create_image(self):
try:
self.change_status(Zone.ZONE_STATUS_INITIALIZING)
client = get_cloud_client(self.region.vars)
client.create_image(zone=self)
self.change_status(Zone.ZONE_STATUS_READY)
except Exception as e:
self.change_status(Zone.ZONE_STATUS_ERROR)
def on_zone_create(self):
thread = threading.Thread(target=self.create_image)
thread.start()
def allocate_ip(self):
ip = self.ip_pools().pop()
self.ip_used.append(ip)
self.save()
return ip
def recover_ip(self, ip):
self.ip_used.remove(ip)
self.save()
def to_dict(self):
dic = {
"key": "z" + str(self.id).split("-")[3],
"name": self.cloud_zone,
"zone_name": self.name,
"ip_pool": self.ip_pools()
}
dic.update(self.vars)
return dic
def ip_pools(self):
ip_pool = []
ip_start = ip_address(self.vars['ip_start'])
ip_end = ip_address(self.vars['ip_end'])
if self.region.template.name == 'openstack':
while ip_start <= ip_end:
ip_pool.append(str(ip_start))
ip_start += 1
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
net_mask = self.vars['net_mask']
interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
network = interface.network
for host in network.hosts():
if ip_start <= host <= ip_end:
ip_pool.append(str(host))
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
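
    # Editor's note (hedged sketch): the hosts() walk above can be reproduced in
    # isolation with the ipaddress names imported at the top of this module; the
    # addresses below are invented for illustration only.
    #
    #     start, end = ip_address('192.168.1.10'), ip_address('192.168.1.12')
    #     net = ip_interface('192.168.1.10/255.255.255.0').network
    #     [str(h) for h in net.hosts() if start <= h <= end]
    #     -> ['192.168.1.10', '192.168.1.11', '192.168.1.12']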
def ip_available_size(self):
return len(self.ip_pools())
@property
def provider(self):
return self.region.template.name
class Plan(models.Model):
DEPLOY_TEMPLATE_SINGLE = "SINGLE"
DEPLOY_TEMPLATE_MULTIPLE = "MULTIPLE"
DEPLOY_TEMPLATE_CHOICES = (
(DEPLOY_TEMPLATE_SINGLE, 'single'),
(DEPLOY_TEMPLATE_MULTIPLE, 'multiple'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
zone = models.ForeignKey('Zone', null=True, on_delete=models.CASCADE)
region = models.ForeignKey('Region', null=True, on_delete=models.CASCADE)
zones = models.ManyToManyField('Zone', related_name='zones')
deploy_template = models.CharField(choices=DEPLOY_TEMPLATE_CHOICES, default=DEPLOY_TEMPLATE_SINGLE, max_length=128)
vars = common_models.JsonDictTextField(default={})
@property
def mixed_vars(self):
_vars = self.vars.copy()
_vars.update(self.region.to_dict())
zones = self.get_zones()
zone_dicts = []
for zone in zones:
zone_dicts.append(zone.to_dict())
_vars['zones'] = zone_dicts
return _vars
def get_zones(self):
zones = []
if self.zone:
zones.append(self.zone)
if self.zones:
zones.extend(self.zones.all())
return zones
@property
def compute_models(self):
return {
"master": self.vars.get('master_model', None),
"worker": self.vars.get('worker_model', None)
}
| 33.626087 | 119 | 0.641195 | [
"Apache-2.0"
] | 99fu/KubeOperator | api/cloud_provider/models.py | 7,734 | Python |
class DirectionNotDetermined(BaseException):
def __init__(self, message="Hand did not move in only one direction. Direction of movement cannot be determined."):
self.message = message
super().__init__(self.message)
class ConfigError(BaseException):
def __init__(self, message="Possible error in syntax of config."):
self.message = message
super().__init__(self.message)
class GestureNotDetermined(BaseException):
def __init__(self, message="The Gesture did not match with any registered gestures"):
self.message = message
super().__init__(self.message)
| 35 | 119 | 0.703175 | [
"MIT"
] | david-0609/OpenCV-Hand-Gesture-Control | modules/Exceptions.py | 630 | Python |
import asyncio
import collections
import math
import signal
import sys
from functools import wraps
class Spy(object):
"""Spy is the debugging system for farc.
farc contains a handful of Spy.on_*() methods
placed at useful locations in the framework.
It is up to a Spy driver (such as the included VcdSpy)
to implement the Spy.on_*() methods.
The programmer calls Spy.enable_spy(<Spy implementation class>)
to activate the Spy system; otherwise, Spy does nothing.
Therefore, this class is designed so that calling Spy.anything()
is inert unless the application first calls Spy.enable_spy()
"""
_actv_cls = None
@staticmethod
def enable_spy(spy_cls):
"""Sets the Spy to use the given class
and calls its initializer.
"""
Spy._actv_cls = spy_cls
spy_cls.init()
def __getattr__(*args):
"""Returns
1) the enable_spy static method if requested by name, or
2) the attribute from the active class (if active class was set), or
3) a function that swallows any arguments and does nothing.
"""
if args[1] == "enable_spy":
return Spy.enable_spy
if Spy._actv_cls:
return getattr(Spy._actv_cls, args[1])
return lambda *x: None
# Singleton pattern:
# Turn Spy into an instance of itself so __getattribute__ works
# on anyone who calls "import Spy; Spy.foo()"
# This prevents Spy() from creating a new instance
# and gives everyone who calls "import Spy" the same object
Spy = Spy()
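
# Editor's note (hedged usage sketch): the Spy is enabled once, before any state
# machines start, e.g. with the VcdSpy driver imported at the bottom of this module:
#
#     farc.Spy.enable_spy(farc.VcdSpy)
#
# Until enable_spy() is called, every Spy.on_*() call resolves to a do-nothing
# lambda, so instrumented code costs almost nothing with debugging off.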
class Signal(object):
"""An asynchronous stimulus that triggers reactions.
A unique identifier that, along with a value, specifies an Event.
p. 154
"""
_registry = {} # signame:str to sigid:int
_lookup = [] # sigid:int to signame:str
@staticmethod
def exists(signame):
"""Returns True if signame is in the Signal registry.
"""
return signame in Signal._registry
@staticmethod
def register(signame):
"""Registers the signame if it is not already registered.
Returns the signal number for the signame.
"""
assert type(signame) is str
if signame in Signal._registry:
# TODO: emit warning that signal is already registered
return Signal._registry[signame]
else:
sigid = len(Signal._lookup)
Signal._registry[signame] = sigid
Signal._lookup.append(signame)
Spy.on_signal_register(signame, sigid)
return sigid
def __getattr__(self, signame):
assert type(signame) is str
return Signal._registry[signame]
# Singleton pattern:
# Turn Signal into an instance of itself so getattr works.
# This also prevents Signal() from creating a new instance.
Signal = Signal()
# Register the reserved (system) signals
Signal.register("EMPTY") # 0
Signal.register("ENTRY") # 1
Signal.register("EXIT") # 2
Signal.register("INIT") # 3
# Signals that mirror POSIX signals
Signal.register("SIGINT") # (i.e. Ctrl+C)
Signal.register("SIGTERM") # (i.e. kill <pid>)
Event = collections.namedtuple("Event", ["signal", "value"])
Event.__doc__ = """Events are a tuple of (signal, value) that are passed from
one AHSM to another. Signals are defined in each AHSM's source code
by name, but resolve to a unique number. Values are any python value,
including containers that contain even more values. Each AHSM state
(static method) accepts an Event as the parameter and handles the event
based on its Signal."""
# Instantiate the reserved (system) events
Event.EMPTY = Event(Signal.EMPTY, None)
Event.ENTRY = Event(Signal.ENTRY, None)
Event.EXIT = Event(Signal.EXIT, None)
Event.INIT = Event(Signal.INIT, None)
# Events for POSIX signals
Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C)
Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. kill <pid>)
# The order of this tuple MUST match their respective signals
Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT)
class Hsm(object):
"""A Hierarchical State Machine (HSM).
Full support for hierarchical state nesting.
Guaranteed entry/exit action execution on arbitrary state transitions.
Full support of nested initial transitions.
Support for events with arbitrary parameters.
"""
# Every state handler must return one of these values
RET_HANDLED = 0
RET_IGNORED = 1
RET_TRAN = 2
RET_SUPER = 3
def __init__(self,):
"""Sets this Hsm's current state to Hsm.top(), the default state
and stores the given initial state.
"""
# self.state is the Hsm/act's current active state.
# This instance variable references the message handler (method)
# that will be called whenever a message is sent to this Hsm.
# We initialize this to self.top, the default message handler
self.state = self.top
# Farc differs from QP here in that we hardcode
# the initial state to be "_initial"
self.initial_state = self._initial
def _initial(self, event):
"""Raises a NotImplementedError to force the derived class
to implement its own initial state.
"""
raise NotImplementedError
def state(func):
"""A decorator that identifies which methods are states.
The presence of the farc_state attr, not the value of the attr,
determines statehood.
The Spy debugging system uses the farc_state attribute
to determine which methods inside a class are actually states.
Other uses of the attribute may come in the future.
"""
@wraps(func)
def func_wrap(self, evt):
result = func(self, evt)
Spy.on_state_handler_called(func_wrap, evt, result)
return result
setattr(func_wrap, "farc_state", True)
return staticmethod(func_wrap)
# Helper functions to process reserved events through the current state
@staticmethod
def trig(me, state_func, signal): return state_func(me, Event.reserved[signal])
@staticmethod
def enter(me, state_func): return state_func(me, Event.ENTRY)
@staticmethod
def exit(me, state_func): return state_func(me, Event.EXIT)
# Other helper functions
@staticmethod
def handled(me, event): return Hsm.RET_HANDLED
@staticmethod
def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN
@staticmethod
def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158
@state
def top(me, event):
"""This is the default state handler.
This handler ignores all signals except
the POSIX-like events, SIGINT/SIGTERM.
Handling SIGINT/SIGTERM here causes the Exit path
to be executed from the application's active state
to top/here.
The application may put something useful
or nothing at all in the Exit path.
"""
# Handle the Posix-like events to force the HSM
# to execute its Exit path all the way to the top
if Event.SIGINT == event:
return Hsm.RET_HANDLED
if Event.SIGTERM == event:
return Hsm.RET_HANDLED
# All other events are quietly ignored
return Hsm.RET_IGNORED # p. 165
@staticmethod
def _perform_init_chain(me, current):
"""Act on the chain of initializations required starting from current.
"""
t = current
while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN:
# The state handles the INIT message and needs to make a transition. The
# "top" state is special in that it does not handle INIT messages, so we
# defer to me.initial_state in this case
path = [] # Trace the path back to t via superstates
while me.state != t:
path.append(me.state)
Hsm.trig(me, me.state, Signal.EMPTY)
# Restore the state to the target state
me.state = path[0]
assert len(path) < 32 # MAX_NEST_DEPTH
# Perform ENTRY action for each state from current to the target
path.reverse() # in-place
for s in path:
Hsm.enter(me, s)
# The target state has now to be checked to see if it responds to the INIT message
t = path[-1] # -1 because path was reversed
return t
@staticmethod
def _perform_transition(me, source, target):
# Handle the state transition from source to target in the HSM.
s, t = source, target
path = [t]
if s == t: # Case (a), transition to self
Hsm.exit(me,s)
Hsm.enter(me,t)
else:
# Find parent of target
Hsm.trig(me, t, Signal.EMPTY)
t = me.state # t is now parent of target
if s == t: # Case (b), source is parent of target
Hsm.enter(me, path[0])
else:
# Find parent of source
Hsm.trig(me, s, Signal.EMPTY)
if me.state == t: # Case (c), source and target share a parent
Hsm.exit(me, s)
Hsm.enter(me, path[0])
else:
if me.state == path[0]: # Case (d), target is parent of source
Hsm.exit(me, s)
else: # Check if the source is an ancestor of the target (case (e))
lca_found = False
path.append(t) # Populates path[1]
t = me.state # t is now parent of source
# Find and save ancestors of target into path
# until we find the source or hit the top
me.state = path[1]
while me.state != Hsm.top:
Hsm.trig(me, me.state, Signal.EMPTY)
path.append(me.state)
assert len(path) < 32 # MAX_NEST_DEPTH
if me.state == s:
lca_found = True
break
if lca_found: # This is case (e), enter states to get to target
for st in reversed(path[:-1]):
Hsm.enter(me, st)
else:
Hsm.exit(me, s) # Exit the source for cases (f), (g), (h)
me.state = t # Start at parent of the source
while me.state not in path:
# Keep exiting up into superstates until we reach the LCA.
# Depending on whether the EXIT signal is handled, we may also need
# to send the EMPTY signal to make me.state climb to the superstate.
if Hsm.exit(me, me.state) == Hsm.RET_HANDLED:
Hsm.trig(me, me.state, Signal.EMPTY)
t = me.state
# Step into children until we enter the target
for st in reversed(path[:path.index(t)]):
Hsm.enter(me, st)
@staticmethod
def init(me, event = None):
"""Transitions to the initial state. Follows any INIT transitions
from the inital state and performs ENTRY actions as it proceeds.
Use this to pass any parameters to initialize the state machine.
p. 172
"""
# TODO: The initial state MUST transition to another state
# The code that formerly did this was:
# status = me.initial_state(me, event)
# assert status == Hsm.RET_TRAN
# But the above code is commented out so an Ahsm's _initial()
# isn't executed twice.
me.state = Hsm._perform_init_chain(me, Hsm.top)
@staticmethod
def dispatch(me, event):
"""Dispatches the given event to this Hsm.
Follows the application's state transitions
until the event is handled or top() is reached
p. 174
"""
Spy.on_hsm_dispatch_event(event)
# Save the current state
t = me.state
        # Proceed to superstates if the event is not handled; we wish to find the superstate
# (if any) that does handle the event and to record the path to that state
exit_path = []
r = Hsm.RET_SUPER
while r == Hsm.RET_SUPER:
s = me.state
exit_path.append(s)
Spy.on_hsm_dispatch_pre(s)
r = s(me, event) # invoke state handler
# We leave the while loop with s at the state which was able to respond
# to the event, or to Hsm.top if none did
Spy.on_hsm_dispatch_post(exit_path)
# If the state handler for s requests a transition
if r == Hsm.RET_TRAN:
t = me.state
# Store target of transition
# Exit from the current state to the state s which handles
# the transition. We do not exit from s=exit_path[-1] itself.
for st in exit_path[:-1]:
r = Hsm.exit(me, st)
assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)
s = exit_path[-1]
# Transition to t through the HSM
Hsm._perform_transition(me, s, t)
# Do initializations starting at t
t = Hsm._perform_init_chain(me, t)
# Restore the state
me.state = t
class Framework(object):
"""Framework is a composite class that holds:
- the asyncio event loop
- the registry of AHSMs
- the set of TimeEvents
- the handle to the next TimeEvent
- the table subscriptions to events
"""
_event_loop = asyncio.get_event_loop()
# The Framework maintains a registry of Ahsms in a list.
_ahsm_registry = []
# The Framework maintains a dict of priorities in use
# to prevent duplicates.
# An Ahsm's priority is checked against this dict
# within the Ahsm.start() method
# when the Ahsm is added to the Framework.
# The dict's key is the priority (integer) and the value is the Ahsm.
_priority_dict = {}
# The Framework maintains a group of TimeEvents in a dict. The next
# expiration of the TimeEvent is the key and the event is the value.
# Only the event with the next expiration time is scheduled for the
# timeEventCallback(). As TimeEvents are added and removed, the scheduled
# callback must be re-evaluated. Periodic TimeEvents should only have
# one entry in the dict: the next expiration. The timeEventCallback() will
# add a Periodic TimeEvent back into the dict with its next expiration.
_time_events = {}
# When a TimeEvent is scheduled for the timeEventCallback(),
# a handle is kept so that the callback may be cancelled if necessary.
_tm_event_handle = None
# The Subscriber Table is a dictionary. The keys are signals.
# The value for each key is a list of Ahsms that are subscribed to the
# signal. An Ahsm may subscribe to a signal at any time during runtime.
_subscriber_table = {}
@staticmethod
def post(event, act):
"""Posts the event to the given Ahsm's event queue.
The argument, act, is an Ahsm instance.
"""
assert isinstance(act, Ahsm)
act.postFIFO(event)
@staticmethod
def post_by_name(event, act_name):
"""Posts the event to the given Ahsm's event queue.
The argument, act, is a string of the name of the class
to which the event is sent. The event will post to all actors
having the given classname.
"""
assert type(act_name) is str
for act in Framework._ahsm_registry:
if act.__class__.__name__ == act_name:
act.postFIFO(event)
@staticmethod
def publish(event):
"""Posts the event to the message queue of every Ahsm
that is subscribed to the event's signal.
"""
if event.signal in Framework._subscriber_table:
for act in Framework._subscriber_table[event.signal]:
act.postFIFO(event)
# Run to completion
Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod
def subscribe(signame, act):
"""Adds the given Ahsm to the subscriber table list
for the given signal. The argument, signame, is a string of the name
of the Signal to which the Ahsm is subscribing. Using a string allows
the Signal to be created in the registry if it is not already.
"""
sigid = Signal.register(signame)
if sigid not in Framework._subscriber_table:
Framework._subscriber_table[sigid] = []
Framework._subscriber_table[sigid].append(act)
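
    # Editor's note (hedged sketch): a typical pattern is for an actor to call
    #     Framework.subscribe("TEMP_CHANGED", self)
    # while initializing, after which any other actor can
    #     Framework.publish(Event(Signal.TEMP_CHANGED, 23.5))
    # to fan the event out to every subscriber's queue ("TEMP_CHANGED" is an
    # invented signal name).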
@staticmethod
def addTimeEvent(tm_event, delta):
"""Adds the TimeEvent to the list of time events in the Framework.
The event will fire its signal (to the TimeEvent's target Ahsm)
after the delay, delta.
"""
expiration = Framework._event_loop.time() + delta
Framework.addTimeEventAt(tm_event, expiration)
@staticmethod
def addTimeEventAt(tm_event, abs_time):
"""Adds the TimeEvent to the list of time events in the Framework.
The event will fire its signal (to the TimeEvent's target Ahsm)
at the given absolute time (_event_loop.time()).
"""
assert tm_event not in Framework._time_events.values()
Framework._insortTimeEvent(tm_event, abs_time)
@staticmethod
def _insortTimeEvent(tm_event, expiration):
"""Inserts a TimeEvent into the list of time events,
sorted by the next expiration of the timer.
If the expiration time matches an existing expiration,
we add the smallest amount of time to the given expiration
to avoid a key collision in the Dict
and make the identically-timed events fire in a FIFO fashion.
"""
# If the event is to happen in the past, post it now
now = Framework._event_loop.time()
if expiration < now:
tm_event.act.postFIFO(tm_event)
# TODO: if periodic, need to schedule next?
# If an event already occupies this expiration time,
# increase this event's expiration by the smallest measurable amount
while expiration in Framework._time_events.keys():
m, e = math.frexp(expiration)
expiration = (m + sys.float_info.epsilon) * 2**e
Framework._time_events[expiration] = tm_event
# If this is the only active TimeEvent, schedule its callback
if len(Framework._time_events) == 1:
Framework._tm_event_handle = Framework._event_loop.call_at(
expiration, Framework.timeEventCallback, tm_event, expiration)
# If there are other TimeEvents,
# check if this one should replace the scheduled one
else:
if expiration < min(Framework._time_events.keys()):
Framework._tm_event_handle.cancel()
Framework._tm_event_handle = Framework._event_loop.call_at(
expiration, Framework.timeEventCallback, tm_event,
expiration)
@staticmethod
def removeTimeEvent(tm_event):
"""Removes the TimeEvent from the list of active time events.
Cancels the TimeEvent's callback if there is one.
Schedules the next event's callback if there is one.
"""
for k,v in Framework._time_events.items():
if v is tm_event:
# If the event being removed is scheduled for callback,
# cancel and schedule the next event if there is one
if k == min(Framework._time_events.keys()):
del Framework._time_events[k]
if Framework._tm_event_handle:
Framework._tm_event_handle.cancel()
if len(Framework._time_events) > 0:
next_expiration = min(Framework._time_events.keys())
next_event = Framework._time_events[next_expiration]
Framework._tm_event_handle = \
Framework._event_loop.call_at(
next_expiration, Framework.timeEventCallback,
next_event, next_expiration)
else:
Framework._tm_event_handle = None
else:
del Framework._time_events[k]
break
@staticmethod
def timeEventCallback(tm_event, expiration):
"""The callback function for all TimeEvents.
Posts the event to the event's target Ahsm.
If the TimeEvent is periodic, re-insort the event
in the list of active time events.
"""
assert expiration in Framework._time_events.keys(), (
"Exp:%d _time_events.keys():%s" %
(expiration, Framework._time_events.keys()))
# Remove this expired TimeEvent from the active list
del Framework._time_events[expiration]
Framework._tm_event_handle = None
# Post the event to the target Ahsm
tm_event.act.postFIFO(tm_event)
# If this is a periodic time event, schedule its next expiration
if tm_event.interval > 0:
Framework._insortTimeEvent(tm_event,
expiration + tm_event.interval)
# If not set already and there are more events, set the next event callback
if (Framework._tm_event_handle == None and
len(Framework._time_events) > 0):
next_expiration = min(Framework._time_events.keys())
next_event = Framework._time_events[next_expiration]
Framework._tm_event_handle = Framework._event_loop.call_at(
next_expiration, Framework.timeEventCallback, next_event,
next_expiration)
# Run to completion
Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod
def add(act):
"""Makes the framework aware of the given Ahsm.
"""
Framework._ahsm_registry.append(act)
assert act.priority not in Framework._priority_dict, (
"Priority MUST be unique")
Framework._priority_dict[act.priority] = act
Spy.on_framework_add(act)
@staticmethod
def run():
"""Dispatches an event to the highest priority Ahsm
until all event queues are empty (i.e. Run To Completion).
"""
getPriority = lambda x : x.priority
while True:
allQueuesEmpty = True
sorted_acts = sorted(Framework._ahsm_registry, key=getPriority)
for act in sorted_acts:
if act.has_msgs():
event_next = act.pop_msg()
act.dispatch(act, event_next)
allQueuesEmpty = False
break
if allQueuesEmpty:
return
@staticmethod
def stop():
"""EXITs all Ahsms and stops the event loop.
"""
# Disable the timer callback
if Framework._tm_event_handle:
Framework._tm_event_handle.cancel()
Framework._tm_event_handle = None
# Post EXIT to all Ahsms
for act in Framework._ahsm_registry:
Framework.post(Event.EXIT, act)
# Run to completion and stop the asyncio event loop
Framework.run()
Framework._event_loop.stop()
Spy.on_framework_stop()
@staticmethod
def print_info():
"""Prints the name and current state
of each actor in the framework.
Meant to be called when ctrl+T (SIGINFO/29) is issued.
"""
for act in Framework._ahsm_registry:
print(act.__class__.__name__, act.state.__name__)
# Bind a useful set of POSIX signals to the handler
# (ignore a NotImplementedError on Windows)
try:
_event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop())
_event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop())
_event_loop.add_signal_handler(29, print_info.__func__)
except NotImplementedError:
pass
def run_forever():
"""Runs the asyncio event loop with and
ensures state machines are exited upon a KeyboardInterrupt.
"""
loop = asyncio.get_event_loop()
try:
loop.run_forever()
except KeyboardInterrupt:
Framework.stop()
loop.close()
class Ahsm(Hsm):
"""An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO.
Adds a priority, message queue and methods to work with the queue.
"""
def start(self, priority, initEvent=None):
# must set the priority before Framework.add() which uses the priority
self.priority = priority
Framework.add(self)
self.mq = collections.deque()
self.init(self, initEvent)
# Run to completion
Framework._event_loop.call_soon_threadsafe(Framework.run)
def postLIFO(self, evt):
self.mq.append(evt)
def postFIFO(self, evt):
self.mq.appendleft(evt)
def pop_msg(self,):
return self.mq.pop()
def has_msgs(self,):
return len(self.mq) > 0
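

# --- Editor's addition: a hedged, minimal sketch (not part of the original framework) ---
# A two-state machine showing the @Hsm.state decorator, the hardcoded _initial state
# and a signal-driven transition.  The class name and the "TOGGLE" signal are invented
# for illustration only.
class _ExampleBlinker(Ahsm):
    @Hsm.state
    def _initial(me, event):
        Signal.register("TOGGLE")
        return me.tran(me, _ExampleBlinker._off)

    @Hsm.state
    def _off(me, event):
        if event.signal == Signal.ENTRY:
            return me.handled(me, event)
        if event.signal == Signal.TOGGLE:
            return me.tran(me, _ExampleBlinker._on)
        return me.super(me, me.top)

    @Hsm.state
    def _on(me, event):
        if event.signal == Signal.ENTRY:
            return me.handled(me, event)
        if event.signal == Signal.TOGGLE:
            return me.tran(me, _ExampleBlinker._off)
        return me.super(me, me.top)

# Typical driving code (sketch, not executed here):
#     blinker = _ExampleBlinker()
#     blinker.start(priority=1)
#     Framework.publish(Event(Signal.TOGGLE, None))
#     run_forever()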
class TimeEvent(object):
"""TimeEvent is a composite class that contains an Event.
A TimeEvent is created by the application and added to the Framework.
The Framework then emits the event after the given delay.
A one-shot TimeEvent is created by calling either postAt() or postIn().
A periodic TimeEvent is created by calling the postEvery() method.
"""
def __init__(self, signame):
assert type(signame) == str
self.signal = Signal.register(signame)
self.value = None
def postAt(self, act, abs_time):
"""Posts this TimeEvent to the given Ahsm at a specified time.
"""
assert issubclass(type(act), Ahsm)
self.act = act
self.interval = 0
Framework.addTimeEventAt(self, abs_time)
def postIn(self, act, delta):
"""Posts this TimeEvent to the given Ahsm after the time delta.
"""
assert issubclass(type(act), Ahsm)
self.act = act
self.interval = 0
Framework.addTimeEvent(self, delta)
def postEvery(self, act, delta):
"""Posts this TimeEvent to the given Ahsm after the time delta
and every time delta thereafter until disarmed.
"""
assert issubclass(type(act), Ahsm)
self.act = act
self.interval = delta
Framework.addTimeEvent(self, delta)
def disarm(self):
"""Removes this TimeEvent from the Framework's active time events.
"""
self.act = None
Framework.removeTimeEvent(self)
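
# Editor's note (hedged usage sketch): a periodic timer inside an Ahsm typically
# looks like the lines below; "TICK" is an invented signal name.
#
#     self.tick_evt = TimeEvent("TICK")
#     self.tick_evt.postEvery(self, 0.5)   # deliver Signal.TICK every 500 ms
#     ...
#     self.tick_evt.disarm()               # stop the periodic delivery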
from .VcdSpy import VcdSpy
| 37.072306 | 101 | 0.614668 | [
"MIT"
] | SzeMengTan/farc | farc/__init__.py | 27,174 | Python |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: type_bank.py
@time: 2019-08-17 18:23
"""
from __future__ import unicode_literals
from flask_babel import lazy_gettext as _
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT, DEFAULT_SELECT_CHOICES_INT
# Bank account type (1: basic account, 2: general account)
TYPE_BANK_BASIC = 1
TYPE_BANK_GENERAL = 2
TYPE_BANK_DICT = {
    TYPE_BANK_BASIC: _('Basic Account'),  # basic (corporate) account
    TYPE_BANK_GENERAL: _('General Account'),  # general (corporate) account
}
TYPE_BANK_SELECT_CHOICES = DEFAULT_SELECT_CHOICES_INT + TYPE_BANK_DICT.items()  # select choices
TYPE_BANK_SEARCH_CHOICES = DEFAULT_SEARCH_CHOICES_INT + TYPE_BANK_DICT.items()  # search choices
| 24.035714 | 90 | 0.768202 | [
"MIT"
] | zhanghe06/bearing_project | app_common/maps/type_bank.py | 741 | Python |
"""
Django settings for mycalendar project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mycalendar.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mycalendar.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
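# Illustrative note: SECRET_KEY is read through python-decouple, which looks for an
# environment variable or a .env file next to manage.py containing a line such as
#     SECRET_KEY=replace-me-with-a-long-random-string
# A development-only fallback could be declared as
#     SECRET_KEY = config('SECRET_KEY', default='insecure-dev-only-key')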
| 25.307692 | 91 | 0.700912 | ["MIT"] | spralja/mycalendar | mycalendar/settings.py | 3,290 | Python |
import logging
from Models import Values
from classes import PlayerActions, OnePosition
from ActionsDispatcher import Action
class PositionsManagerDBException(RuntimeError):
pass
class PositionsManager(object):
def __init__(self, models, accountId, logger=None):
self.models = models
self.accountId = accountId
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.restore()
def restore(self):
models = self.models
profitThres = models.Values.get(Values.PositionThresProfit,
accountId=self.accountId)
if profitThres is None:
raise PositionsManagerDBException('Settings "{k}" not initialized.'
.format(k=Values.PositionThresProfit))
self.profitThres = profitThres
lossCutThres = models.Values.get(Values.PositionThresLossCut,
accountId=self.accountId)
if lossCutThres is None:
raise PositionsManagerDBException('Settings "{k}" not initialized.'
.format(k=Values.PositionThresLossCut))
self.lossCutThres = lossCutThres
@staticmethod
def calcVariation(onetick, oneposition):
"""
(tick: OneTick, position: OnePosition) -> float
"""
created = oneposition.priceMean()
if oneposition.side == OnePosition.SideLong:
current = onetick.bid
else:
current = onetick.ask
return current / created
def makeDecision(self, positions):
tick = self.models.Ticks.one()
for p in positions:
onePosition = p.positions[0]
oneTick = tick[onePosition.exchanger]
var = PositionsManager.calcVariation(oneTick, onePosition)
if onePosition.side == OnePosition.SideLong:
if var >= self.profitThres:
return [(PlayerActions.CloseForProfit, p)] # Long, Profit
elif var <= self.lossCutThres:
return [(PlayerActions.CloseForLossCut, p)] # Long, LossCut
else:
if var <= 1.0 / self.profitThres:
return [(PlayerActions.CloseForProfit, p)] # Short, Profit
elif var >= 1.0 / self.lossCutThres:
return [(PlayerActions.CloseForLossCut, p)] # Short, LossCut
return []
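  # Worked example (threshold values assumed, not taken from the database):
  # calcVariation returns current_price / mean_open_price. With, say,
  # profitThres = 1.02 and lossCutThres = 0.98, a long position opened at a mean
  # price of 10000 is closed for profit once the bid reaches 10200 (var = 1.02)
  # and loss-cut once the bid falls to 9800 (var = 0.98); for a short position
  # the same thresholds are applied to the reciprocal of var, as branched above.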
def getOpenPositions(self):
return self.models.Positions.currentOpen(accountId=self.accountId)
def createAction(self):
positions = self.getOpenPositions()
positions = filter(lambda p:p.isOpen(), positions)
closes = self.makeDecision(positions)
self.logger.debug('Completed decision, #close={n}.'.format(n=len(closes)))
if len(closes) > 0:
actionType, position = closes[0]
return Action(actionType, position)
else:
return None
| 35.402597 | 79 | 0.657373 | ["Apache-2.0"] | kikei/btc-bot-ai | apps/trade/src/PositionsManager.py | 2,726 | Python |
from django.contrib import admin
from user.models import *
# Set the page header title here
admin.site.site_title = '新码农站点后台'
# Set the page display title here
admin.site.site_header = '新码农后台管理系统'
@admin.register(User)
class Useradmin(admin.ModelAdmin):
list_display = ['id', 'username', 'password', 'nickname', 'birthday', 'gender', 'photo', 'phone', 'email', 'desc',
'addtime']
    # list_display_links: make these other fields clickable links into the edit page
list_display_links = ['id', 'username']
list_per_page = 50
list_filter = ['gender', 'birthday']
search_fields = ['username', 'nickname', 'phone']
    # list_editable: fields that can be edited directly in the list view
list_editable = ['nickname', 'birthday', 'gender', 'phone', 'email', 'desc']
ordering = ['-addtime']
    # date_hierarchy: drill-down filtering by date
date_hierarchy = 'addtime'
@admin.register(Leavemsg)
class Leavemsgadmin(admin.ModelAdmin):
list_display = ['id', 'content', 'user', 'addtime']
    # list_display_links: make these other fields clickable links into the edit page
list_display_links = ['id', 'user']
list_per_page = 50
list_filter = ['user']
search_fields = ['user']
    # list_editable: fields that can be edited directly in the list view
list_editable = ['content']
ordering = ['-addtime']
    # date_hierarchy: drill-down filtering by date
date_hierarchy = 'addtime'
| 30.35 | 118 | 0.666392 | ["Apache-2.0"] | guoxianru/newcoder | apps/user/admin.py | 1,434 | Python |
from typing import Optional, List
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autogalaxy.aggregator.imaging import _imaging_from
from autogalaxy.aggregator.abstract import AbstractAgg
from autolens.imaging.fit_imaging import FitImaging
from autolens.analysis.preloads import Preloads
from autolens.aggregator.tracer import _tracer_from
def _fit_imaging_from(
fit: af.Fit,
galaxies: List[ag.Galaxy],
settings_imaging: aa.SettingsImaging = None,
settings_pixelization: aa.SettingsPixelization = None,
settings_inversion: aa.SettingsInversion = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
) -> FitImaging:
"""
Returns a `FitImaging` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear
search model-fit.
This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before performing the
fit, if they were used.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
FitImaging
The fit to the imaging dataset computed via an instance of galaxies.
"""
imaging = _imaging_from(fit=fit, settings_imaging=settings_imaging)
tracer = _tracer_from(fit=fit, galaxies=galaxies)
settings_pixelization = settings_pixelization or fit.value(
name="settings_pixelization"
)
settings_inversion = settings_inversion or fit.value(name="settings_inversion")
preloads = Preloads(use_w_tilde=False)
if use_preloaded_grid:
sparse_grids_of_planes = fit.value(name="preload_sparse_grids_of_planes")
if sparse_grids_of_planes is not None:
preloads = Preloads(
sparse_image_plane_grid_pg_list=sparse_grids_of_planes,
use_w_tilde=False,
)
if len(preloads.sparse_image_plane_grid_pg_list) == 2:
if type(preloads.sparse_image_plane_grid_pg_list[1]) != list:
preloads.sparse_image_plane_grid_pg_list[1] = [
preloads.sparse_image_plane_grid_pg_list[1]
]
return FitImaging(
dataset=imaging,
tracer=tracer,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
preloads=preloads,
use_hyper_scaling=use_hyper_scaling,
)
class FitImagingAgg(AbstractAgg):
def __init__(
self,
aggregator: af.Aggregator,
settings_imaging: Optional[aa.SettingsImaging] = None,
settings_pixelization: Optional[aa.SettingsPixelization] = None,
settings_inversion: Optional[aa.SettingsInversion] = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
):
"""
Wraps a PyAutoFit aggregator in order to create generators of fits to imaging data, corresponding to the
results of a non-linear search model-fit.
"""
super().__init__(aggregator=aggregator)
self.settings_imaging = settings_imaging
self.settings_pixelization = settings_pixelization
self.settings_inversion = settings_inversion
self.use_preloaded_grid = use_preloaded_grid
self.use_hyper_scaling = use_hyper_scaling
def make_object_for_gen(self, fit, galaxies) -> FitImaging:
"""
Creates a `FitImaging` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear
search.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
FitImaging
A fit to imaging data whose galaxies are a sample of a PyAutoFit non-linear search.
"""
return _fit_imaging_from(
fit=fit,
galaxies=galaxies,
settings_imaging=self.settings_imaging,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
use_preloaded_grid=self.use_preloaded_grid,
use_hyper_scaling=self.use_hyper_scaling,
)
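# Illustrative sketch (the database filename and the aggregator construction shown
# here are assumptions, not taken from this module):
#     agg = af.Aggregator.from_database("result.sqlite")
#     fit_agg = FitImagingAgg(aggregator=agg, use_preloaded_grid=True)
# Each fit is then built per posterior sample via make_object_for_gen(fit, galaxies),
# which wraps _fit_imaging_from defined above.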
| 35.738462 | 120 | 0.667456 | ["MIT"] | Jammy2211/AutoLens | autolens/aggregator/fit_imaging.py | 4,646 | Python |
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 14:25:43 2020
@author: greg6
"""
import numpy as np
t = [i for i in range(3)]
lam = [100+i*10 for i in range(2)]
com = ["A","B","C"]
S = dict()
for l in lam:
for u,c in enumerate(com):
S[(l,c)] = l+0.1*u
C = dict()
for i in t:
for u,c in enumerate(com):
C[(i,c)] = (i+0.1*u)
nt = len(t)
nw = len(lam)
nc = len(com)
nparams = 2
nd = nw*nt
ntheta = nc*(nw+nt)+nparams
B_matrix = np.zeros((ntheta,nw*nt))
for i, t in enumerate(t):
for j, l in enumerate(lam):
for k, c in enumerate(com):
# r_idx1 = k*nt+i
r_idx1 = i * nc + k
r_idx2 = j * nc + k + nc * nt
# r_idx2 = j * nc + k + nc * nw
# c_idx = i+j*nt
c_idx = i * nw + j
# print(j, k, r_idx2)
B_matrix[r_idx1, c_idx] = S[l, c]
# try:
            B_matrix[r_idx2, c_idx] = C[t, c]
| 20.666667 | 45 | 0.476344 | ["BSD-3-Clause"] | kuanhanl/k_aug | Reduce_hessian/tests/B1.py | 930 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mikeeiei.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(954, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(80, 240, 160, 141))
self.formLayoutWidget.setObjectName(_fromUtf8("formLayoutWidget"))
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.kaojai = QtGui.QPushButton(self.formLayoutWidget)
self.kaojai.setStyleSheet(_fromUtf8("int main(int argc, char *argv[])\n"
"\n"
"#upLeft {\n"
"background-color: transparent;\n"
"border-image: url(:/images/frame.png);\n"
"background: none;\n"
"border: none;\n"
"background-repeat: none;\n"
"}\n"
"{\n"
"border-image: url(:mike2.jpg);\n"
"}"))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/mike/mike.jpg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.kaojai.setIcon(icon)
self.kaojai.setObjectName(_fromUtf8("kaojai"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.kaojai)
self.maikaojai = QtGui.QPushButton(self.formLayoutWidget)
self.maikaojai.setObjectName(_fromUtf8("maikaojai"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.maikaojai)
self.vote = QtGui.QPushButton(self.formLayoutWidget)
self.vote.setObjectName(_fromUtf8("vote"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.vote)
self.question = QtGui.QPushButton(self.formLayoutWidget)
self.question.setObjectName(_fromUtf8("question"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.question)
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 331, 161))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.tabWidget = QtGui.QTabWidget(self.frame)
self.tabWidget.setGeometry(QtCore.QRect(100, 110, 135, 80))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(60, 100, 831, 271))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8("")))
self.label.setObjectName(_fromUtf8("label"))
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSend = QtGui.QAction(MainWindow)
self.actionSend.setIcon(icon)
self.actionSend.setObjectName(_fromUtf8("actionSend"))
self.toolBar.addAction(self.actionSend)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.kaojai.setText(_translate("MainWindow", "PushButton", None))
self.maikaojai.setText(_translate("MainWindow", "PushButton", None))
self.vote.setText(_translate("MainWindow", "PushButton", None))
self.question.setText(_translate("MainWindow", "d", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
self.actionSend.setText(_translate("MainWindow", "send", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 45.661017 | 111 | 0.68467 | ["MIT"] | pection-zz/Interactionstudent | PyqtProject/test2.py | 5,388 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ModelBuilderRLF.py:
"""
__author__ = "Antonio Jesús Banegas-Luna"
__version__ = "1.0"
__maintainer__ = "Antonio"
__email__ = "[email protected]"
__status__ = "Development"
from BaseModelBuilder import BaseModelBuilder
class ModelBuilderRLF(BaseModelBuilder):
def get_default_model(self):
p = {}
p['model'] = self.model_name
p['train_grid'] = 'NONE'
p['type_ml'] = 'classification'
p['n_jobs'] = 8
p['params'] = {}
p['params']['tree_size'] = 4
p['params']['sample_fract'] = 'default'
p['params']['max_rules'] = 2000
p['params']['memory_par'] = 0.01
p['params']['rfmode'] = 'classify'
p['params']['lin_trim_quantile'] = 0.025
p['params']['lin_standardise'] = True
p['params']['exp_rand_tree_size'] = True
p['params_grid'] = {}
return p
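# Illustrative note: the keys under p['params'] (tree_size, max_rules, rfmode,
# lin_standardise, ...) mirror the constructor arguments of a RuleFit-style
# estimator. A hypothetical consumer might unpack them directly, e.g.
#     params = ModelBuilderRLF(...).get_default_model()['params']
#     model = RuleFit(**params)
# where RuleFit and the builder's constructor arguments are assumptions here.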
| 24.972973 | 48 | 0.580087 | ["Apache-2.0"] | bio-hpc/sibila | Scripts/GridSearch/ModelBuilderRLF.py | 925 | Python |
import warnings
from textwrap import indent
import astropy.units as u
import numpy as np
from astropy.constants import c
from astropy.coordinates import (ICRS,
CartesianDifferential,
CartesianRepresentation, SkyCoord)
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SpectralCoord']
class NoVelocityWarning(AstropyUserWarning):
pass
class NoDistanceWarning(AstropyUserWarning):
pass
KMS = u.km / u.s
C_KMS = c.to(KMS)
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
# Default distance to use for target when none is provided
DEFAULT_DISTANCE = 1e6 * u.kpc
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ['SpectralCoord.*']
def _velocity_to_redshift(velocity):
"""
Convert a velocity to a relativistic redshift.
"""
beta = velocity / C_KMS
return np.sqrt((1 + beta) / (1 - beta)) - 1
def _redshift_to_velocity(redshift):
"""
Convert a relativistic redshift to a velocity.
"""
zponesq = (1 + redshift) ** 2
return (C_KMS * (zponesq - 1) / (zponesq + 1))
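# Worked example: the two helpers above are exact inverses. For a radial velocity
# of 0.1 c, beta = 0.1 and z = sqrt(1.1 / 0.9) - 1 ≈ 0.1055; feeding that z back
# through _redshift_to_velocity recovers 0.1 c. For small beta, z ≈ beta (z ≈ v / c).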
def _apply_relativistic_doppler_shift(scoord, velocity):
"""
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
that is Doppler shifted by this amount.
Note that the Doppler shift applied is the full relativistic one, so
`SpectralQuantity` currently expressed in velocity and not using the
relativistic convention will temporarily be converted to use the
relativistic convention while the shift is applied.
Positive velocities are assumed to redshift the spectral quantity,
while negative velocities blueshift the spectral quantity.
"""
# NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
# since we can't guarantee that their metadata would be correct/consistent.
squantity = scoord.view(SpectralQuantity)
beta = velocity / c
doppler_factor = np.sqrt((1 + beta) / (1 - beta))
if squantity.unit.is_equivalent(u.m): # wavelength
return squantity * doppler_factor
elif (squantity.unit.is_equivalent(u.Hz) or
squantity.unit.is_equivalent(u.eV) or
squantity.unit.is_equivalent(1 / u.m)):
return squantity / doppler_factor
elif squantity.unit.is_equivalent(KMS): # velocity
return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
else: # pragma: no cover
raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
"This should not happen, so please report this in the "
"astropy issue tracker!")
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False):
"""
Given an original coordinate object, update the differentials so that
the final coordinate is at the same location as the original coordinate
but co-moving with the velocity reference object.
If preserve_original_frame is set to True, the resulting object will be in
the frame of the original coordinate, otherwise it will be in the frame of
the velocity reference.
"""
if not velocity_reference.data.differentials:
raise ValueError("Reference frame has no velocities")
# If the reference has an obstime already defined, we should ignore
# it and stick with the original observer obstime.
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'):
velocity_reference = velocity_reference.replicate(obstime=original.obstime)
# We transform both coordinates to ICRS for simplicity and because we know
# it's a simple frame that is not time-dependent (it could be that both
# the original and velocity_reference frame are time-dependent)
original_icrs = original.transform_to(ICRS())
velocity_reference_icrs = velocity_reference.transform_to(ICRS())
differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation,
CartesianDifferential).differentials
data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation)
.with_differentials(differentials))
final_icrs = original_icrs.realize_frame(data_with_differentials)
if preserve_observer_frame:
final = final_icrs.transform_to(original)
else:
final = final_icrs.transform_to(velocity_reference)
return final.replicate(representation_type=CartesianRepresentation,
differential_type=CartesianDifferential)
def attach_zero_velocities(coord):
"""
Set the differentials to be stationary on a coordinate object.
"""
new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES)
return coord.realize_frame(new_data)
def _get_velocities(coord):
if 's' in coord.data.differentials:
return coord.velocity
else:
return ZERO_VELOCITIES
class SpectralCoord(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the `astropy-dev mailing list`_ or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : str or `~astropy.units.Unit`
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity`, optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
        This can only be specified if ``radial_velocity`` is not specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km/u.s)
def __new__(cls, value, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError("Cannot specify radial velocity or redshift if both "
"target and observer are specified")
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError('redshift should be dimensionless')
radial_velocity = _redshift_to_velocity(redshift)
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, 'observer', None)
if target is None:
target = getattr(value, 'target', None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, 'radial_velocity', None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label='observer')
obj._target = cls._validate_coordinate(target, label='target')
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, '_radial_velocity', None)
self._observer = getattr(obj, '_observer', None)
self._target = getattr(obj, '_target', None)
@staticmethod
def _validate_coordinate(coord, label=''):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all='ignore'):
distance = getattr(coord, 'distance', None)
if distance is not None and distance.unit.physical_type == 'dimensionless':
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if 's' not in coord.data.differentials:
warnings.warn(
"No velocity defined on frame, assuming {}.".format(
ZERO_VELOCITIES),
NoVelocityWarning)
coord = attach_zero_velocities(coord)
return coord
def replicate(self, value=None, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
doppler_convention=None, doppler_rest=None,
copy=False):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : str or `~astropy.units.Unit`
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity`, optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SkyCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError("Cannot specify value as a Quantity and also specify unit")
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
        # If value is being taken from self and copy is True, make a copy
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoVelocityWarning)
return self.__class__(value=value, unit=unit,
observer=observer, target=target,
radial_velocity=radial_velocity, redshift=redshift,
doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label='observer')
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label='target')
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity`
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(self._observer, self._target,
as_scalar=True)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
float
Redshift of target.
"""
return _velocity_to_redshift(self.radial_velocity)
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
as_scalar : bool
If `True`, the magnitude of the velocity vector will be returned,
otherwise the full vector will be returned.
Returns
-------
`~astropy.units.Quantity`
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `BaseRepresentation`
Position representation.
"""
d_pos = (target.cartesian.without_differentials() -
observer.cartesian.without_differentials())
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km/u.s)
def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError("This method can only be used if both observer "
"and target are defined on the SpectralCoord.")
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km))
if frame.data.differentials:
if velocity is not None:
raise ValueError('frame already has differentials, cannot also specify velocity')
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(frame.data.with_differentials(differentials))
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError('velocity should be a Quantity vector with 3 elements')
frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m,
*velocity,
representation_type='cartesian',
differential_type='cartesian')
observer = update_differentials_to_match(self.observer, frame,
preserve_observer_frame=preserve_observer_frame)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity`
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity`
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (self.target is None or
self.observer is None):
raise ValueError("Both an observer and target must be defined "
"before applying a velocity shift.")
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError("Argument must have unit physical type "
"'speed' for radial velocty or "
"'dimensionless' for redshift.")
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == 'dimensionless':
target_shift = _redshift_to_velocity(target_shift)
if self._observer is None or self._target is None:
return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == 'dimensionless':
observer_shift = _redshift_to_velocity(observer_shift)
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = (target_icrs
.realize_frame(target_icrs.cartesian.with_differentials(target_velocity))
.transform_to(self._target))
new_observer = (observer_icrs
.realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity))
.transform_to(self._observer))
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data,
observer=new_observer,
target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = 'Undefined'
repr_items = [f'{prefixstr}']
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * ' ').lstrip()
repr_items.append(f' observer: {observer_repr}')
if self.target is not None:
target_repr = indent(repr(self.target), 12 * ' ').lstrip()
repr_items.append(f' target: {target_repr}')
if (self._observer is not None and self._target is not None) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(' observer to target (computed from above):')
else:
repr_items.append(' observer to target:')
repr_items.append(f' radial_velocity={radial_velocity}')
repr_items.append(f' redshift={redshift}')
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f' doppler_rest={self.doppler_rest}')
repr_items.append(f' doppler_convention={self.doppler_convention}')
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=' ')
if len(repr_items) == 1:
repr_items[0] += f'{arrstr}{self._unitstr:s}'
else:
repr_items[1] = ' (' + repr_items[1].lstrip()
repr_items[-1] += ')'
repr_items.append(f' {arrstr}{self._unitstr:s}')
return '\n'.join(repr_items) + '>'
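# Minimal usage sketch, based on the parameters and methods documented above:
#     >>> sc = SpectralCoord([654.2, 656.3] * u.nm, radial_velocity=30 * u.km / u.s)
#     >>> sc.to(u.THz)                                  # unit conversion
#     >>> sc.to_rest()                                  # remove the 30 km/s shift
#     >>> sc.with_radial_velocity_shift(target_shift=10 * u.km / u.s)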
| 41.219922 | 111 | 0.638107 | ["MIT"] | honeybhardwaj/Language_Identification | LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py | 31,863 | Python |
"""项目配置"""
# 图灵机器人,99元一月付费版,尽情享用!
tuling_api_key = '88f17f853d974387af64955bed9466f4'
# 自动回复
is_friend_auto_reply = False # 好友自动回复
is_group_reply = False # 此项表示群中是否回复
is_group_at_reply = False # 上一项开启后此项才生效
is_forward_revoke_msg = True # 开启防撤回模式
is_forward_group_at_msg = False # 转发群@我的消息
# 机器人主人
bot_master_name = '' # 使用备注名更安全,只允许一个,可远程控制机器人,如果不设置(空)则将文件助手设置为管理员,但不具备远程控制功能
# 监听某些好友群聊,如老板
is_listen_friend = False
listen_friend_names = '猪哥' # 需要监听的人名称,使用备注名更安全,允许多个用|分隔,如:主管|项目经理|产品狗
listen_friend_groups = 'Python新手交流' # 在这些群里监听好友说的话,匹配模式:包含“唯一集团工作群”的群
# 转发信息至群
is_forward_mode = False # 打开转发模式,主人发送给机器人的消息都将转发至forward_groups群
forward_groups = 'Python新手交流' # 需要将消息转发的群,匹配模式同上
# 群分享监控
is_listen_sharing = False
listen_sharing_groups = 'Python新手交流' # 监控群分享,匹配模式同上
| 25.322581 | 79 | 0.782166 | ["MIT"] | ChriXChan/tools | pyp/wxrobot-master/config.py | 1,349 | Python |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../cortex")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cortex'
copyright = u'2017, sherazkhan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from cortex import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cortex-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'cortex Documentation',
u'sherazkhan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| 34.768 | 85 | 0.70168 | ["MIT"] | SherazKhan/cortex | docs/conf.py | 8,692 | Python |
# -*- coding: UTF-8 -*-
# Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
#
# You may assume no duplicates in the array.
#
# Here are few examples.
# [1,3,5,6], 5 → 2
# [1,3,5,6], 2 → 1
# [1,3,5,6], 7 → 4
# [1,3,5,6], 0 → 0
#
# Accepted in both Python and Python 3.
class SearchInsertPosition(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if nums is None or len(nums) == 0:
return 0
for i in range(0, len(nums)):
if nums[i] == target:
return i
elif nums[i] < target:
if (i + 1 < len(nums) and nums[i + 1] > target) or i + 1 == len(nums):
return i + 1
return 0
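# Minimal usage sketch running the examples from the problem statement above.
if __name__ == '__main__':
    solver = SearchInsertPosition()
    print(solver.searchInsert([1, 3, 5, 6], 5))  # expected 2
    print(solver.searchInsert([1, 3, 5, 6], 2))  # expected 1
    print(solver.searchInsert([1, 3, 5, 6], 7))  # expected 4
    print(solver.searchInsert([1, 3, 5, 6], 0))  # expected 0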
| 26.69697 | 156 | 0.523269 | ["MIT"] | TonnyL/Windary | Python/SearchInsertPosition.py | 889 | Python |
import math
import numpy as np
from kinematics.forward import ForwardKinematics
from kinematics.kinematics import Kinematics
from kinematics.solution import InverseKinematicsShoulderSolution, InverseKinematicsSpecificSolution, \
InverseKinematicsSolution, InverseKinematicsWristSolution
class InverseKinematics(Kinematics):
def __init__(self):
super().__init__()
self.forward_kinematics = ForwardKinematics()
def __clamp_cos_sin_within_threshold(self, cos_or_sin):
new_val = cos_or_sin
if 1 < new_val <= 1.2:
new_val = 1.0
elif -1.2 <= new_val < -1:
new_val = -1.0
return new_val
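    # Note: numerical noise can push a computed cosine such as 1.0000000002
    # slightly outside [-1, 1], which would make math.acos raise a ValueError;
    # values within the 1.2 tolerance are snapped back to +/-1, while anything
    # further out is left for the callers' validity checks to reject.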
def __compute_solution_for_theta_1(self, T06, theta_1, debug=False):
wrist_solution = InverseKinematicsWristSolution()
# Theta 5
P06 = T06[:, 3]
theta_5_1 = None
theta_5_2 = None
theta_5_cos = (P06[0] * math.sin(theta_1) - P06[1] * np.cos(
theta_1) - self.joint4_dh.d) / self.joint6_dh.d
theta_5_cos = self.__clamp_cos_sin_within_threshold(theta_5_cos)
if -1 <= theta_5_cos <= 1:
theta_5_1 = math.acos(theta_5_cos)
theta_5_2 = -math.acos(theta_5_cos)
sigma = 0.00001
if theta_5_1 is not None and not -sigma <= math.sin(theta_5_1) <= sigma:
wrist_solution.solution_wrist_up = self.__compute_solution_for_wrist(theta_1, theta_5_1, T06)
else:
wrist_solution.solution_wrist_up.is_valid_solution = False
if theta_5_2 is not None and not -sigma <= math.sin(theta_5_2) <= sigma:
wrist_solution.solution_wrist_down = self.__compute_solution_for_wrist(theta_1, theta_5_2, T06)
else:
wrist_solution.solution_wrist_down.is_valid_solution = False
if not wrist_solution.solution_wrist_up.is_valid_solution and not wrist_solution.solution_wrist_down.is_valid_solution:
wrist_solution.is_valid_solution = False
if debug:
print(f"Theta 5: {theta_5_1:.3f}, {theta_5_2:.3f}")
return wrist_solution
def __compute_solution_for_wrist(self, theta_1, theta_5, T06, debug=False):
shoulder_solution = InverseKinematicsShoulderSolution()
# Theta 6
T60 = np.linalg.inv(T06)
X60 = T60[:, 0]
Y60 = T60[:, 1]
theta_6_cos = (X60[0] * math.sin(theta_1) - Y60[0] * math.cos(theta_1)) / math.sin(
theta_5) # only using one of the theta 5's for now..
theta_6_sin = (-X60[1] * math.sin(theta_1) + Y60[1] * math.cos(theta_1)) / math.sin(
theta_5) # only using one of the theta 5's for now..
theta_6 = math.atan2(theta_6_sin, theta_6_cos)
if debug:
print(f"Theta 6: {theta_6:.3f}")
tm_dict = {}
# Theta 3
T01 = self.compute_transformation_matrix(theta_1, self.joint1_dh)
T45 = self.compute_transformation_matrix(theta_5, self.joint5_dh)
T56 = self.compute_transformation_matrix(theta_6, self.joint6_dh)
T46 = np.matmul(T45, T56)
T64 = np.linalg.inv(T46)
T10 = np.linalg.inv(T01)
T14 = np.matmul(np.matmul(T10, T06), T64)
P14 = T14[:, 3]
tm_dict["T06"] = T06
tm_dict["T01"] = T01
tm_dict["T45"] = T45
tm_dict["T56"] = T56
tm_dict["T64"] = T64
tm_dict["T10"] = T10
tm_dict["T14"] = T14
tm_dict["P14"] = P14
theta_3_cos = (math.sqrt(
P14[0] ** 2 + P14[2] ** 2) ** 2 - self.joint3_dh.a ** 2 - self.joint4_dh.a ** 2) / (
2 * (-self.joint3_dh.a) * (-self.joint4_dh.a))
if debug:
print("theta3_cos: ", theta_3_cos)
theta_3_cos = self.__clamp_cos_sin_within_threshold(theta_3_cos)
if not -1 <= theta_3_cos <= 1:
shoulder_solution.is_valid_solution = False
return shoulder_solution
theta_3_up = math.acos(theta_3_cos)
theta_3_down = -math.acos(theta_3_cos)
if debug:
print(f"Theta 3: Up: {theta_3_up:.3f} Down: {theta_3_down:.3f}")
shoulder_solution.solution_elbow_up = self.__compute_specific_solution(theta_1, theta_3_up, theta_5, theta_6, tm_dict)
shoulder_solution.solution_elbow_down = self.__compute_specific_solution(theta_1, theta_3_down, theta_5, theta_6, tm_dict)
return shoulder_solution
def __compute_specific_solution(self, theta_1, theta_3, theta_5, theta_6, tm_dict, debug=False):
specific_solution = InverseKinematicsSpecificSolution()
P14 = tm_dict["P14"]
phi_1 = math.atan2(-P14[2], -P14[0])
phi_2 = math.asin((-self.joint4_dh.a * math.sin(theta_3)) / math.sqrt(P14[0]**2 + P14[2]**2))
theta_2 = phi_1 - phi_2
if debug:
print(f"Theta 2: {theta_2:.3f}")
T01 = tm_dict["T01"]
T12 = self.compute_transformation_matrix(theta_2, self.joint2_dh)
T23 = self.compute_transformation_matrix(theta_3, self.joint3_dh)
T45 = tm_dict["T45"]
T56 = tm_dict["T56"]
T06 = tm_dict["T06"]
T03 = np.matmul(np.matmul(T01, T12), T23)
T30 = np.linalg.inv(T03)
T64 = tm_dict["T64"]
T34 = np.matmul(np.matmul(T30, T06), T64)
X34 = T34[:, 0]
theta_4 = math.atan2(X34[1], X34[0])
if debug:
print(f"Theta 4: {theta_4:.3f}")
specific_solution.thetas = [theta_1, theta_2, theta_3, theta_4, theta_5, theta_6]
return specific_solution
def __print_all_solutions(self, solution):
print("Inverse Solutions:")
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
print(
f"Shoulder left, wrist up, elbow up: {solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
print(
f"Shoulder left, wrist up, elbow down: {solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas}")
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
print(
f"Shoulder left, wrist down, elbow up: {solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas}")
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down:
print(
f"Shoulder left, wrist down, elbow down: {solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas}")
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
print(
f"Shoulder right, wrist up, elbow up: {solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
print(
f"Shoulder right, wrist up, elbow down: {solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
print(
f"Shoulder right, wrist down, elbow up: {solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
print(
f"Shoulder right, wrist down, elbow down: {solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas}")
def compute_joint_angles(self, T06, debug=False):
solution = InverseKinematicsSolution()
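# Closed-form IK for this 6-DOF UR-style arm yields up to 8 configurations:
# 2 shoulder (theta_1) x 2 wrist (theta_5) x 2 elbow (theta_3) branches,
# matching config ids 0-7 used further below. Each branch is computed
# independently and flagged invalid when its acos/asin argument falls
# outside [-1, 1].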
#Theta 1
P05 = np.dot(T06, [0, 0, -self.joint6_dh.d, 1])
phi_1 = math.atan2(P05[1], P05[0])
phi_2_cos = self.joint4_dh.d / math.sqrt(P05[0]**2 + P05[1]**2)
phi_2 = math.acos(phi_2_cos)
theta_1_1 = phi_1 + phi_2 + (np.pi / 2)
theta_1_2 = phi_1 - phi_2 + (np.pi / 2)
if debug:
print(f"Theta 1: {theta_1_1:.3f}, {theta_1_2:.3f}")
if not math.isnan(theta_1_1):
solution.solution_shoulder_left = self.__compute_solution_for_theta_1(T06, theta_1_1, debug)
else:
solution.solution_shoulder_left = InverseKinematicsWristSolution()
solution.solution_shoulder_left.is_valid_solution = False
if not math.isnan(theta_1_2):
solution.solution_shoulder_right = self.__compute_solution_for_theta_1(T06, theta_1_2, debug)
else:
solution.solution_shoulder_right = InverseKinematicsWristSolution()
solution.solution_shoulder_right.is_valid_solution = False
if debug:
self.__print_all_solutions(solution)
return solution
def get_solution_for_config_id(self, solution, config_id):
if config_id == 0:
return solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas
elif config_id == 1:
return solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas
elif config_id == 2:
return solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas
elif config_id == 3:
return solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas
elif config_id == 4:
return solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas
elif config_id == 5:
return solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.thetas
elif config_id == 6:
return solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas
elif config_id == 7:
return solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.thetas
else:
raise Exception("invalid config solution id")
def get_best_solution_for_config_id(self, T06, config_id):
solution = self.compute_joint_angles(T06)
if self.is_valid_solution_by_config_id(solution, config_id):
return self.get_solution_for_config_id(solution, config_id)
else:
index = config_id + 1
checked_all = False
while not checked_all:
if index >= 8:
index = 0
if index == config_id:
print('Found no valid solutions..')
return None
if self.is_valid_solution_by_config_id(solution, index):
return self.get_solution_for_config_id(solution, index)
index += 1
def is_valid_solution_by_config_id(self, solution, config_id):
if 0 <= config_id < 4 and solution.solution_shoulder_left.is_valid_solution:
if 0 <= config_id < 2 and solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if config_id == 0 and solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
return True
if config_id == 1 and solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
return True
if 2 <= config_id < 4 and solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if config_id == 2 and solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
return True
if config_id == 3 and solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.is_valid_solution:
return True
if 4 <= config_id < 8 and solution.solution_shoulder_right.is_valid_solution:
if 4 <= config_id < 6 and solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if config_id == 4 and solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
return True
if config_id == 5 and solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
return True
if 6 <= config_id < 8 and solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if config_id == 6 and solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
return True
if config_id == 7 and solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
return True
return False
def get_current_configuration_id(self, joint_angles):
T06 = self.forward_kinematics.compute_0_to_6_matrix(joint_angles)
solution = self.compute_joint_angles(T06)
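# Heuristic: every configuration starts with a large penalty (1000.0),
# branches that produced a valid solution are reset to 0, and the summed
# absolute joint-angle differences then let np.argmin pick the
# configuration closest to the current joint state.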
differences = np.full(8, 1000.0)  # float array so fractional angle differences accumulate without truncation
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[0] = 0
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[1] = 0
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[2] = 0
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[3] = 0
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[4] = 0
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[5] = 0
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[6] = 0
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[7] = 0
for i in range(6):
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[0] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas[i])
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[1] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas[i])
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[2] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas[i])
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[3] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas[i])
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[4] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas[i])
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[5] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.thetas[i])
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[6] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas[i])
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[7] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.thetas[i])
print(differences)
return np.argmin(differences)
| 53.836364 | 147 | 0.672295 | [
"MIT"
] | EmilRyberg/P8LH7Grounding | webots/controllers/ur_controller/kinematics/inverse.py | 17,766 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
das developers note:
This a is modification of the original SpacePy pycdf package. All
refereneces to the greater spacepy package have been removed to create
a small standalone module.
--cwp 2018-10-18
The libcdf.so location code has been changed to find the version installed
in anaconda.
--cwp 2020-04-06
This package provides a Python interface to the Common Data Format (CDF)
library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/.
It is targeted at Python 2.6+ and should work without change on either
Python 2 or Python 3.
The interface is intended to be 'pythonic' rather than reproducing the
C interface. To open or close a CDF and access its variables, see the :class:`CDF`
class. Accessing data within the variables is via the :class:`Var`
class. The :data:`lib` object provides access to some routines
that affect the functionality of the library in general. The
:mod:`~pycdf.const` module contains constants useful for accessing
the underlying library.
Authors: Jon Niehof
Institution: University of New Hampshire
Contact: [email protected]
Copyright 2010-2015 Los Alamos National Security, LLC.
"""
__contact__ = 'Jon Niehof, [email protected]'
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
import ctypes
import ctypes.util
import datetime
import operator
import os
import os.path
import shutil
import sys
import tempfile
import warnings
import weakref
import numpy
import numpy.ma
#Import const AFTER library loaded, so failed load doesn't leave half-imported
#from . import const
try:
str_classes = (str, bytes, unicode)
except NameError:
str_classes = (str, bytes)
class Library(object):
"""
Abstraction of the base CDF C library and its state.
Not normally intended for end-user use. An instance of this class
is created at package load time as the :data:`~pycdf.lib` variable, providing
access to the underlying C library if necessary. The CDF library itself
is described in section 2.1 of the CDF user's guide, as well as the CDF
C reference manual.
Calling the C library directly requires knowledge of
:mod:`ctypes`.
Instantiating this object loads the C library, see :doc:`/pycdf` docs
for details.
.. autosummary::
~Library.call
~Library.check_status
~Library.datetime_to_epoch
~Library.datetime_to_epoch16
~Library.datetime_to_tt2000
~Library.epoch_to_datetime
~Library.epoch_to_epoch16
~Library.epoch_to_num
~Library.epoch_to_tt2000
~Library.epoch16_to_datetime
~Library.epoch16_to_epoch
~Library.epoch16_to_tt2000
~Library.set_backward
supports_int8
~Library.tt2000_to_datetime
~Library.tt2000_to_epoch
~Library.tt2000_to_epoch16
v_datetime_to_epoch
v_datetime_to_epoch16
v_datetime_to_tt2000
v_epoch_to_datetime
v_epoch_to_tt2000
v_epoch16_to_datetime
v_epoch16_to_tt2000
v_tt2000_to_datetime
v_tt2000_to_epoch
v_tt2000_to_epoch16
libpath
version
.. automethod:: call
.. automethod:: check_status
.. automethod:: datetime_to_epoch
.. automethod:: datetime_to_epoch16
.. automethod:: datetime_to_tt2000
.. automethod:: epoch_to_datetime
.. automethod:: epoch_to_epoch16
.. automethod:: epoch_to_num
.. automethod:: epoch_to_tt2000
.. automethod:: epoch16_to_datetime
.. automethod:: epoch16_to_epoch
.. automethod:: epoch16_to_tt2000
.. automethod:: set_backward
.. attribute:: supports_int8
True if this library supports INT8 and TIME_TT2000 types; else False.
.. automethod:: tt2000_to_datetime
.. automethod:: tt2000_to_epoch
.. automethod:: tt2000_to_epoch16
.. method:: v_datetime_to_epoch(datetime)
A vectorized version of :meth:`datetime_to_epoch` which takes a
numpy array of datetimes as input and returns an array of epochs.
.. method:: v_datetime_to_epoch16(datetime)
A vectorized version of :meth:`datetime_to_epoch16` which takes a
numpy array of datetimes as input and returns an array of epoch16.
.. method:: v_datetime_to_tt2000(datetime)
A vectorized version of :meth:`datetime_to_tt2000` which takes a
numpy array of datetimes as input and returns an array of TT2000.
.. method:: v_epoch_to_datetime(epoch)
A vectorized version of :meth:`epoch_to_datetime` which takes a
numpy array of epochs as input and returns an array of datetimes.
.. method:: v_epoch_to_tt2000(epoch)
A vectorized version of :meth:`epoch_to_tt2000` which takes a
numpy array of epochs as input and returns an array of tt2000s.
.. method:: v_epoch16_to_datetime(epoch0, epoch1)
A vectorized version of :meth:`epoch16_to_datetime` which takes
a numpy array of epoch16 as input and returns an array of datetimes.
An epoch16 is a pair of doubles; the input array's last dimension
must be two (and the returned array will have one fewer dimension).
.. method:: v_epoch16_to_tt2000(epoch16)
A vectorized version of :meth:`epoch16_to_tt2000` which takes
a numpy array of epoch16 as input and returns an array of tt2000s.
An epoch16 is a pair of doubles; the input array's last dimension
must be two (and the returned array will have one fewer dimension).
.. method:: v_tt2000_to_datetime(tt2000)
A vectorized version of :meth:`tt2000_to_datetime` which takes
a numpy array of tt2000 as input and returns an array of datetimes.
.. method:: v_tt2000_to_epoch(tt2000)
A vectorized version of :meth:`tt2000_to_epoch` which takes
a numpy array of tt2000 as input and returns an array of epochs.
.. method:: v_tt2000_to_epoch16(tt2000)
A vectorized version of :meth:`tt2000_to_epoch16` which takes
a numpy array of tt2000 as input and returns an array of epoch16.
.. attribute:: libpath
The path where pycdf found the CDF C library, potentially useful in
debugging. If this contains just the name of a file (with no path
information), then the system linker found the library for pycdf.
On Linux, ``ldconfig -p`` may be useful for displaying the system's
library resolution.
.. attribute:: version
Version of the CDF library, (version, release, increment, subincrement)
"""
def __init__(self, libpath=None, library=None):
"""Load the CDF C library.
Searches for the library in the order:
1. Appropriately-named file in CDF_LIB
2. Appropriately-named file in CDF_BASE
3. Standard library search path
@raise CDFError: BAD_DATA_TYPE if can't map types properly
"""
if not 'CDF_TMP' in os.environ:
os.environ['CDF_TMP'] = tempfile.gettempdir()
if not library:
if not libpath:
self.libpath, self._library = self._find_lib()
if self._library is None:
raise Exception((
'Cannot load CDF C library; checked {0}. '
'Try \'os.environ["CDF_LIB"] = library_directory\' '
'before import.').format(', '.join(self.libpath)))
else:
self._library = ctypes.CDLL(libpath)
self.libpath = libpath
else:
self._library = library
self.libpath = libpath
self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here
self._library.EPOCHbreakdown.restype = ctypes.c_long
self._library.computeEPOCH.restype = ctypes.c_double
self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7
self._library.computeEPOCH16.restype = ctypes.c_double
self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \
[ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDFsetFileBackward'):
self._library.CDFsetFileBackward.restype = None
self._library.CDFsetFileBackward.argtypes = [ctypes.c_long]
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'computeTT2000') \
and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'):
self._library.computeTT2000 \
= self._library.CDF_TT2000_from_UTC_parts
if hasattr(self._library, 'computeTT2000'):
self._library.computeTT2000.restype = ctypes.c_longlong
self._library.computeTT2000.argtypes = \
[ctypes.c_double] *9
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'breakdownTT2000') \
and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'):
self._library.breakdownTT2000 \
= self._library.CDF_TT2000_to_UTC_parts
if hasattr(self._library, 'breakdownTT2000'):
self._library.breakdownTT2000.restype = None
self._library.breakdownTT2000.argtypes = \
[ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'):
self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'):
self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double]
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'):
self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \
[ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'):
self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \
ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \
[ctypes.POINTER(ctypes.c_double * 2)]
#Get CDF version information
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
sub = ctypes.c_char(b' ')
self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver),
const.GET_, const.LIB_RELEASE_, ctypes.byref(rel),
const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc),
const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub))
ver = ver.value
rel = rel.value
inc = inc.value
sub = sub.value
self.version = (ver, rel, inc, sub)
self._del_middle_rec_bug = ver < 3 or (ver == 3 and
(rel < 4 or
(rel == 4 and inc < 1)))
self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4))
self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE',
const.CDF_CHAR.value: 'CDF_CHAR',
const.CDF_INT1.value: 'CDF_INT1',
const.CDF_UCHAR.value: 'CDF_UCHAR',
const.CDF_UINT1.value: 'CDF_UINT1',
const.CDF_INT2.value: 'CDF_INT2',
const.CDF_UINT2.value: 'CDF_UINT2',
const.CDF_INT4.value: 'CDF_INT4',
const.CDF_UINT4.value: 'CDF_UINT4',
const.CDF_INT8.value: 'CDF_INT8',
const.CDF_FLOAT.value: 'CDF_FLOAT',
const.CDF_REAL4.value: 'CDF_REAL4',
const.CDF_DOUBLE.value: 'CDF_DOUBLE',
const.CDF_REAL8.value: 'CDF_REAL8',
const.CDF_EPOCH.value: 'CDF_EPOCH',
const.CDF_EPOCH16.value: 'CDF_EPOCH16',
const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000',
}
self.numpytypedict = {const.CDF_BYTE.value: numpy.int8,
const.CDF_CHAR.value: numpy.int8,
const.CDF_INT1.value: numpy.int8,
const.CDF_UCHAR.value: numpy.uint8,
const.CDF_UINT1.value: numpy.uint8,
const.CDF_INT2.value: numpy.int16,
const.CDF_UINT2.value: numpy.uint16,
const.CDF_INT4.value: numpy.int32,
const.CDF_UINT4.value: numpy.uint32,
const.CDF_INT8.value: numpy.int64,
const.CDF_FLOAT.value: numpy.float32,
const.CDF_REAL4.value: numpy.float32,
const.CDF_DOUBLE.value: numpy.float64,
const.CDF_REAL8.value: numpy.float64,
const.CDF_EPOCH.value: numpy.float64,
const.CDF_EPOCH16.value:
numpy.dtype((numpy.float64, 2)),
const.CDF_TIME_TT2000.value: numpy.int64,
}
self.timetypes = [const.CDF_EPOCH.value,
const.CDF_EPOCH16.value,
const.CDF_TIME_TT2000.value]
if not self.supports_int8:
del self.cdftypenames[const.CDF_INT8.value]
del self.numpytypedict[const.CDF_INT8.value]
del self.cdftypenames[const.CDF_TIME_TT2000.value]
del self.numpytypedict[const.CDF_TIME_TT2000.value]
elif sys.platform.startswith('linux') \
and os.uname()[4].startswith('arm') \
and hasattr(self._library, 'computeTT2000') \
and self._library.computeTT2000(
2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000:
#TT2000 call failed, so probably need to type-pun
#double arguments to variadic functions.
#Calling convention for non-variadic functions with floats
#is unique, but convention for ints is same as variadic.
#So type-pun arguments to integers to force that calling
#convention.
if ctypes.sizeof(ctypes.c_longlong) != \
ctypes.sizeof(ctypes.c_double):
warnings.warn('ARM with unknown type sizes; '
'TT2000 functions will not work.')
else:
self._library.computeTT2000.argtypes = \
[ctypes.c_longlong] * 9
c_ll_p = ctypes.POINTER(ctypes.c_longlong)
if self._library.computeTT2000(
ctypes.cast(ctypes.pointer(ctypes.c_double(
2010)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents) != 315576066184000000:
warnings.warn('ARM with unknown calling convention; '
'TT2000 functions will not work.')
self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned
v_epoch16_to_datetime = numpy.frompyfunc(
self.epoch16_to_datetime, 2, 1)
self.v_epoch16_to_datetime = \
lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1])
self.v_epoch_to_datetime = numpy.frompyfunc(
self.epoch_to_datetime, 1, 1)
self.v_tt2000_to_datetime = numpy.frompyfunc(
self.tt2000_to_datetime, 1, 1)
self.v_datetime_to_epoch = numpy.vectorize(
self.datetime_to_epoch, otypes=[numpy.float64])
v_datetime_to_epoch16 = numpy.frompyfunc(
self.datetime_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_datetime_to_epoch16(x):
retval = numpy.require(v_datetime_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_datetime_to_epoch16 = _v_datetime_to_epoch16
self.v_datetime_to_tt2000 = numpy.vectorize(
self.datetime_to_tt2000, otypes=[numpy.int64])
self.v_epoch_to_tt2000 = numpy.vectorize(
self.epoch_to_tt2000, otypes=[numpy.int64])
self.v_tt2000_to_epoch = numpy.vectorize(
self.tt2000_to_epoch, otypes=[numpy.float64])
v_epoch16_to_tt2000 = numpy.frompyfunc(
self.epoch16_to_tt2000, 2, 1)
self.v_epoch16_to_tt2000 = \
lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1])
v_tt2000_to_epoch16 = numpy.frompyfunc(
self.tt2000_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_tt2000_to_epoch16(x):
retval = numpy.require(v_tt2000_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16
if not self.supports_int8:
self.datetime_to_tt2000 = self._bad_tt2000
self.tt2000_to_datetime = self._bad_tt2000
self.v_datetime_to_tt2000 = self._bad_tt2000
self.v_tt2000_to_datetime = self._bad_tt2000
self.epoch_to_tt2000 = self._bad_tt2000
self.v_epoch_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch = self._bad_tt2000
self.v_tt2000_to_epoch = self._bad_tt2000
self.epoch16_to_tt2000 = self._bad_tt2000
self.v_epoch16_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch16 = self._bad_tt2000
self.v_tt2000_to_epoch16 = self._bad_tt2000
#Default to V2 CDF
self.set_backward(True)
@staticmethod
def _find_lib():
"""
Search for the CDF library
Searches in likely locations for CDF libraries and attempts to load
them. Stops at first successful load and, if fails, reports all
the files that were tried as libraries.
Returns
=======
out : tuple
This is either (path to library, loaded library)
or, in the event of failure, (list of libraries tried, None)
"""
failed = []
for libpath in Library._lib_paths():
try:
lib = ctypes.CDLL(libpath)
except:
failed.append(libpath)
else:
return libpath, lib
return (failed, None)
@staticmethod
def _lib_paths():
"""Find candidate paths for the CDF library
Does not check that the library is actually in any particular directory,
just returns a list of possible locations, in priority order.
Returns
=======
out : generator of str
paths that look like the CDF library
"""
#What the library might be named
names = { 'win32': ['cdf.dll'],
'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'],
'linux2': ['libcdf.so'],
'linux': ['libcdf.so'],
}
names = names.get(sys.platform, ['libcdf.so'])
#All existing CDF-library-like paths within a directory
search_dir = lambda x: \
[os.path.join(x, fname) for fname in names
if os.path.exists(os.path.join(x, fname))]
# Only use anaconda locations...
# Defined during builds ...
if 'PREFIX' in os.environ:
if sys.platform == 'win32':
for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')):
yield p
else:
for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')):
yield p
# defined when conda is activated ...
if 'CONDA_PREFIX' in os.environ:
if sys.platform == 'win32':
for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')):
yield p
else:
for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')):
yield p
# Special subdirectory for anaconda unix packages on windows
if 'LIBRARY_BIN' in os.environ:
for p in search_dir(os.environ['LIBRARY_BIN']):
yield p
ctypespath = ctypes.util.find_library(
'cdf.dll' if sys.platform == 'win32' else 'cdf')
if ctypespath:
yield ctypespath
def check_status(self, status, ignore=()):
"""
Raise exception or warning based on return status of CDF call
Parameters
==========
status : int
status returned by the C library
Other Parameters
================
ignore : sequence of ctypes.c_long
CDF statuses to ignore. If any of these is returned by CDF library,
any related warnings or exceptions will *not* be raised.
(Default none).
Raises
======
CDFError : if status < CDF_WARN, indicating an error
Warns
=====
CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning.
Returns
=======
out : int
status (unchanged)
"""
if status == const.CDF_OK or status in ignore:
return status
if status < const.CDF_WARN:
raise CDFError(status)
else:
warning = CDFWarning(status)
warning.warn()
return status
def call(self, *args, **kwargs):
"""
Call the CDF internal interface
Passes all parameters directly through to the CDFlib routine of the
CDF library's C internal interface. Checks the return value with
:meth:`check_status`.
Terminal NULL is automatically added to args.
Parameters
==========
args : various, see :mod:`ctypes`
Passed directly to the CDF library interface. Useful
constants are defined in the :mod:`~pycdf.const` module.
Other Parameters
================
ignore : sequence of CDF statuses
sequence of CDF statuses to ignore. If any of these
is returned by CDF library, any related warnings or
exceptions will *not* be raised.
Returns
=======
out : int
CDF status from the library
Raises
======
CDFError : if CDF library reports an error
Warns
=====
CDFWarning : if CDF library reports a warning
"""
if 'ignore' in kwargs:
return self.check_status(self._library.CDFlib(
*(args + (const.NULL_, ))
), kwargs['ignore'])
else:
return self.check_status(self._library.CDFlib(
*(args + (const.NULL_, ))
))
def set_backward(self, backward=True):
"""
Set backward compatibility mode for new CDFs
Unless backward compatible mode is set, CDF files created by
the version 3 library can not be read by V2.
Parameters
==========
backward : boolean
Set backward compatible mode if True; clear it if False.
Raises
======
ValueError : if backward=False and underlying CDF library is V2
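Examples
========
A minimal sketch (assumes the module-level ``pycdf.lib`` instance;
the file name is hypothetical):

>>> import pycdf
>>> pycdf.lib.set_backward(False)  # subsequent CDFs are V3 format
>>> cdf = pycdf.CDF('v3_file.cdf', '')
>>> pycdf.lib.set_backward(True)   # restore the backward-compatible default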
"""
if self.version[0] < 3:
if not backward:
raise ValueError(
'Cannot disable backward-compatible mode for CDF version 2.')
else:
return
self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward
else const.BACKWARDFILEoff)
def epoch_to_datetime(self, epoch):
"""
Converts a CDF epoch value to a datetime
Parameters
==========
epoch : float
epoch value from CDF
Returns
=======
out : :class:`datetime.datetime`
date and time corresponding to epoch. Invalid values are set to
usual epoch invalid value, i.e. last moment of year 9999.
See Also
========
v_epoch_to_datetime
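Examples
========
Illustrative round trip through EPOCH (assumes the module-level
``pycdf.lib`` instance and a working CDF C library; the intermediate
float is whatever the library computes):

>>> import datetime, pycdf
>>> ep = pycdf.lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> pycdf.lib.epoch_to_datetime(ep)
datetime.datetime(2010, 1, 1, 0, 0)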
"""
yyyy = ctypes.c_long(0)
mm = ctypes.c_long(0)
dd = ctypes.c_long(0)
hh = ctypes.c_long(0)
min = ctypes.c_long(0)
sec = ctypes.c_long(0)
msec = ctypes.c_long(0)
self._library.EPOCHbreakdown(ctypes.c_double(epoch),
ctypes.byref(yyyy), ctypes.byref(mm),
ctypes.byref(dd),
ctypes.byref(hh), ctypes.byref(min),
ctypes.byref(sec), ctypes.byref(msec))
if yyyy.value <= 0:
return datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)
else:
return datetime.datetime(yyyy.value, mm.value, dd.value,
hh.value, min.value, sec.value,
msec.value * 1000)
def datetime_to_epoch(self, dt):
"""
Converts a Python datetime to a CDF Epoch value
Parameters
==========
dt : :class:`datetime.datetime`
date and time to convert
Returns
=======
out : float
epoch corresponding to dt
See Also
========
v_datetime_to_epoch
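Examples
========
Sketch of the sub-millisecond rounding behaviour (assumes the
module-level ``pycdf.lib`` instance): 600 microseconds rounds up to
the next millisecond, since EPOCH only carries millisecond resolution.

>>> import datetime, pycdf
>>> ep = pycdf.lib.datetime_to_epoch(
...     datetime.datetime(2010, 1, 1, 0, 0, 0, 600))
>>> pycdf.lib.epoch_to_datetime(ep)
datetime.datetime(2010, 1, 1, 0, 0, 0, 1000)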
"""
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt = dt.replace(tzinfo=None)
micro = dt.microsecond % 1000
if micro >= 500 and dt.year < 9999:
dt += datetime.timedelta(0, 0, 1000)
return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000))
def epoch16_to_datetime(self, epoch0, epoch1):
"""
Converts a CDF epoch16 value to a datetime
.. note::
The call signature has changed since SpacePy 0.1.2. Formerly
this method took a single argument with two values; now it
requires two arguments (one for each value). To convert existing
code, replace ``epoch16_to_datetime(epoch)`` with
``epoch16_to_datetime(*epoch)``.
Parameters
==========
epoch0 : float
epoch16 value from CDF, first half
epoch1 : float
epoch16 value from CDF, second half
Raises
======
EpochError : if input invalid
Returns
=======
out : :class:`datetime.datetime`
date and time corresponding to epoch. Invalid values are set to
usual epoch invalid value, i.e. last moment of year 9999.
See Also
========
v_epoch16_to_datetime
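Examples
========
Illustrative round trip through EPOCH16 (assumes the module-level
``pycdf.lib`` instance); microsecond detail survives because EPOCH16
carries sub-millisecond resolution:

>>> import datetime, pycdf
>>> e0, e1 = pycdf.lib.datetime_to_epoch16(
...     datetime.datetime(2010, 1, 1, 12, 0, 0, 5))
>>> pycdf.lib.epoch16_to_datetime(e0, e1)
datetime.datetime(2010, 1, 1, 12, 0, 0, 5)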
"""
yyyy = ctypes.c_long(0)
mm = ctypes.c_long(0)
dd = ctypes.c_long(0)
hh = ctypes.c_long(0)
min = ctypes.c_long(0)
sec = ctypes.c_long(0)
msec = ctypes.c_long(0)
usec = ctypes.c_long(0)
nsec = ctypes.c_long(0)
psec = ctypes.c_long(0)
self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1),
ctypes.byref(yyyy), ctypes.byref(mm),
ctypes.byref(dd),
ctypes.byref(hh), ctypes.byref(min),
ctypes.byref(sec), ctypes.byref(msec),
ctypes.byref(usec), ctypes.byref(nsec),
ctypes.byref(psec))
if yyyy.value <= 0:
return datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
micro = int(float(msec.value) * 1000 + float(usec.value) +
float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5)
if micro < 1000000:
return datetime.datetime(yyyy.value, mm.value, dd.value,
hh.value, min.value, sec.value,
micro)
else:
add_sec = int(micro / 1000000)
try:
return datetime.datetime(yyyy.value, mm.value, dd.value,
hh.value, min.value, sec.value,
micro - add_sec * 1000000) + \
datetime.timedelta(seconds=add_sec)
except OverflowError:
return datetime.datetime(datetime.MAXYEAR, 12, 31,
23, 59, 59,
999999)
def datetime_to_epoch16(self, dt):
"""
Converts a Python datetime to a CDF Epoch16 value
Parameters
==========
dt : :class:`datetime.datetime`
date and time to convert
Returns
=======
out : list of float
epoch16 corresponding to dt
See Also
========
v_datetime_to_epoch16
"""
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt = dt.replace(tzinfo=None)
#Default to "illegal epoch"
epoch16 = (ctypes.c_double * 2)(-1., -1.)
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000),
dt.microsecond % 1000, 0, 0,
epoch16):
return (-1., -1.) #Failure, so illegal epoch
return (epoch16[0], epoch16[1])
def epoch_to_epoch16(self, epoch):
"""
Converts a CDF EPOCH to a CDF EPOCH16 value
Parameters
==========
epoch : double
EPOCH to convert. Lists and numpy arrays are acceptable.
Returns
=======
out : (double, double)
EPOCH16 corresponding to epoch
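Examples
========
Sketch of the output layout, (seconds, picoseconds), for a scalar
EPOCH of 1002.0 milliseconds (assumes the module-level ``pycdf.lib``
instance):

>>> import pycdf
>>> pair = pycdf.lib.epoch_to_epoch16(1002.0)
>>> float(pair[0]), float(pair[1])
(1.0, 2000000000.0)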
"""
e = numpy.require(epoch, numpy.float64)
s = numpy.trunc(e / 1000.0)
#ugly numpy stuff, probably a better way....
res = numpy.hstack((s, (e - s * 1000.0) * 1e9))
if len(res) <= 2:
return res
newshape = list(res.shape[0:-2])
newshape.append(res.shape[-1] // 2)
newshape.append(2)
return numpy.rollaxis(res.reshape(newshape), -1, -2)
def epoch_to_num(self, epoch):
"""
Convert CDF EPOCH to matplotlib number.
Same output as :func:`~matplotlib.dates.date2num` and useful for
plotting large data sets without converting the times through datetime.
Parameters
==========
epoch : double
EPOCH to convert. Lists and numpy arrays are acceptable.
Returns
=======
out : double
Floating point number representing days since 0001-01-01.
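Examples
========
Worked value using the constants in the implementation below: the
EPOCH of 0001-01-01 00:00 UT (31622400000.0 ms) is matplotlib day 1.

>>> import pycdf
>>> pycdf.lib.epoch_to_num(31622400000.0)
1.0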
"""
#date2num day 1 is 1/1/1 00UT
#epoch 1/1/1 00UT is 31622400000.0 (millisecond)
return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0
def epoch16_to_epoch(self, epoch16):
"""
Converts a CDF EPOCH16 to a CDF EPOCH value
Parameters
==========
epoch16 : (double, double)
EPOCH16 to convert. Lists and numpy arrays are acceptable.
LAST dimension should be 2: the two pairs of EPOCH16
Returns
=======
out : double
EPOCH corresponding to epoch16
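Examples
========
Inverse of the :meth:`epoch_to_epoch16` sketch: (1.0 s, 2e9 ps) maps
back to 1002.0 ms; any sub-millisecond detail is rounded away.

>>> import pycdf
>>> float(pycdf.lib.epoch16_to_epoch((1.0, 2000000000.0)))
1002.0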
"""
e = numpy.require(epoch16, numpy.float64)
return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9)
def tt2000_to_datetime(self, tt2000):
"""
Converts a CDF TT2000 value to a datetime
.. note::
Although TT2000 values support leapseconds, Python's datetime
object does not. Any times after 23:59:59.999999 will
be truncated to 23:59:59.999999.
Parameters
==========
tt2000 : int
TT2000 value from CDF
Raises
======
EpochError : if input invalid
Returns
=======
out : :class:`datetime.datetime`
date and time corresponding to epoch. Invalid values are set to
usual epoch invalid value, i.e. last moment of year 9999.
See Also
========
v_tt2000_to_datetime
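Examples
========
Illustrative value (assumes a TT2000-capable CDF library); the input
is the 2010-01-01 self-test constant used at library load time:

>>> import pycdf
>>> pycdf.lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)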
"""
yyyy = ctypes.c_double(0)
mm = ctypes.c_double(0)
dd = ctypes.c_double(0)
hh = ctypes.c_double(0)
min = ctypes.c_double(0)
sec = ctypes.c_double(0)
msec = ctypes.c_double(0)
usec = ctypes.c_double(0)
nsec = ctypes.c_double(0)
self._library.breakdownTT2000(
ctypes.c_longlong(tt2000),
ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd),
ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec),
ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec))
if yyyy.value <= 0:
return datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
sec = int(sec.value)
if sec >= 60:
return datetime.datetime(
int(yyyy.value), int(mm.value), int(dd.value),
int(hh.value), int(min.value), 59, 999999)
micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5)
if micro < 1000000:
return datetime.datetime(
int(yyyy.value), int(mm.value), int(dd.value),
int(hh.value), int(min.value), sec, micro)
else:
add_sec = int(micro / 1000000)
try:
return datetime.datetime(
int(yyyy.value), int(mm.value), int(dd.value),
int(hh.value), int(min.value), sec,
micro - add_sec * 1000000) + \
datetime.timedelta(seconds=add_sec)
except OverflowError:
return datetime.datetime(datetime.MAXYEAR, 12, 31,
23, 59, 59, 999999)
def datetime_to_tt2000(self, dt):
"""
Converts a Python datetime to a CDF TT2000 value
Parameters
==========
dt : :class:`datetime.datetime`
date and time to convert
Returns
=======
out : int
tt2000 corresponding to dt
See Also
========
v_datetime_to_tt2000
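Examples
========
Illustrative value, matching the 2010-01-01 self-test constant used
at library load time (assumes a TT2000-capable CDF library):

>>> import datetime, pycdf
>>> pycdf.lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
315576066184000000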
"""
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt = dt.replace(tzinfo=None)
if dt == datetime.datetime.max:
return -2**63
return self._library.computeTT2000(
dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000),
dt.microsecond % 1000, 0)
def _datetime_to_tt2000_typepunned(self, dt):
"""
Converts a Python datetime to a CDF TT2000 value
Typepunned version that passes doubles as longlongs, to get around
ARM calling convention oddness.
Parameters
==========
dt : :class:`datetime.datetime`
date and time to convert
Returns
=======
out : int
tt2000 corresponding to dt
See Also
========
v_datetime_to_tt2000
"""
c_ll_p = ctypes.POINTER(ctypes.c_longlong)
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt = dt.replace(tzinfo=None)
if dt == datetime.datetime.max:
return -2**63
return self._library.computeTT2000(
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.year)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.month)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.day)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.hour)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.minute)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.second)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.microsecond // 1000)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
dt.microsecond % 1000)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents)
def epoch_to_tt2000(self, epoch):
"""
Converts a CDF EPOCH to a CDF TT2000 value
Parameters
==========
epoch : double
EPOCH to convert
Returns
=======
out : int
tt2000 corresponding to epoch
See Also
========
v_epoch_to_tt2000
"""
return self._library.CDF_TT2000_from_UTC_EPOCH(epoch)
def tt2000_to_epoch(self, tt2000):
"""
Converts a CDF TT2000 value to a CDF EPOCH
.. note::
Although TT2000 values support leapseconds, CDF EPOCH values
do not. Times during leapseconds are rounded up to beginning
of the next day.
Parameters
==========
tt2000 : int
TT2000 value from CDF
Raises
======
EpochError : if input invalid
Returns
=======
out : double
EPOCH corresponding to the TT2000 input time
See Also
========
v_tt2000_to_epoch
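Examples
========
Sketch of chaining the conversions (assumes a TT2000-capable CDF
library; the intermediate EPOCH float is library-computed and not
asserted here):

>>> import pycdf
>>> ep = pycdf.lib.tt2000_to_epoch(315576066184000000)
>>> pycdf.lib.epoch_to_datetime(ep)
datetime.datetime(2010, 1, 1, 0, 0)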
"""
return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000)
def epoch16_to_tt2000(self, epoch0, epoch1):
"""
Converts a CDF epoch16 value to TT2000
.. note::
Because TT2000 does not support picoseconds, the picoseconds
value in epoch is ignored (i.e., truncated.)
Parameters
==========
epoch0 : float
epoch16 value from CDF, first half
epoch1 : float
epoch16 value from CDF, second half
Raises
======
EpochError : if input invalid
Returns
=======
out : long
TT2000 corresponding to epoch.
See Also
========
v_epoch16_to_tt2000
"""
return self._library.CDF_TT2000_from_UTC_EPOCH16(
(ctypes.c_double * 2)(epoch0, epoch1))
def tt2000_to_epoch16(self, tt2000):
"""
Converts a CDF TT2000 value to a CDF EPOCH16
.. note::
Although TT2000 values support leapseconds, CDF EPOCH16 values
do not. Times during leapseconds are rounded up to beginning
of the next day.
Parameters
==========
tt2000 : int
TT2000 value from CDF
Raises
======
EpochError : if input invalid
Returns
=======
out : double, double
EPOCH16 corresponding to the TT2000 input time
See Also
========
v_tt2000_to_epoch16
"""
#Default to "illegal epoch" if isn't populated
epoch16 = (ctypes.c_double * 2)(-1., -1.)
if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16):
return (-1., -1.) #Failure; illegal epoch
return (epoch16[0], epoch16[1])
def _bad_tt2000(*args, **kwargs):
"""Convenience function for complaining that TT2000 not supported"""
raise NotImplementedError(
'TT2000 functions require CDF library 3.4.0 or later')
def download_library():
"""Download and install the CDF library"""
if sys.platform != 'win32':
raise NotImplementedError(
'CDF library install only supported on Windows')
try:
import html.parser as HTMLParser
except ImportError:
import HTMLParser
#https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj
class LinkParser(HTMLParser.HTMLParser, object):
def __init__(self, *args, **kwargs):
self.links_found = []
super(LinkParser, self).__init__(*args, **kwargs)
def handle_starttag(self, tag, attrs):
if tag != 'a' or attrs[0][0] != 'href':
return
self.links_found.append(attrs[0][1])
import re
import subprocess
try:
import urllib.request as u
except ImportError:
import urllib as u
# Removed reference to spacepy
#import spacepy
#if spacepy.config.get('user_agent', None):
# class AppURLopener(u.FancyURLopener):
# version = spacepy.config['user_agent']
# u._urlopener = AppURLopener()
baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/'
url = u.urlopen(baseurl)
listing = url.read()
url.close()
p = LinkParser()
p.feed(listing.decode('utf-8') if isinstance(listing, bytes) else listing)
cdfdist = [l for l in p.links_found if re.match(r'^cdf3\d_\d(?:_\d)?/$', l)]
if not cdfdist:
raise RuntimeError(
"Couldn't find CDF distribution directory to download")
cdfdist.sort(key=lambda x: x.rstrip('/').split('_'))
cdfverbase = cdfdist[-1].rstrip('/')
instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \
'-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4)
insturl = baseurl + cdfverbase + '/windows/' + instfname
tmpdir = tempfile.mkdtemp()
try:
fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname))
subprocess.check_call([fname, '/install', '/q1'], shell=False)
finally:
shutil.rmtree(tmpdir)
_libpath, _library = Library._find_lib()
if _library is None:
raise Exception(('Cannot load CDF C library; checked {0}. '
'Try \'os.environ["CDF_LIB"] = library_directory\' '
'before import.').format(', '.join(_libpath)))
from . import const
lib = Library(_libpath, _library)
"""Module global library object.
Initalized at module load time so all classes have ready
access to the CDF library and a common state. E.g:
>>> import pycdf
>>> pycdf.lib.version
(3, 3, 0, ' ')
"""
class CDFException(Exception):
"""
Base class for errors or warnings in the CDF library.
Not normally used directly, but in subclasses :class:`CDFError`
and :class:`CDFWarning`.
Error messages provided by this class are looked up from the underlying
C library.
"""
def __init__(self, status):
"""
Create a CDF Exception
Uses CDF C library to look up an appropriate error message.
Parameters
==========
status : ctypes.c_long
CDF status
"""
self.status = status
self.string = 'CDF error ' + repr(status) + ', unable to get details.'
message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1)
try:
retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_,
ctypes.c_long(status),
const.GET_, const.STATUS_TEXT_, message,
const.NULL_)
if retval == const.CDF_OK:
if isinstance(message.value, str):
self.string = message.value
elif isinstance(message.value, bytes):
self.string = message.value.decode()
except:
pass
def __str__(self):
"""
Error string associated with the library error.
Returns
=======
out : str
Error message from the CDF library.
"""
return self.string
class CDFError(CDFException):
"""Raised for an error in the CDF library."""
pass
class CDFWarning(CDFException, UserWarning):
"""Used for a warning in the CDF library."""
def warn(self, level=4):
"""
Issues a warning based on the information stored in my exception
Intended for use in check_status or similar wrapper function.
Other Parameters
================
level : int
optional (default 4), how far up the stack the warning should
be reported. Passed as ``stacklevel`` to :func:`warnings.warn`.
"""
warnings.warn(self, self.__class__, level)
class EpochError(Exception):
"""Used for errors in epoch routines"""
pass
def _compress(obj, comptype=None, param=None):
"""Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var`
@param obj: object on which to set or check compression
@type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var`
@param comptype: type of compression to change to, see CDF C reference
manual section 4.10. Constants for this parameter
are in :py:mod:`pycdf.const`. If not specified, will not change
compression.
@type comptype: ctypes.c_long
@param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`.
If not specified, will choose reasonable default (5 for
gzip; other types have only one possible parameter.)
@type param: ctypes.c_long
@return: (comptype, param) currently in effect
@rtype: tuple
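Example (illustrative sketch; ``example.cdf`` is a hypothetical file)::
>>> cdf = CDF('example.cdf', '')  # new CDF, opened read/write
>>> comptype, param = _compress(cdf, const.GZIP_COMPRESSION)  # gzip, default level 5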
"""
if isinstance(obj, CDF):
COMPRESSION_ = const.CDF_COMPRESSION_
elif isinstance(obj, Var):
COMPRESSION_ = const.zVAR_COMPRESSION_
else:
raise ValueError('Must specify a CDF or Var type.')
validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)],
const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs],
const.HUFF_COMPRESSION.value:
[const.OPTIMAL_ENCODING_TREES],
const.AHUFF_COMPRESSION.value:
[const.OPTIMAL_ENCODING_TREES],
const.GZIP_COMPRESSION.value: [ctypes.c_long(5),
ctypes.c_long(1),
ctypes.c_long(2),
ctypes.c_long(3),
ctypes.c_long(4),
ctypes.c_long(6),
ctypes.c_long(7),
ctypes.c_long(8),
ctypes.c_long(9),
],
}
comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION,
const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION,
const.GZIP_COMPRESSION]
comptypevalues = [i.value for i in comptypes]
if comptype != None:
if not hasattr(comptype, 'value'):
comptype = ctypes.c_long(comptype)
if param is None:
if not comptype.value in validparams:
raise CDFError(const.BAD_COMPRESSION)
param = validparams[comptype.value][0]
paramlist = (ctypes.c_long * 1)(param)
obj._call(const.PUT_, COMPRESSION_,
comptype, paramlist)
params = (ctypes.c_long *
const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS))
comptype = ctypes.c_long(0)
percent = ctypes.c_long(0)
obj._call(const.GET_, COMPRESSION_,
ctypes.byref(comptype), ctypes.byref(params),
ctypes.byref(percent))
param = params[0]
if not comptype.value in comptypevalues:
raise CDFError(const.BAD_COMPRESSION)
validparamvalues = [i.value for i in validparams[comptype.value]]
if not param in validparamvalues:
raise CDFError(const.BAD_COMPRESSION_PARM)
comptype = comptypes[comptypevalues.index(comptype.value)]
if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION,
const.AHUFF_COMPRESSION):
param = validparams[comptype.value][validparamvalues.index(param)]
return (comptype, param)
class CDF(MutableMapping):
"""
Python object representing a CDF file.
Open or create a CDF file by creating an object of this class.
Parameters
==========
pathname : string
name of the file to open or create
masterpath : string
name of the master CDF file to use in creating
a new file. If not provided, an existing file is
opened; if provided but evaluates to ``False``
(e.g., ``''``), an empty new CDF is created.
create : bool
Create a new CDF even if masterpath isn't provided
readonly : bool
Open the CDF read-only. Default True if opening an
existing CDF; False if creating a new one. A readonly
CDF with many variables may be slow to close. See
:meth:`readonly`.
Raises
======
CDFError
if CDF library reports an error
Warns
=====
CDFWarning
if CDF library reports a warning and interpreter
is set to error on warnings.
Examples
========
Open a CDF by creating a CDF object, e.g.:
>>> cdffile = pycdf.CDF('cdf_filename.cdf')
Be sure to :meth:`close` or :meth:`save` when
done.
.. note::
Existing CDF files are opened read-only by default, see
:meth:`readonly` to change.
CDF supports the `with
<http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_
keyword, like other file objects, so:
>>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
... #do brilliant things with the CDF
will open the CDF, execute the indented statements, and close the CDF when
finished or when an error occurs. The `python docs
<http://docs.python.org/reference/compound_stmts.html#with>`_ include more
detail on this 'context manager' ability.
CDF objects behave like a python `dictionary
<http://docs.python.org/tutorial/datastructures.html#dictionaries>`_,
where the keys are names of variables in the CDF, and the values,
:class:`Var` objects. As a dictionary, they are also `iterable
<http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy
to loop over all of the variables in a file. Some examples:
#. List the names of all variables in the open CDF ``cdffile``:
>>> cdffile.keys()
>>> for k in cdffile: #Alternate
... print(k)
#. Get a :class:`Var` object for the variable named ``Epoch``:
>>> epoch = cdffile['Epoch']
#. Determine if a CDF contains a variable named ``B_GSE``:
>>> if 'B_GSE' in cdffile:
... print('B_GSE is in the file')
... else:
... print('B_GSE is not in the file')
#. Find how many variables are in the file:
>>> print(len(cdffile))
#. Delete the variable ``Epoch`` from the open CDF file ``cdffile``:
>>> del cdffile['Epoch']
#. Display a summary of variables and types in open CDF file ``cdffile``:
>>> print(cdffile)
#. Open the CDF named ``cdf_filename.cdf``, read *all* the data from
all variables into dictionary ``data``, and close it when done or
if an error occurs:
>>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
... data = cdffile.copy()
This last example can be very inefficient as it reads the entire CDF.
Normally it's better to treat the CDF as a dictionary and access only
the data needed, which will be pulled transparently from disc. See
:class:`Var` for more subtle examples.
Potentially useful dictionary methods and related functions:
- `in <http://docs.python.org/reference/expressions.html#in>`_
- `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_
- :py:func:`len`
- `list comprehensions
<http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_
- :py:func:`sorted`
- :py:func:`~spacepy.toolbox.dictree`
The CDF user's guide section 2.2 has more background information on CDF
files.
The :attr:`~CDF.attrs` Python attribute acts as a dictionary
referencing CDF attributes (do not confuse the two); all the
dictionary methods above also work on the attribute dictionary.
See :class:`gAttrList` for more on the dictionary of global
attributes.
Creating a new CDF from a master (skeleton) CDF has similar syntax to
opening one:
>>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf')
This creates and opens ``cdf_filename.cdf`` as a copy of
``master_cdf_filename.cdf``.
Using a skeleton CDF is recommended over making a CDF entirely from
scratch, but this is possible by specifying a blank master:
>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')
When CDFs are created in this way, they are opened read-write, see
:py:meth:`readonly` to change.
By default, new CDFs (without a master) are created in version 2
(backward-compatible) format. To create a version 3 CDF, use
:meth:`Library.set_backward`:
>>> pycdf.lib.set_backward(False)
>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')
Add variables by direct assignment, which will automatically set type
and dimension based on the data provided:
>>> cdffile['new_variable_name'] = [1, 2, 3, 4]
or, if more control is needed over the type and dimensions, use
:py:meth:`new`.
Although it is supported to assign Var objects to Python variables
for convenience, there are some minor pitfalls that can arise when
changing a CDF that will not affect most users. This is only a
concern when assigning a zVar object to a Python variable, changing the
CDF through some other variable, and then trying to use the zVar
object via the originally assigned variable.
Deleting a variable:
>>> var = cdffile['Var1']
>>> del cdffile['Var1']
>>> var[0] #fail, no such variable
Renaming a variable:
>>> var = cdffile['Var1']
>>> cdffile['Var1'].rename('Var2')
>>> var[0] #fail, no such variable
Renaming via the same variable works:
>>> var = cdffile['Var1']
>>> var.rename('Var2')
>>> var[0] #succeeds, aware of new name
Deleting a variable and then creating another variable with the same name
may lead to some surprises:
>>> var = cdffile['Var1']
>>> var[...] = [1, 2, 3, 4]
>>> del cdffile['Var1']
>>> cdffile.new('Var1', data=[5, 6, 7, 8])
>>> var[...]
[5, 6, 7, 8]
.. autosummary::
~CDF.attr_num
~CDF.attrs
~CDF.add_attr_to_cache
~CDF.add_to_cache
~CDF.backward
~CDF.checksum
~CDF.clear_attr_from_cache
~CDF.clear_from_cache
~CDF.clone
~CDF.close
~CDF.col_major
~CDF.compress
~CDF.copy
~CDF.from_data
~CDF.new
~CDF.raw_var
~CDF.readonly
~CDF.save
~CDF.var_num
~CDF.version
.. attribute:: CDF.attrs
Global attributes for this CDF in a dict-like format.
See :class:`gAttrList` for details.
.. attribute:: CDF.backward
True if this CDF was created in backward-compatible mode
(for opening with CDF library before 3.x)
.. automethod:: add_to_cache
.. automethod:: add_attr_to_cache
.. automethod:: attr_num
.. automethod:: checksum
.. automethod:: clear_from_cache
.. automethod:: clear_attr_from_cache
.. automethod:: clone
.. automethod:: close
.. automethod:: col_major
.. automethod:: compress
.. automethod:: copy
.. automethod:: from_data
.. automethod:: new
.. automethod:: raw_var
.. automethod:: readonly
.. automethod:: save
.. automethod:: var_num
.. automethod:: version
"""
def __init__(self, pathname, masterpath=None, create=None, readonly=None):
"""Open or create a CDF file.
Parameters
==========
pathname : string
name of the file to open or create
masterpath : string
name of the master CDF file to use in creating
a new file. If not provided, an existing file is
opened; if provided but evaluates to ``False``
(e.g., ``''``), an empty new CDF is created.
create : bool
Create a new CDF even if masterpath isn't provided
readonly : bool
Open the CDF read-only. Default True if opening an
existing CDF; False if creating a new one.
Raises
======
CDFError
if CDF library reports an error
CDFWarning
if CDF library reports a warning and interpreter
is set to error on warnings.
Examples
========
Open a CDF by creating a CDF object, e.g.:
>>> cdffile = pycdf.CDF('cdf_filename.cdf')
Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`
when done.
"""
if masterpath is not None: #Looks like we want to create
if create is False:
raise ValueError('Cannot specify a master CDF without creating a CDF')
if readonly is True:
raise ValueError('Cannot create a CDF in readonly mode')
if create and readonly:
raise ValueError('Cannot create a CDF in readonly mode')
try:
self.pathname = pathname.encode()
except AttributeError:
raise ValueError(
'pathname must be string-like: {0}'.format(pathname))
self._handle = ctypes.c_void_p(None)
self._opened = False
if masterpath is None and not create:
self._open(True if readonly is None else readonly)
elif masterpath:
self._from_master(masterpath.encode())
else:
self._create()
lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2))
self._attrlistref = weakref.ref(gAttrList(self))
self.backward = self.version()[0] < 3
self._var_nums = {}
"""Cache of name-to-number mappings for variables in this CDF"""
self._attr_info = {}
"""Cache of name-to-(number, global) mappings for attributes
in this CDF"""
def __del__(self):
"""Destructor; called when CDF object is destroyed.
Close CDF file if there is still a valid handle.
.. note::
To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close`
or :py:meth:`pycdf.CDF.save`.
"""
if self._opened:
self.close()
def __delitem__(self, name):
"""Delete a zVariable in this CDF, by name or number
Parameters
==========
name : string or int
Name or number of the CDF variable
.. note:
Variable numbers may change if variables are added or removed.
Examples
========
Delete the variable ``Epoch`` from the open CDF file ``cdffile``.
>>> del cdffile['Epoch']
"""
self[name]._delete()
def __enter__(self):
"""Context manager entrance function."""
return self
def __exit__(self, type, value, traceback):
"""Context manager exit function.
Close CDF file.
"""
self.close()
def __getitem__(self, name):
"""Gets a zVariable in this CDF, by name or number
The CDF acts like a dict
@param name: Name or number of the CDF variable
@type name: string or int
@return: CDF variable named or numbered L{name}
@rtype: :py:class:`pycdf.Var`
@raise KeyError: for pretty much any problem in lookup
@note: variable numbers may change if variables are added or removed.
"""
try:
return Var(self, name)
except CDFException as e:
raise KeyError('{0}: {1}'.format(name, e))
def __setitem__(self, name, data):
"""Writes data to a zVariable in this CDF
If the zVariable does not exist, will create one matching
L{data}. If it does exist, will attempt to write L{data}
to it without changing the type or dimensions.
@param name: name or number of the variable to write
@type name: str or int
@param data: data to write, or a :py:class:`pycdf.Var` to copy
"""
if isinstance(data, Var):
self.clone(data, name)
elif name in self:
self[name][...] = data
if hasattr(data, 'attrs'):
self[name].attrs.clone(data.attrs)
else:
self.new(name, data)
def __iter__(self, current = 0):
"""Iterates over zVars in CDF
Iterators for dicts return keys
@note: Returned in variable-number order
"""
while current < self.__len__():
name = self[current].name()
value = (yield name)
if value is None:
current += 1
else:
current = self[value]._num()
current += 1
def __len__(self):
"""Implements 'length' of CDF (number of zVars)
@return: number of zVars in the CDF
@rtype: int
"""
count = ctypes.c_long(0)
self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count))
return count.value
def __contains__(self, key):
"""Determines whether a particular variable name is in the CDF
@note: Essentially an efficiency function; L{__iter__} is called
if this isn't defined
@param key: key/variable name to check
@type key: string
@return: True if L{key} is the name of a variable in CDF, else False
@rtype: Boolean
"""
try:
self[key]  #lookup either succeeds or raises KeyError
return True
except KeyError as e:
expected = str(key) + \
": NO_SUCH_VAR: Named variable not found in this CDF."
if expected in e.args:
return False
raise
def __repr__(self):
"""Returns representation of CDF
Cannot return anything that can be eval'd to create a copy of the
CDF, so just wrap the informal representation in angle brackets.
@return: all the data in this list of attributes
@rtype: str
"""
return '<CDF:\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the CDF
This is an 'informal' representation in that it cannot be evaluated
directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all
variables. (Attributes are not listed.)
@return: description of the variables in the CDF
@rtype: str
"""
if self._opened:
return '\n'.join([key + ': ' + str(value)
for (key, value) in sorted(self.items())])
#can get away with this sort because second value in tuple isn't
#compared unless first are different, and variable name is unique.
else:
if isinstance(self.pathname, str):
return 'Closed CDF {0}'.format(self.pathname)
else:
return 'Closed CDF {0}'.format(self.pathname.decode('ascii'))
def _open(self, readonly=True):
"""Opens the CDF file (called on init)
Will open an existing CDF file read/write.
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle))
self._opened = True
if readonly: #Default is RW
self.readonly(readonly)
def _create(self):
"""Creates (and opens) a new CDF file
Created at ``pathname``.
Assumes zero-dimension r variables
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0),
(ctypes.c_long * 1)(0), ctypes.byref(self._handle))
self._opened = True
def _from_master(self, master_path):
"""Creates a new CDF from a master CDF file
``master_path`` is copied to ``pathname`` and opened.
Parameters
==========
master_path : string
location of the master CDF file
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
if os.path.exists(self.pathname):
raise CDFError(const.CDF_EXISTS)
shutil.copy2(master_path, self.pathname)
self._open(False)
def _call(self, *args, **kwargs):
"""Select this CDF as current and call the CDF internal interface
Adds call to select this CDF to L{args} and passes all parameters
directly through to the CDFlib routine of the CDF library's C internal
interface. Checks the return value with L{Library.check_status}.
Parameters
==========
args : various, see :py:mod:`ctypes`.
Passed directly to the CDF library interface. Useful
constants are defined in the :doc:`const <pycdf_const>`
module of this package.
Returns
=======
out : ctypes.c_long
CDF status from the library
.. note:
Terminal NULL_ is automatically added to ``args``.
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
"""
return lib.call(const.SELECT_, const.CDF_, self._handle,
*args, **kwargs)
def clone(self, zVar, name=None, data=True):
"""
Clone a zVariable (from another CDF or this) into this CDF
Parameters
==========
zVar : :py:class:`Var`
variable to clone
Other Parameters
================
name : str
Name of the new variable (default: name of the original)
data : boolean (optional)
Copy data, or only type, dimensions, variance, attributes?
(default: True, copy data as well)
Returns
=======
out : :py:class:`Var`
The newly-created zVar in this CDF
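Examples
========
A minimal sketch, assuming ``cdffile`` is an open, writable
:class:`CDF` and ``master`` is another open CDF containing a
zVariable ``Epoch`` (names here are illustrative only):
>>> cdffile.clone(master['Epoch'], 'Epoch')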
"""
if name is None:
name = zVar.name()
if name in self:
del self[name]
self.new(name, type=zVar.type(), recVary=zVar.rv(),
dimVarys=zVar.dv(), dims=zVar._dim_sizes(),
n_elements=zVar._nelems())
self[name].compress(*zVar.compress())
self[name].attrs.clone(zVar.attrs)
if data:
r = zVar._raw
zVar._raw = True
self.raw_var(name)[...] = zVar[...]
zVar._raw = r
return self[name]
def col_major(self, new_col=None):
"""
Finds the majority of this CDF file
Other Parameters
================
new_col : boolean
Specify True to change to column-major, False to change to
row major, or do not specify to check the majority
rather than changing it.
(default is check only)
Returns
=======
out : boolean
True if column-major, false if row-major
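Examples
========
A minimal sketch, assuming ``cdffile`` is an open, writable,
row-major CDF (the name is illustrative only):
>>> cdffile.col_major()     #check the current majority
False
>>> cdffile.col_major(True) #switch the file to column-major
True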
"""
if new_col is not None:
new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR
self._call(const.PUT_, const.CDF_MAJORITY_, new_maj)
maj = ctypes.c_long(0)
self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj))
if maj.value not in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value):
raise CDFError(const.BAD_MAJORITY)
return maj.value == const.COLUMN_MAJOR.value
def readonly(self, ro=None):
"""
Sets or checks the readonly status of this CDF
If the CDF has been changed since opening, setting readonly mode
will have no effect.
.. note::
Closing a CDF that has been opened readonly, or setting readonly
False, may take a substantial amount of time if there are many
variables in the CDF, as a (potentially large) cache needs to
be cleared. Consider specifying ``readonly=False`` when opening
the file if this is an issue. However, this may make some reading
operations slower.
Other Parameters
================
ro : Boolean
True to set the CDF readonly, False to set it read/write,
or leave out to check only.
Returns
=======
out : Boolean
True if CDF is read-only, else False
Raises
======
CDFError : if bad mode is set
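Examples
========
A minimal sketch, assuming ``cdffile`` is an open CDF (the name is
illustrative only):
>>> cdffile.readonly(True)  #switch to readonly; returns the new mode
True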
"""
if ro == True:
self._call(const.SELECT_, const.CDF_READONLY_MODE_,
const.READONLYon)
elif ro == False:
self._call(const.SELECT_, const.CDF_READONLY_MODE_,
const.READONLYoff)
mode = ctypes.c_long(0)
self._call(const.CONFIRM_, const.CDF_READONLY_MODE_,
ctypes.byref(mode))
if mode.value == const.READONLYon.value:
return True
elif mode.value == const.READONLYoff.value:
return False
else:
raise CDFError(const.BAD_READONLY_MODE.value)
def checksum(self, new_val=None):
"""
Set or check the checksum status of this CDF. If checksums
are enabled, the checksum will be verified every time the file
is opened.
Other Parameters
================
new_val : boolean
True to enable checksum, False to disable, or leave out
to simply check.
Returns
=======
out : boolean
True if the checksum is enabled or False if disabled
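Examples
========
A minimal sketch, assuming ``cdffile`` is an open, writable CDF
(the name is illustrative only):
>>> cdffile.checksum(True)  #enable MD5 checksums; returns new setting
True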
"""
if new_val is not None:
self._call(const.PUT_, const.CDF_CHECKSUM_,
const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM)
chk = ctypes.c_long(0)
self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk))
if chk.value not in (const.MD5_CHECKSUM.value,
const.NO_CHECKSUM.value):
raise CDFError(const.BAD_CHECKSUM)
return chk.value == const.MD5_CHECKSUM.value
def close(self):
"""
Closes the CDF file
Although called on object destruction (:meth:`~CDF.__del__`),
to ensure all data are saved, the user should explicitly call
:meth:`~CDF.close` or :meth:`~CDF.save`.
Raises
======
CDFError : if CDF library reports an error
Warns
=====
CDFWarning : if CDF library reports a warning
"""
self._call(const.CLOSE_, const.CDF_)
self._opened = False
def compress(self, comptype=None, param=None):
"""
Set or check the compression of this CDF
Sets compression on entire *file*, not per-variable.
See section 2.6 of the CDF user's guide for more information on
compression.
Other Parameters
================
comptype : ctypes.c_long
type of compression to change to, see CDF C reference manual
section 4.10. Constants for this parameter are in
:mod:`~pycdf.const`. If not specified, will not change
compression.
param : ctypes.c_long
Compression parameter, see CDF CRM 4.10 and
:mod:`~pycdf.const`.
If not specified, will choose reasonable default (5 for gzip;
other types have only one possible parameter.)
Returns
=======
out : tuple
(comptype, param) currently in effect
See Also
========
:meth:`Var.compress`
Examples
========
Set file ``cdffile`` to gzip compression, compression level 9:
>>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9)
"""
return _compress(self, comptype, param)
def new(self, name, data=None, type=None, recVary=True, dimVarys=None,
dims=None, n_elements=None, compress=None, compress_param=None):
"""
Create a new zVariable in this CDF
.. note::
Either ``data`` or ``type`` must be specified. If type is not
specified, it is guessed from ``data``.
Parameters
==========
name : str
name of the new variable
Other Parameters
================
data
data to store in the new variable. If this has an ``attrs``
attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it
will be used to populate attributes of the new variable.
type : ctypes.c_long
CDF type of the variable, from :mod:`~pycdf.const`.
See section 2.5 of the CDF user's guide for more information on
CDF data types.
recVary : boolean
record variance of the variable (default True)
dimVarys : list of boolean
dimension variance of each dimension, default True for all
dimensions.
dims : list of int
size of each dimension of this variable, default zero-dimensional.
Note this is the dimensionality as defined by CDF, i.e., for
record-varying variables it excludes the leading record dimension.
See :py:class:`Var`.
n_elements : int
number of elements, should be 1 except for CDF_CHAR,
for which it's the length of the string.
compress : ctypes.c_long
Compression to apply to this variable, default None.
See :py:meth:`Var.compress`.
compress_param : ctypes.c_long
Compression parameter if compression used; reasonable default
is chosen. See :py:meth:`Var.compress`.
Returns
=======
out : :py:class:`Var`
the newly-created zVariable
Raises
======
ValueError : if neither data nor sufficient typing information
is provided.
Notes
=====
Any given data may be representable by a range of CDF types; if
the type is not specified, pycdf will guess which CDF type can
represent this data. This breaks down to:
#. If input data is a numpy array, match the type of that array
#. Proper kind (numerical, string, time)
#. Proper range (stores highest and lowest number provided)
#. Sufficient resolution (EPOCH16 required if datetime has
microseconds or below.)
If more than one value satisfies the requirements, types are returned
in preferred order:
#. Type that matches precision of data first, then
#. integer type before float type, then
#. Smallest type first, then
#. signed type first, then
#. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1)
So for example, EPOCH16 is preferred over EPOCH if ``data`` specifies
below the millisecond level (rule 1), but otherwise EPOCH is preferred
(rule 2).
For floats, four-byte is preferred unless eight-byte is required:
#. absolute values between 0 and 3e-39
#. absolute values greater than 1.7e38
This will switch to an eight-byte double in some cases where four bytes
would be sufficient for IEEE 754 encoding, but where DEC formats would
require eight.
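Examples
========
A minimal sketch (variable names and data are illustrative only),
assuming ``cdffile`` is an open, writable CDF. The first call guesses
type and dimensions from the data; the second forces an NRV
double-precision variable:
>>> cdffile.new('Flux', data=[[1., 2., 3.], [4., 5., 6.]])
>>> cdffile.new('Energy', data=[10., 20., 30.], recVary=False,
... type=pycdf.const.CDF_DOUBLE)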
"""
if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \
and self.backward:
raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 '
'in backward-compatible CDF')
if not lib.supports_int8 and \
type in (const.CDF_INT8, const.CDF_TIME_TT2000):
raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0')
if data is None:
if type is None:
raise ValueError('Must provide either data or a CDF type.')
if dims is None:
dims = []
if n_elements is None:
n_elements = 1
else:
(guess_dims, guess_types, guess_elements) = _Hyperslice.types(data)
if dims is None:
if recVary:
if guess_dims == ():
raise ValueError(
'Record-varying data cannot be scalar. '
'Specify NRV with CDF.new() or put data in array.')
dims = guess_dims[1:]
else:
dims = guess_dims
if type is None:
type = guess_types[0]
if type == const.CDF_EPOCH16.value and self.backward:
type = const.CDF_EPOCH
if n_elements is None:
n_elements = guess_elements
if dimVarys is None:
dimVarys = [True for i in dims]
recVary = const.VARY if recVary else const.NOVARY
dimVarys = [const.VARY if dimVary else const.NOVARY
for dimVary in dimVarys]
if not hasattr(type, 'value'):
type = ctypes.c_long(type)
if type.value == const.CDF_INT8.value and not lib.supports_int8:
raise ValueError(
'64-bit integer support requires CDF library 3.4.0')
if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value,
const.CDF_TIME_TT2000.value) \
and self.backward:
raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; '
'incompatible with backward-compatible CDF')
new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys)
if compress is not None:
new_var.compress(compress, compress_param)
if data is not None:
new_var[...] = data
if hasattr(data, 'attrs'):
new_var.attrs.clone(data.attrs)
return new_var
def raw_var(self, name):
"""
Get a "raw" :class:`Var` object.
Normally a :class:`Var` will perform translation of values for
certain types (to/from Unicode for CHAR variables on Py3k,
and to/from datetime for all time types). A "raw" object
does not perform this translation, on read or write.
This does *not* affect the data on disk, and in fact it
is possible to maintain multiple Python objects with access
to the same zVariable.
Parameters
==========
name : str
name or number of the zVariable
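Examples
========
A minimal sketch, assuming ``cdffile`` is an open CDF with a CDF_EPOCH
zVariable ``Epoch`` (names are illustrative only). The raw variable
returns the stored EPOCH values rather than
:class:`~datetime.datetime` objects:
>>> raw_epoch = cdffile.raw_var('Epoch')
>>> values = raw_epoch[...]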
"""
v = self[name]
v._raw = True
return v
def save(self):
"""
Saves the CDF file but leaves it open.
If closing the CDF, :meth:`close` is sufficient;
there is no need to call
:meth:`save` before :meth:`close`.
.. note::
Relies on an undocumented call of the CDF C library, which is
also used in the Java interface.
Raises
======
CDFError : if CDF library reports an error
Warns
=====
CDFWarning : if CDF library reports a warning
"""
self._call(const.SAVE_, const.CDF_)
def copy(self):
"""
Make a copy of all data and attributes in this CDF
Returns
=======
out : :py:class:`CDFCopy`
:class:`~spacepy.datamodel.SpaceData`-like object of all data
"""
return CDFCopy(self)
def version(self):
"""
Get version of library that created this CDF
Returns
=======
out : tuple
version of CDF library, in form (version, release, increment)
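Examples
========
A minimal sketch, assuming ``cdffile`` is an open CDF written by
version 3.4.1 of the CDF library (values are illustrative only):
>>> cdffile.version()
(3, 4, 1)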
"""
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver),
const.GET_, const.CDF_RELEASE_, ctypes.byref(rel),
const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc))
return (ver.value, rel.value, inc.value)
def _get_attrs(self):
"""Get attribute list
Provide access to the CDF's attribute list without holding a
strong reference, as the attribute list has a (strong)
back-reference to its parent.
Either deref a weak reference (to try and keep the object the same),
or make a new AttrList instance and assign it to the weak reference
for next time.
"""
al = self._attrlistref()
if al is None:
al = gAttrList(self)
self._attrlistref = weakref.ref(al)
return al
def _set_attrs(self, value):
"""Assign to the attribute list
Clears all elements of the attribute list and copies from value
"""
self.attrs.clone(value)
attrs = property(
_get_attrs, _set_attrs, None,
"""Global attributes for this CDF in a dict-like format.
See :class:`gAttrList` for details.
""")
def var_num(self, varname):
"""Get the variable number of a particular variable name
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Note this is NOT a string in Python 3!
Raises
======
CDFError : if variable is not found
Returns
=======
out : int
Variable number of this zVariable.
"""
num = self._var_nums.get(varname, None)
if num is None: #Copied from Var._get, which can hopefully be thinned
varNum = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_NUMBER_, varname,
ctypes.byref(varNum))
num = varNum.value
self._var_nums[varname] = num
return num
def attr_num(self, attrname):
"""Get the attribute number and scope by attribute name
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
attrname : bytes
name of the attribute. Note this is NOT a string in Python 3!
Raises
======
CDFError : if variable is not found
Returns
=======
out : tuple
attribute number, scope (True for global) of this attribute
"""
res = self._attr_info.get(attrname, None)
if res is None: #Copied from Var._get, which can hopefully be thinned
attrNum = ctypes.c_long(0)
self._call(const.GET_, const.ATTR_NUMBER_, attrname,
ctypes.byref(attrNum))
scope = ctypes.c_long(0)
self._call(const.SELECT_, const.ATTR_, attrNum,
const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
if scope.value == const.GLOBAL_SCOPE.value:
scope = True
elif scope.value == const.VARIABLE_SCOPE.value:
scope = False
else:
raise CDFError(const.BAD_SCOPE)
res = (attrNum.value, scope)
self._attr_info[attrname] = res
return res
def clear_attr_from_cache(self, attrname):
"""Mark an attribute deleted in the name-to-number cache
Will remove an attribute, and all attributes with higher numbers,
from the attribute cache.
Does NOT delete the variable!
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
attrname : bytes
name of the attribute. Note this is NOT a string in Python 3!
"""
num, scope = self.attr_num(attrname)
#All numbers higher than this are renumbered
for a, n in list(self._attr_info.items()):
if n[0] >= num:
del self._attr_info[a]
def clear_from_cache(self, varname):
"""Mark a variable deleted in the name-to-number cache
Will remove a variable, and all variables with higher numbers,
from the variable cache.
Does NOT delete the variable!
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Note this is NOT a string in Python 3!
"""
num = self.var_num(varname)
#All numbers higher than this are renumbered
for v, n in list(self._var_nums.items()):
if n >= num:
del self._var_nums[v]
def add_attr_to_cache(self, attrname, num, scope):
"""Add an attribute to the name-to-number cache
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
attrname : bytes
name of the attribute. Note this is NOT a string in Python 3!
num : int
number of the variable
scope : bool
True if global scope; False if variable scope.
"""
self._attr_info[attrname] = (num, scope)
def add_to_cache(self, varname, num):
"""Add a variable to the name-to-number cache
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Note this is NOT a string in Python 3!
num : int
number of the variable
"""
self._var_nums[varname] = num
#Note there is no function for delete, currently handled in Var.rename
#and Attr.rename by just deleting from the dict directly. Maybe this
#should be different (maybe should be possible to follow a variable across
#a rename...)
class Var(MutableSequence):
"""
A CDF variable.
This object does not directly store the data from the CDF; rather,
it provides access to the data in a format that much like a Python
list or numpy :class:`~numpy.ndarray`.
General list information is available in the python docs:
`1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
`2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
`3 <http://docs.python.org/library/stdtypes.html#typesseq>`_.
The CDF user's guide, section 2.3, provides background on variables.
.. note::
Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable.
A record-varying variable's data are viewed as a hypercube of dimensions
n_dims+1 (the extra dimension is the record number). They are indexed in
row-major fashion, i.e. the last index changes most frequently / is
contiguous in memory. If the CDF is column-major, the data are
transformed to row-major before return.
Non record-varying variables are similar, but do not have the extra
dimension of record number.
Variables can be subscripted by a multidimensional index to return the
data. Indices are in row-major order with the first dimension
representing the record number. If the CDF is column major,
the data are reordered to row major. Each dimension is specified
by standard Python
`slice <http://docs.python.org/tutorial/introduction.html#strings>`_
notation, with dimensions separated by commas. The ellipsis fills in
any missing dimensions with full slices. The returned data are
lists; Python represents multidimensional arrays as nested lists.
The innermost set of lists represents contiguous data.
.. note::
numpy 'fancy indexing' is *not* supported.
Degenerate dimensions are 'collapsed', i.e. no list of only one
element will be returned if a single subscript is specified
instead of a range. (To avoid this, specify a slice like 1:2,
which starts with 1 and ends before 2).
Two special cases:
1. requesting a single-dimension slice for a
record-varying variable will return all data for that
record number (or those record numbers) for that variable.
2. Requests for multi-dimensional variables may skip the record-number
dimension and simply specify the slice on the array itself. In that
case, the slice of the array will be returned for all records.
In the event of ambiguity (e.g., single-dimension slice on a one-dimensional
variable), case 1 takes priority.
Otherwise, mismatch between the number of dimensions specified in
the slice and the number of dimensions in the variable will cause
an :exc:`~exceptions.IndexError` to be thrown.
This all sounds very complicated but it is essentially attempting
to do the 'right thing' for a range of slices.
An unusual case is scalar (zero-dimensional) non-record-varying variables.
Clearly they cannot be subscripted normally. In this case, use the
``[...]`` syntax meaning 'access all data.':
>>> import pycdf
>>> testcdf = pycdf.CDF('test.cdf', '')
>>> variable = testcdf.new('variable', recVary=False,
... type=pycdf.const.CDF_INT4)
>>> variable[...] = 10
>>> variable
<Var:
CDF_INT4 [] NRV
>
>>> variable[...]
10
Reading any empty non-record-varying variable will return an empty array
with the same *number* of dimensions, but all dimensions will be
of zero length. The scalar is, again, a special case: due to the
inability to have a numpy array which is both zero-dimensional and empty,
reading an NRV scalar variable with no data will return an empty
one-dimensional array. This is really not recommended.
As a list type, variables are also `iterable
<http://docs.python.org/tutorial/classes.html#iterators>`_; iterating
over a variable returns a single complete record at a time.
This is all clearer with examples. Consider a variable ``B_GSM``, with
three elements per record (x, y, z components) and fifty records in
the CDF. Then:
1. ``B_GSM[0, 1]`` is the y component of the first record.
2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z
components of the 11th record. As a shortcut, if only one dimension
is specified, it is assumed to be the record number, so this
could also be written ``B_GSM[10]``.
3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a
fifty-element list, each element itself being a three-element
list of x, y, z components.
Multidimensional example: consider fluxes stored as a function of
pitch angle and energy. Such a variable may be called Flux and
stored as a two-dimensional array, with the first dimension
representing (say) ten energy steps and the second, eighteen
pitch angle bins (ten degrees wide, centered from 5 to 175 degrees).
Assume 100 records stored in the CDF (i.e. 100 different times).
1. ``Flux[4]`` is a list of ten elements, one per energy step,
each element being a list of 18 fluxes, one per pitch bin.
All are taken from the fifth record in the CDF.
2. ``Flux[4, :, 0:4]`` is the same record, all energies, but
only the first four pitch bins (roughly, field-aligned).
3. ``Flux[..., 0:4]`` is a 100-element list (one per record),
each element being a ten-element list (one per energy step),
each containing fluxes for the first four pitch bins.
This slicing notation is very flexible and allows reading
specifically the desired data from the CDF.
All data are, on read, converted to appropriate Python data
types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to
:class:`~datetime.datetime`. Data are returned in numpy arrays.
.. note::
Although pycdf supports TIME_TT2000 variables, the Python
:class:`~datetime.datetime` object does not support leap
seconds. Thus, on read, any seconds past 59 are truncated
to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds).
Potentially useful list methods and related functions:
- `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_
- `in <http://docs.python.org/reference/expressions.html#in>`_
- `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_
- `len <http://docs.python.org/library/functions.html#len>`_
- `list comprehensions
<http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_
- `sorted <http://docs.python.org/library/functions.html#sorted>`_
The topic of array majority can be very confusing; good background material
is available at `IDL Array Storage and Indexing
<http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. In brief,
*regardless of the majority stored in the CDF*, pycdf will always present
the data in the native Python majority, row-major order, also known as
C order. This is the default order in `NumPy
<http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html
#internal-memory-layout-of-an-ndarray>`_.
However, packages that render image data may expect it in column-major
order. If the axes seem 'swapped' this is likely the reason.
The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing
zAttributes (do not confuse the two); all the dictionary methods above
also work on the attribute dictionary. See :class:`zAttrList` for more on
the dictionary of attributes.
With writing, as with reading, every attempt has been made to match the
behavior of Python lists. You can write one record, many records, or even
certain elements of all records. There is one restriction: only the record
dimension (i.e. dimension 0) can be resized by write, as all records
in a variable must have the same dimensions. Similarly, only whole
records can be deleted.
.. note::
Unusual error messages on writing data usually mean that pycdf is
unable to interpret the data as a regular array of a single type
matching the type and shape of the variable being written.
A 5x4 array is supported; an irregular array where one row has
five columns and a different row has six columns is not. Error messages
of this type include:
- ``Data must be well-formed, regular array of number, string, or datetime``
- ``setting an array element with a sequence.``
- ``shape mismatch: objects cannot be broadcast to a
single shape``
For these examples, assume Flux has 100 records and dimensions [2, 3].
Rewrite the first record without changing the rest:
>>> Flux[0] = [[1, 2, 3], [4, 5, 6]]
Write a new first record and delete all the rest:
>>> Flux[...] = [[1, 2, 3], [4, 5, 6]]
Write a new record in the last position and add a new record after:
>>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]],
... [[11, 12, 13], [14, 15, 16]]]
Insert two new records between the current number 5 and 6:
>>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13],
... [14, 15, 16]]]
This operation can be quite slow, as it requires reading and
rewriting the entire variable. (CDF does not directly support
record insertion.)
Change the first element of the first two records but leave other
elements alone:
>>> Flux[0:2, 0, 0] = [1, 2]
Remove the first record:
>>> del Flux[0]
Remove record 5 (the sixth):
>>> del Flux[5]
Due to the need to work around a bug in the CDF library, this operation
can be quite slow.
Delete *all data* from ``Flux``, but leave the variable definition intact:
>>> del Flux[...]
.. note::
Although this interface only directly supports zVariables, zMode is
set on opening the CDF so rVars appear as zVars. See p.24 of the
CDF user's guide; pyCDF uses zMode 2.
.. autosummary::
~Var.attrs
~Var.compress
~Var.copy
~Var.dtype
~Var.dv
~Var.insert
~Var.name
~Var.rename
~Var.rv
~Var.shape
~Var.type
.. attribute:: Var.attrs
zAttributes for this zVariable in a dict-like format.
See :class:`zAttrList` for details.
.. automethod:: compress
.. automethod:: copy
.. autoattribute:: dtype
.. automethod:: dv
.. automethod:: insert
.. automethod:: name
.. automethod:: rename
.. automethod:: rv
.. autoattribute:: shape
.. automethod:: type
"""
def __init__(self, cdf_file, var_name, *args):
"""Create or locate a variable
Parameters
==========
cdf_file : :py:class:`pycdf.CDF`
CDF file containing this variable
var_name : string
name of this variable
Other Parameters
================
args
additional arguments passed to :py:meth:`_create`. If none,
opens an existing variable. If provided, creates a
new one.
Raises
======
CDFError
if CDF library reports an error
Warns
=====
CDFWarning
if CDF library reports a warning
"""
self.cdf_file = cdf_file
#This is the definitive "identity" of the variable
self._name = None
self._type = None #CDF type (long)
self._raw = False #Raw access (skip all conversions)
if len(args) == 0:
self._get(var_name)
else:
self._create(var_name, *args)
#Weak reference to attribute list (use attrs instead)
#This avoids a reference loop
self._attrlistref = weakref.ref(zAttrList(self))
def __getitem__(self, key):
"""Returns a slice from the data array. Details under :py:class:`pycdf.Var`.
@return: The data from this variable
@rtype: list-of-lists of appropriate type.
@raise IndexError: if L{key} is out of range, mismatches dimensions,
or simply unparseable.
@raise CDFError: for errors from the CDF library
"""
hslice = _Hyperslice(self, key)
#Hyperslice mostly catches this sort of thing, but
#an empty variable is a special case, since we might want to
#WRITE to 0th record (which Hyperslice also supports) but
#can't READ from it, and iterating over tries to read from it.
if hslice.rv:
if hslice.dimsizes[0] == 0 and hslice.degen[0] and \
hslice.starts[0] == 0:
raise IndexError('record index out of range')
#For NRV, again hslice will assume 0th record exists since we might
#want to write. So ANY degenerate dim other than the glued-on 0th
#suggests an explicit index that should fail. None degenerate suggests
#make an empty array.
#Note this is pulling a lot of hyperslice stuff into getitem!
elif hslice.dimsizes[0] == 0:
if len(hslice.degen) > 1 and max(hslice.degen[1:]):
raise IndexError('record index out of range')
else:
#The zero-length dimension is degenerate so it gets chopped,
#and you can't have a zero-length numpy array that still
#maintains the size of all other dimensions. So just force
#a zero-dim array and the rest will follow
hslice.counts[...] = 0
#If this is a scalar, need to make a single non-degenerate
#dimension so it can be empty.
if len(hslice.counts) == 1:
hslice.degen[0] = False
result = hslice.create_array()
if hslice.counts[0] != 0:
hslice.select()
lib.call(const.GET_, const.zVAR_HYPERDATA_,
result.ctypes.data_as(ctypes.c_void_p))
return hslice.convert_input_array(result)
def __delitem__(self, key):
"""Removes a record (or set of records) from the CDF
Only whole records can be deleted, so the del call must either specify
only one dimension or it must specify all elements of the non-record
dimensions. This is *not* a way to resize a variable!
Deleting records from the middle of a variable may be very slow in
some circumstances. To work around a bug in CDF library versions
3.4.0 and before, all the data must be read in, the requested deletions
done, and then all written back out.
@param key: index or slice to delete
@type key: int or slice
@raise TypeError: if an attempt is made to delete from a non
record-varying variable, or to delete below
the record level
"""
if not self.rv():
raise TypeError('Cannot delete records from non-record-varying '
'variable.')
hslice = _Hyperslice(self, key)
if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any():
raise TypeError('Can only delete entire records.')
if hslice.counts[0] == 0:
return
start = hslice.starts[0]
count = hslice.counts[0]
interval = hslice.intervals[0]
dimsize = hslice.dimsizes[0]
self._call()
dangerous_delete = False
if lib._del_middle_rec_bug and \
(interval != 1 or (start != 0 and start + count < dimsize)):
#delete from middle is dangerous if we only have one index entry
entries = ctypes.c_long(0)
lib.call(const.GET_, const.zVAR_nINDEXENTRIES_,
ctypes.byref(entries))
dangerous_delete = (entries.value == 1)
if dangerous_delete:
data = self[...]
data = numpy.delete(
data,
numpy.arange(start, start + count * interval, interval),
0)
self[0:dimsize - count] = data
first_rec = dimsize - count
last_rec = dimsize - 1
lib.call(const.DELETE_, const.zVAR_RECORDS_,
ctypes.c_long(first_rec), ctypes.c_long(last_rec))
elif interval == 1:
first_rec = ctypes.c_long(start)
last_rec = ctypes.c_long(start + count - 1)
lib.call(const.DELETE_, const.zVAR_RECORDS_,
first_rec, last_rec)
else:
self._call()
#delete from end to avoid renumbering of records
for recno in range(start + (count - 1) * interval,
start - 1, -1 * interval):
lib.call(const.DELETE_, const.zVAR_RECORDS_,
ctypes.c_long(recno), ctypes.c_long(recno))
def __setitem__(self, key, data):
"""Puts a slice into the data array. Details under :py:class:`pycdf.Var`.
@param key: index or slice to store
@type key: int or slice
@param data: data to store
@type data: numpy.array
@raise IndexError: if L{key} is out of range, mismatches dimensions,
or simply unparseable.
@raise CDFError: for errors from the CDF library
"""
hslice = _Hyperslice(self, key)
n_recs = hslice.counts[0]
hslice.expand(data)
cdf_type = self.type()
if cdf_type == const.CDF_EPOCH16.value:
if not self._raw:
try:
data = lib.v_datetime_to_epoch16(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_EPOCH.value:
if not self._raw:
try:
data = lib.v_datetime_to_epoch(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_TIME_TT2000.value:
if not self._raw:
try:
data = lib.v_datetime_to_tt2000(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.int64)
else:
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=self._np_type())
if cdf_type == const.CDF_EPOCH16.value:
datashape = data.shape[:-1]
else:
datashape = data.shape
#Check data sizes
if datashape != tuple(hslice.expected_dims()):
raise ValueError('attempt to assign data of dimensions ' +
str(datashape) + ' to slice of dimensions ' +
str(tuple(hslice.expected_dims())))
#Flip majority and reversed dimensions, see convert_input_array
data = hslice.convert_output_array(data)
#Handle insertions and similar weirdness
if hslice.counts[0] > n_recs and \
hslice.starts[0] + n_recs < hslice.dimsizes[0]:
#Specified slice ends before last record, so insert in middle
saved_data = self[hslice.starts[0] + n_recs:]
if hslice.counts[0] > 0:
hslice.select()
lib.call(const.PUT_, const.zVAR_HYPERDATA_,
data.ctypes.data_as(ctypes.c_void_p))
if hslice.counts[0] < n_recs:
first_rec = hslice.starts[0] + hslice.counts[0]
last_rec = hslice.dimsizes[0] - 1
lib.call(const.DELETE_, const.zVAR_RECORDS_,
ctypes.c_long(first_rec), ctypes.c_long(last_rec))
elif hslice.counts[0] > n_recs and \
hslice.starts[0] + n_recs < hslice.dimsizes[0]:
#Put saved data in after inserted data
self[hslice.starts[0] + hslice.counts[0]:] = saved_data
def extend(self, data):
"""
Append multiple values to the end of this variable
This is an efficiency function which overrides the base implementation
in MutableSequence.
Parameters
----------
data :
the data to append
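Examples
--------
A minimal sketch, assuming ``Flux`` is a record-varying zVariable
with dimensions [2, 3] (names and values are illustrative only);
this appends two records:
>>> Flux.extend([[[1, 2, 3], [4, 5, 6]],
... [[7, 8, 9], [10, 11, 12]]])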
"""
self[len(self):] = data
def insert(self, index, data):
"""
Inserts a *single* record before an index
Parameters
----------
index : int
index before which to insert the new record
data :
the record to insert
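Examples
--------
A minimal sketch, assuming ``Flux`` is a record-varying zVariable
with dimensions [2, 3] (names and values are illustrative only);
this inserts one record before the current record 5:
>>> Flux.insert(5, [[1, 2, 3], [4, 5, 6]])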
"""
self[index:index] = [data]
def _create(self, var_name, datatype, n_elements = 1, dims = (),
recVary = const.VARY, dimVarys = None):
"""Creates a new zVariable
@param var_name: name of this variable
@type var_name: string
@param datatype: CDF data type
@type datatype: ctypes.c_long
@param n_elements: number of elements (should be 1 except for
CDF_CHAR variables).
@type n_elements: long
@param dims: size of each dimension for multi-dimensional variable,
or empty for a zero-dimensional
@type dims: sequence of long
@param recVary: record variance for this variable (VARY/NOVARY)
@type recVary: long
@param dimVarys: array of VARY or NOVARY, variance for each dimension
@type dimVarys: sequence of long
@return: new variable with this name
@rtype: :py:class:`pycdf.Var`
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
@note: Not intended to be used directly; use L{CDF.new}.
"""
dim_array = (ctypes.c_long * len(dims))(*dims)
enc_name = var_name.encode('ascii')
if dimVarys is None:
dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY)
else:
dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys)
varNum = ctypes.c_long(0)
self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype,
ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array,
recVary, dim_vary_array, ctypes.byref(varNum))
self._name = enc_name
self.cdf_file.add_to_cache(enc_name, varNum.value)
def _delete(self):
"""Removes this zVariable from the CDF
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
"""
self._call(const.DELETE_, const.zVAR_)
self.cdf_file.clear_from_cache(self._name)
self._name = None
def _get(self, var_name):
"""Gets an existing zVariable
@param var_name: name of this variable
@type var_name: string
@return: variable with this name
@rtype: :py:class:`pycdf.Var`
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
@note: Not intended to be used directly; use L{CDF.__getitem__}.
"""
if isinstance(var_name, str_classes):
try:
enc_name = var_name.encode('ascii').rstrip()
except AttributeError:
enc_name = var_name.rstrip() #already in ASCII
#'touch' CDF to cause an error if the name isn't there; get number
varNum = ctypes.c_long(0)
self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum))
self._name = enc_name
self.cdf_file.add_to_cache(enc_name, varNum.value)
else: #Looking up by number
name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1)
self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name),
const.GET_, const.zVAR_NAME_, name)
self._name = name.value.rstrip()
self.cdf_file.add_to_cache(self._name, var_name)
def _num(self):
"""Returns the zVar number for this variable
@return: number of this zVar
@rtype: int
"""
return self.cdf_file.var_num(self._name)
def __len__(self):
"""Get number of records for this variable in this file
@return: Number of records
@rtype: long
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
"""
count = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count))
return (count.value + 1)
def __repr__(self):
"""Returns representation of the variable
Cannot return anything that can be eval'd to create a copy,
so just wrap the informal representation in angle brackets.
@return: info on this zVar
@rtype: str
"""
return '<Var:\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the variable
This is an 'informal' representation in that it cannot be evaluated
directly to create a :py:class:`pycdf.Var`.
@return: info on this zVar, CDFTYPE [dimensions] NRV
(if not record-varying)
@rtype: str
"""
if self.cdf_file._opened:
cdftype = self.type()
chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value)
rv = self.rv()
typestr = lib.cdftypenames[cdftype] + \
('*' + str(self._nelems()) if cdftype in chartypes else '' )
if rv:
sizestr = str([len(self)] + self._dim_sizes())
else:
sizestr = str(self._dim_sizes())
return typestr + ' ' + sizestr + ('' if rv else ' NRV')
else:
if isinstance(self._name, str):
return 'zVar "{0}" in closed CDF {1}'.format(
self._name, self.cdf_file.pathname)
else:
return 'zVar "{0}" in closed CDF {1}'.format(
self._name.decode('ascii'),
self.cdf_file.pathname.decode('ascii'))
def _n_dims(self):
"""Get number of dimensions for this variable
@return: the number of dimensions
@rtype: long
"""
n_dims = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims))
return n_dims.value
def _dim_sizes(self):
"""Get the dimension sizes for this variable
@return: sequence of sizes
@rtype: sequence of long
@note: This will always be in Python order (i.e. row major, last index
iterates most quickly), *regardless* of the majority of the CDF.
"""
sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0)
self._call(const.GET_, const.zVAR_DIMSIZES_, sizes)
sizes = sizes[0:self._n_dims()]
return sizes
def rv(self, new_rv=None):
"""
Gets or sets whether this variable has record variance
If the variance is unknown, True is assumed
(this replicates the apparent behavior of the CDF library on
variable creation).
Other Parameters
================
new_rv : boolean
True to change to record variance, False to change to NRV,
unspecified to simply check variance.
Returns
=======
out : Boolean
True if record varying, False if NRV
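Examples
========
A minimal sketch, assuming ``Flux`` is a newly created, record-varying
zVariable with no data written yet (the name is illustrative only):
>>> Flux.rv()       #check record variance
True
>>> Flux.rv(False)  #change to NRV; returns the new setting
False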
"""
if new_rv is not None:
self._call(const.PUT_, const.zVAR_RECVARY_,
const.VARY if new_rv else const.NOVARY)
vary = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary))
return vary.value != const.NOVARY.value
def dv(self, new_dv=None):
"""
Gets or sets dimension variance of each dimension of this variable.
If the variance is unknown, True is assumed
(this replicates the apparent behavior of the
CDF library on variable creation).
Parameters
==========
new_dv : list of boolean
Each element True to change that dimension to dimension
variance, False to change to not dimension variance.
(Unspecified to simply check variance.)
Returns
=======
out : list of boolean
True if that dimension has variance, else false.
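Examples
========
A minimal sketch, assuming ``Flux`` is a newly created two-dimensional
zVariable with no data written yet (the name is illustrative only):
>>> Flux.dv()              #check dimension variance
[True, True]
>>> Flux.dv([True, False]) #make the second dimension non-varying
[True, False]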
"""
ndims = self._n_dims()
if new_dv is not None:
if len(new_dv) != ndims:
raise ValueError('Must specify variance for ' +
str(ndims) + ' dimensions.')
varies = (ctypes.c_long * ndims)(
*[const.VARY if dv else const.NOVARY for dv in new_dv])
self._call(const.PUT_, const.zVAR_DIMVARYS_,
varies)
if ndims == 0:
return []
varies = (ctypes.c_long * const.CDF_MAX_DIMS)()
self._call(const.GET_, const.zVAR_DIMVARYS_, varies)
return [dv != const.NOVARY.value for dv in varies[0:ndims]]
def _call(self, *args, **kwargs):
"""Select this CDF and variable and call the CDF internal interface
Adds call to select this CDF to L{args} and passes all parameters
directly through to the CDFlib routine of the CDF library's C internal
interface. Checks the return value with L{Library.check_status}.
@param args: Passed directly to the CDF library interface. Useful
constants are defined in the :py:mod:`pycdf.const` module of this package.
@type args: various, see :py:mod:`ctypes`.
@return: CDF status from the library
@rtype: ctypes.c_long
@note: Terminal NULL_ is automatically added to L{args}.
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
"""
return self.cdf_file._call(
const.SELECT_, const.zVAR_,
ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs)
def _np_type(self):
"""Returns the numpy type of this variable
This is the numpy type that will come directly out of the CDF;
see :meth:`dtype` for the representation post-conversion.
Raises
======
CDFError : for library-reported error or failure to find numpy type
Returns
=======
out : dtype
numpy dtype that will hold value from this variable
"""
cdftype = self.type()
if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value:
return numpy.dtype('S' + str(self._nelems()))
try:
return lib.numpytypedict[cdftype]
except KeyError:
raise CDFError(const.BAD_DATA_TYPE)
def type(self, new_type=None):
"""
Returns or sets the CDF type of this variable
Parameters
==========
new_type : ctypes.c_long
the new type from :mod:`~pycdf.const`
Returns
=======
out : int
CDF type
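Examples
========
A minimal sketch, assuming ``Flux`` is a CDF_REAL4 zVariable (name and
type are illustrative only); the returned integer can be compared to
the constants in :mod:`~pycdf.const`:
>>> Flux.type() == pycdf.const.CDF_REAL4.value
True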
"""
if new_type is not None:
if not hasattr(new_type, 'value'):
new_type = ctypes.c_long(new_type)
n_elements = ctypes.c_long(self._nelems())
self._call(const.PUT_, const.zVAR_DATASPEC_,
new_type, n_elements)
self._type = None
if self._type is None:
cdftype = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_DATATYPE_,
ctypes.byref(cdftype))
self._type = cdftype.value
return self._type
def _nelems(self):
"""Number of elements for each value in this variable
This is the length of strings for CHAR and UCHAR,
should be 1 otherwise.
@return: length of strings
@rtype: int
"""
nelems = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems))
return nelems.value
def name(self):
"""
Returns the name of this variable
Returns
=======
out : str
variable's name
"""
if isinstance(self._name, str):
return self._name
elif isinstance(self._name, bytes):
return self._name.decode()
def compress(self, comptype=None, param=None):
"""
Set or check the compression of this variable
Compression may not be changeable on variables with data already
written; even deleting the data may not permit the change.
See section 2.6 of the CDF user's guide for more information on
compression.
Other Parameters
================
comptype : ctypes.c_long
type of compression to change to, see CDF C reference
manual section 4.10. Constants for this parameter
are in :mod:`~pycdf.const`. If not specified, will not
change compression.
param : ctypes.c_long
Compression parameter, see CDF CRM 4.10 and
:mod:`~pycdf.const`.
If not specified, will choose reasonable default (5 for
gzip; other types have only one possible parameter.)
Returns
=======
out : tuple
the (comptype, param) currently in effect
"""
return _compress(self, comptype, param)
def copy(self):
"""
Copies all data and attributes from this variable
Returns
=======
out : :class:`VarCopy`
list of all data in record order
"""
return VarCopy(self)
def rename(self, new_name):
"""
Renames this variable
Parameters
==========
new_name : str
the new name for this variable
"""
try:
enc_name = new_name.encode('ascii')
except AttributeError:
enc_name = new_name
if len(enc_name) > const.CDF_VAR_NAME_LEN256:
raise CDFError(const.BAD_VAR_NAME)
self._call(const.PUT_, const.zVAR_NAME_, enc_name)
self.cdf_file.add_to_cache(
enc_name,
self.cdf_file.var_num(self._name)) #Still in cache
del self.cdf_file._var_nums[self._name]
self._name = enc_name
@property
def shape(self):
"""
Provides the numpy array-like shape of this variable.
Returns a tuple; first element is number of records (RV variable
only) And the rest provide the dimensionality of the variable.
.. note::
Assigning to this attribute will not change the shape.
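Examples
--------
A minimal sketch, assuming ``Flux`` is a record-varying zVariable with
100 records and dimensions [2, 3] (names and sizes are illustrative
only):
>>> Flux.shape
(100, 2, 3)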
"""
if self.rv():
return tuple([len(self)] + self._dim_sizes())
else:
return tuple(self._dim_sizes())
@property
def dtype(self):
"""
Provide the numpy dtype equivalent to the CDF type of this variable.
Data from this variable will be returned in numpy arrays of this type.
See Also
--------
type
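Examples
--------
A minimal sketch, assuming ``Epoch`` is a CDF_EPOCH zVariable (the
name is illustrative only); time types are returned as objects
(:class:`~datetime.datetime`):
>>> Epoch.dtype
dtype('O')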
"""
cdftype = self.type()
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
str is not bytes and not self._raw:
return numpy.dtype('U' + str(self._nelems()))
if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value,
const.CDF_TIME_TT2000.value) and not self._raw:
return numpy.dtype('O')
return self._np_type()
def _get_attrs(self):
"""Get attribute list
Provide access to the zVar's attribute list without holding a
strong reference, as the attribute list has a (strong)
back-reference to its parent.
Either deref a weak reference (to try and keep the object the same),
or make a new AttrList instance and assign it to the weak reference
for next time.
"""
al = self._attrlistref()
if al is None:
al = zAttrList(self)
self._attrlistref = weakref.ref(al)
return al
def _set_attrs(self, value):
"""Assign to the attribute list
Clears all elements of the attribute list and copies from value
"""
self.attrs.clone(value)
attrs = property(
_get_attrs, _set_attrs, None,
"""zAttributes for this zVariable in a dict-like format.
See :class:`zAttrList` for details.
""")
class _Hyperslice(object):
"""Represents a CDF 'slice' used for the hyper CDF functions
For internal module use only.
@ivar dims: number of dimensions to this slice, usually
number of dimensions to the variable plus one
for the record, which represents the 0th
(least rapidly varying) dimension.
@type dims: int
@ivar dimsizes: size of each dimension (0th is number of records)
@type dimsizes: list of int
@ivar starts: index of the start value for each dimension
('dimension indices' in CDF speak)
@type starts: list of int
@ivar counts: number of values to get from each dimension.
Final result will be the product of everything
in counts.
('dimension counts' in CDF speak)
@type counts: numpy.array
@ivar intervals: interval between successive indices
to use for each dimension.
('dimension intervals' in CDF speak)
@type intervals: list of int
@ivar degen: is this dimension degenerate, i.e. should be
removed in the returned dataset. A 3D array
with one dimension degenerate will be returned
as a 2D array (i.e. list-of-lists.)
@type degen: numpy.array
@ivar rev: should this dimension be returned in reverse order?
@type rev: numpy.array
@ivar column: is this slice in column-major mode (if false, row-major)
@type column: boolean
@ivar zvar: what CDF variable this object slices on
@type zvar: :py:class:`pycdf.Var`
@ivar expanded_key: fully-expanded version of the key passed to the
constructor (all dimensions filled in)
@type expanded_key: tuple
@note: All dimension-related variables are stored row-major
(Python order)
"""
def __init__(self, zvar, key):
"""Create a Hyperslice
@param zvar: zVariable that this slices
@type zvar: :py:class:`pycdf.Var`
@param key: Python multi-dimensional slice as passed to
__getitem__
@type key: tuple of slice and/or int
@raise IndexError: if slice is out of range, mismatches dimensions, or
otherwise unparsable.
@raise ValueError: if slice has invalid values
"""
self.zvar = zvar
self.rv = self.zvar.rv()
#dim of records, + 1 record dim (NRV always is record 0)
self.dims = zvar._n_dims() + 1
self.dimsizes = [len(zvar)] + \
zvar._dim_sizes()
self.starts = [0] * self.dims
self.counts = numpy.empty((self.dims,), dtype=numpy.int32)
self.counts.fill(1)
self.intervals = [1] * self.dims
self.degen = numpy.zeros(self.dims, dtype=bool)
self.rev = numpy.zeros(self.dims, dtype=bool)
#key is:
#1. a single value (integer or slice object) if called 1D
#2. a tuple (of integers and/or slice objects) if called nD
#3. Each item is either a single value (degenerate dim)
# or a slice object.
if not hasattr(key, '__len__'): #Not a container object, pack in tuple
key = (key, )
if not self.rv:
key = (0, ) + key #NRV, so always get 0th record (degenerate)
key = self.expand_ellipsis(key, self.dims)
if self.rv: #special-cases for RV variables
if len(key) == 1: #get all data for this record(s)
key = self.expand_ellipsis(key + (Ellipsis, ), self.dims)
elif len(key) == self.dims - 1: #get same slice from each record
key = (slice(None, None, None), ) + key
if len(key) == self.dims:
self.expanded_key = key
for i in range(self.dims):
idx = key[i]
if hasattr(idx, 'start'): #slice
(self.starts[i], self.counts[i],
self.intervals[i], self.rev[i]) = \
self.convert_range(idx.start, idx.stop,
idx.step, self.dimsizes[i])
else: #Single degenerate value
if idx < 0:
idx += self.dimsizes[i]
if idx != 0 and (idx >= self.dimsizes[i] or idx < 0):
raise IndexError('list index out of range')
self.starts[i] = idx
self.degen[i] = True
else:
raise IndexError('Slice does not match dimensions for zVar ' +
str(zvar._name))
self.column = zvar.cdf_file.col_major()
def expected_dims(self, data=None):
"""Calculate size of non-degenerate dimensions
Figures out size, in each dimension, of expected input data
@return: size of each dimension for this slice, excluding degenerate
@rtype: list of int
"""
return [self.counts[i] for i in range(self.dims) if not self.degen[i]]
def expand(self, data):
"""Expands the record dimension of this slice to hold a set of data
If the length of data (outermost dimension) is larger than the record
count (counts[0]) for this slice, expand the slice to hold all the data.
This requires that the record dimension of the slice not be degenerate,
and also that it not have been completely specified when the hyperslice
was created (i.e. record dimension either ellipsis or no specified
stop.)
Does *not* expand any other dimension, since that's Very Hard in CDF.
@param data: the data which are intended to be stored in this slice
@type data: list
"""
rec_slice = self.expanded_key[0]
if not self.rv or isinstance(data, str_classes) or self.degen[0] or \
not hasattr(rec_slice, 'stop'):
return
if len(data) < self.counts[0]: #Truncate to fit data
if rec_slice.stop is None and rec_slice.step in (None, 1):
self.counts[0] = len(data)
elif len(data) > self.counts[0]: #Expand to fit data
if rec_slice.step in (None, 1):
self.counts[0] = len(data)
def create_array(self):
"""Creates a numpy array to hold the data from this slice
Returns
=======
out : numpy.array
array sized, typed, and dimensioned to hold data from
this slice
"""
counts = self.counts
degen = self.degen
if self.column:
counts = self.reorder(counts)
degen = self.reorder(degen)
#TODO: Forcing C order for now, revert to using self.column later
array = numpy.empty(
[counts[i] for i in range(len(counts)) if not degen[i]],
self.zvar._np_type(), order='C')
return numpy.require(array, requirements=('C', 'A', 'W'))
def convert_input_array(self, buffer):
"""Converts a buffer of raw data from this slice
EPOCH(16) variables always need to be converted.
CHAR need converted to Unicode if py3k
Parameters
==========
buffer : numpy.array
data as read from the CDF file
Returns
=======
out : numpy.array
converted data
"""
result = self._flip_array(buffer)
#Convert to derived types
cdftype = self.zvar.type()
if not self.zvar._raw:
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
str != bytes:
dt = numpy.dtype('U{0}'.format(result.dtype.itemsize))
result = numpy.require(numpy.char.array(result).decode(),
dtype=dt)
elif cdftype == const.CDF_EPOCH.value:
result = lib.v_epoch_to_datetime(result)
elif cdftype == const.CDF_EPOCH16.value:
result = lib.v_epoch16_to_datetime(result)
elif cdftype == const.CDF_TIME_TT2000.value:
result = lib.v_tt2000_to_datetime(result)
return result
def convert_output_array(self, buffer):
"""Convert a buffer of data that will go into this slice
Parameters
==========
buffer : numpy.array
data to go into the CDF file
Returns
=======
out : numpy.array
input with majority flipped and dimensions reversed to be
suitable to pass directly to CDF library.
"""
buffer = self._flip_array(buffer)
return numpy.require(buffer, requirements=('C', 'A', 'W'))
def _flip_array(self, data):
"""
Operations for majority, etc. common between convert_input and _output
"""
cdftype = self.zvar.type()
#Flip majority if any non-degenerate dimensions exist
if self.column and not min(self.degen):
#Record-number dim degen, swap whole thing
if self.degen[0]:
if cdftype == const.CDF_EPOCH16.value:
#Maintain last dimension
data = data.transpose(
list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose()
#Record-number dimension is not degenerate, so keep it first
else:
if cdftype == const.CDF_EPOCH16.value:
data = data.transpose(
[0] + list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose(
[0] + list(range(len(data.shape) - 1, 0, -1)))
#Reverse non-degenerate dimensions in rev
#Remember that the degenerate indices are already gone!
if self.rev.any():
sliced = [(slice(None, None, -1) if self.rev[i] else slice(None))
for i in range(self.dims) if not self.degen[i]]
if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim
                sliced.append(slice(None))
data = operator.getitem(data, tuple(sliced))
return data
def select(self):
"""Selects this hyperslice in the CDF
Calls the CDF library to select the CDF, variable, records, and
array elements corresponding to this slice.
"""
args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]),
const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]),
const.SELECT_, const.zVAR_RECINTERVAL_,
ctypes.c_long(self.intervals[0]))
if self.dims > 1:
dims = self.dims - 1
args += (const.SELECT_, const.zVAR_DIMINDICES_,
(ctypes.c_long * dims)(*self.starts[1:]),
const.SELECT_, const.zVAR_DIMCOUNTS_,
(ctypes.c_long * dims)(*self.counts[1:]),
const.SELECT_, const.zVAR_DIMINTERVALS_,
(ctypes.c_long * dims)(*self.intervals[1:]))
self.zvar._call(*args)
@staticmethod
def expand_ellipsis(slices, n_dims):
"""Expands any ellipses into correct number of full-size slices
@param slices: tuple of slices, integers, or ellipse objects
@type slices: tuple
@param n_dims: number of dimensions this slice is over
@type n_dims: int
@return: L{slices} with ellipses replaced by appropriate number of
full-dimension slices
@rtype: tuple
@raise IndexError: if ellipses specified when already have enough
dimensions
"""
if slices is Ellipsis:
return tuple([slice(None, None, None)
for i in range(n_dims)])
#Elements might be numpy arrays, so can't use in/index
idx = [i for i, v in enumerate(slices) if v is Ellipsis]
if not idx: #no ellipsis
return slices
if len(idx) > 1: #multiples!
raise IndexError('Ellipses can only be used once per slice.')
idx = idx[0]
#how many dims to expand ellipsis to
#remember the ellipsis is in len(slices) and must be replaced!
extra = n_dims - len(slices) + 1
if extra < 0:
raise IndexError('too many indices')
result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:]
return result
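    # Illustrative sketch (not part of the original module): how the
    # ellipsis expansion behaves for a hypothetical slice over three
    # dimensions. The ellipsis is replaced by enough full slices to bring
    # the total up to n_dims:
    # >>> _Hyperslice.expand_ellipsis((Ellipsis, 0), 3)
    # (slice(None, None, None), slice(None, None, None), 0)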
@staticmethod
def check_well_formed(data):
"""Checks if input data is well-formed, regular array"""
d = numpy.asanyarray(data)
        if d.dtype == object: #this is probably going to be bad
try:
len(d.flat[0])
except TypeError: #at least it's not a list
pass
else:
raise ValueError(
'Data must be well-formed, regular array of number, '
'string, or datetime')
@staticmethod
def dimensions(data):
"""Finds the dimensions of a nested list-of-lists
@param data: data of which dimensions are desired
@type data: list (of lists)
@return: dimensions of L{data}, in order outside-in
@rtype: list of int
@raise ValueError: if L{data} has irregular dimensions
"""
d = numpy.asanyarray(data)
_Hyperslice.check_well_formed(d)
return d.shape
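    # Illustrative sketch (not part of the original module): dimensions()
    # simply reports the numpy shape of well-formed input, while ragged
    # input is rejected via check_well_formed:
    # >>> _Hyperslice.dimensions([[1, 2, 3], [4, 5, 6]])
    # (2, 3)
    # >>> _Hyperslice.dimensions([[1, 2], [3]])  # ragged: raises ValueError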
@staticmethod
def types(data, backward=False):
"""Find dimensions and valid types of a nested list-of-lists
Any given data may be representable by a range of CDF types; infer
the CDF types which can represent this data. This breaks down to:
1. Proper kind (numerical, string, time)
2. Proper range (stores highest and lowest number)
3. Sufficient resolution (EPOCH16 required if datetime has
microseconds or below.)
If more than one value satisfies the requirements, types are returned
in preferred order:
1. Type that matches precision of data first, then
2. integer type before float type, then
3. Smallest type first, then
4. signed type first, then
5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1)
So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies
below the millisecond level (rule 1), but otherwise EPOCH is preferred
(rule 2).
For floats, four-byte is preferred unless eight-byte is required:
1. absolute values between 0 and 3e-39
2. absolute values greater than 1.7e38
This will switch to an eight-byte double in some cases where four bytes
would be sufficient for IEEE 754 encoding, but where DEC formats would
require eight.
@param data: data for which dimensions and CDF types are desired
@type data: list (of lists)
@param backward: limit to pre-CDF3 types
@type backward: bool
@return: dimensions of L{data}, in order outside-in;
CDF types which can represent this data;
number of elements required (i.e. length of longest string)
@rtype: 3-tuple of lists ([int], [ctypes.c_long], [int])
@raise ValueError: if L{data} has irregular dimensions
"""
d = numpy.asanyarray(data)
dims = d.shape
elements = 1
types = []
_Hyperslice.check_well_formed(d)
if d.dtype.kind in ('S', 'U'): #it's a string
types = [const.CDF_CHAR, const.CDF_UCHAR]
elements = d.dtype.itemsize
            if d.dtype.kind == 'U': #numpy Unicode is UCS-4, 4 bytes per char
elements //= 4
elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'):
if max((dt.microsecond % 1000 for dt in d.flat)) > 0:
types = [const.CDF_EPOCH16, const.CDF_EPOCH,
const.CDF_TIME_TT2000]
else:
types = [const.CDF_EPOCH, const.CDF_EPOCH16,
const.CDF_TIME_TT2000]
if backward:
del types[types.index(const.CDF_EPOCH16)]
del types[-1]
elif not lib.supports_int8:
del types[-1]
elif d is data or isinstance(data, numpy.generic):
#numpy array came in, use its type (or byte-swapped)
types = [k for k in lib.numpytypedict
if (lib.numpytypedict[k] == d.dtype
or lib.numpytypedict[k] == d.dtype.newbyteorder())
and not k in lib.timetypes]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8.value in types:
del types[types.index(const.CDF_INT8.value)]
#Maintain priority to match the ordered lists below:
#float/double (44, 45) before real (21/22), and
#byte (41) before int (1) before char (51). So hack.
#Consider making typedict an ordered dict once 2.6 is dead.
types.sort(key=lambda x: x % 50, reverse=True)
if not types: #not a numpy array, or can't parse its type
if d.dtype.kind == 'O': #Object. Try to make it numeric
#Can't do safe casting from Object, so try and compare
#Basically try most restrictive to least restrictive
trytypes = (numpy.uint64, numpy.int64, numpy.float64)
for t in trytypes:
try:
newd = d.astype(dtype=t)
except: #Failure to cast, try next type
continue
if (newd == d).all(): #Values preserved, use this type
d = newd
#Continue with normal guessing, as if a list
break
else:
#fell through without a match
raise ValueError(
'Cannot convert generic objects to CDF type.')
if d.dtype.kind in ('i', 'u'): #integer
minval = numpy.min(d)
maxval = numpy.max(d)
if minval < 0:
types = [const.CDF_BYTE, const.CDF_INT1,
const.CDF_INT2, const.CDF_INT4, const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
else:
types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1,
const.CDF_INT2, const.CDF_UINT2,
const.CDF_INT4, const.CDF_UINT4,
const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 8,
2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
types = [t for (t, c) in zip(types, cutoffs) if c > maxval
and (minval >= 0 or minval >= -c)]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8 in types:
del types[types.index(const.CDF_INT8)]
else: #float
                    if dims == ():
if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
else:
absolutes = numpy.abs(d[d != 0])
if len(absolutes) > 0 and \
(numpy.max(absolutes) > 1.7e38 or
numpy.min(absolutes) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
types = [t.value if hasattr(t, 'value') else t for t in types]
return (dims, types, elements)
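    # Illustrative sketch (not part of the original module): for a small
    # list of non-negative integers every listed integer and float type
    # fits, so the preferred (first) candidate is CDF_BYTE and a single
    # element is needed:
    # >>> dims, types, elements = _Hyperslice.types([1, 2, 3])
    # >>> dims, types[0] == const.CDF_BYTE.value, elements
    # ((3,), True, 1)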
@staticmethod
def reorder(seq):
"""Reorders seq to switch array majority
Used to take an array of subscripts between row
and column majority. First element is not touched,
being the record number.
@param seq: a sequence of *subscripts*
@type seq: sequence of integers
@return: seq with all but element 0 reversed in order
@rtype: sequence of integers
"""
return numpy.concatenate((seq[0:1],
numpy.flipud(seq)[:-1]))
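    # Illustrative sketch (not part of the original module): element 0 (the
    # record number) stays put and the remaining subscripts are reversed:
    # >>> _Hyperslice.reorder([0, 1, 2, 3])
    # array([0, 3, 2, 1])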
@staticmethod
def convert_range(start, stop, step, size):
"""Converts a start/stop/step range to start/count/interval
(i.e. changes from Python-style slice to CDF-style)
@param start: index to start a slice at, may be none or negative
@type start: int
@param stop: index at end of slice (one-past, standard Python),
may be none or negative
@type stop: int
        @param step: interval for stepping through slice
@type step: int
@param size: size of list to slice
@type size: int
@return: (start, count, interval, rev) where:
1. start is the start index, normalized to be within
the size of the list and negatives handled
2. count is the number of records in the slice,
guaranteed to stop before the end
3. interval is the skip between records
4. rev indicates whether the sequence should be reversed
@rtype: (int, int, int, boolean)
"""
(start, stop, step) = slice(start, stop, step).indices(size)
if step < 0:
step *= -1
count = int((start - stop + step - 1) / step)
start = start - (count - 1) * step
rev = True
else:
count = int((stop - start + step - 1) / step)
rev = False
if count < 0:
count = 0
start = 0
return (start, count, step, rev)
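    # Illustrative sketch (not part of the original module): a forward
    # Python slice maps directly onto start/count/interval, while a
    # reversed slice becomes a forward read plus a reverse flag:
    # >>> _Hyperslice.convert_range(2, 8, 2, 10)
    # (2, 3, 2, False)
    # >>> _Hyperslice.convert_range(None, None, -1, 10)
    # (0, 10, 1, True)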
class Attr(MutableSequence):
"""An attribute, g or z, for a CDF
.. warning::
This class should not be used directly, but only in its
subclasses, :class:`gAttr` and :class:`zAttr`. The methods
listed here are safe to use in the subclasses.
Represents a CDF attribute, providing access to the Entries in a format
that looks like a Python
list. General list information is available in the python docs:
`1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
`2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
`3 <http://docs.python.org/library/stdtypes.html#typesseq>`_.
An introduction to CDF attributes can be found in section 2.4 of
the CDF user's guide.
Each element of the list is a single Entry of the appropriate type.
The index to the elements is the Entry number.
Multi-dimensional slicing is *not* supported; an Entry with multiple
elements will have all elements returned (and can thus be sliced itself).
Example:
>>> first_three = attribute[5, 0:3] #will fail
>>> first_three = attribute[5][0:3] #first three elements of 5th Entry
.. autosummary::
~Attr.append
~Attr.has_entry
~Attr.insert
~Attr.max_idx
~Attr.new
~Attr.number
~Attr.rename
~Attr.type
.. automethod:: append
.. automethod:: has_entry
.. automethod:: insert
.. automethod:: max_idx
.. automethod:: new
.. automethod:: number
.. automethod:: rename
.. automethod:: type
"""
def __init__(self, cdf_file, attr_name, create=False):
"""Initialize this attribute
@param cdf_file: CDF file containing this attribute
@type cdf_file: :py:class:`pycdf.CDF`
@param attr_name: Name of this attribute
@type attr_name: str
@param create: True to create attribute, False to look up existing.
@type create: bool
"""
self._cdf_file = cdf_file
self._raw = False
if isinstance(attr_name, str_classes):
try:
self._name = attr_name.encode('ascii')
except AttributeError:
self._name = attr_name
attrno = ctypes.c_long()
if create:
self._cdf_file._call(const.CREATE_, const.ATTR_,
self._name, self.SCOPE,
ctypes.byref(attrno))
self._cdf_file.add_attr_to_cache(
self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE)
else: #Ensure exists, and populate cache. See scope note below
attrno, scope = self._cdf_file.attr_num(self._name)
else:
name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1)
scope = ctypes.c_long(0)
self._cdf_file._call(const.SELECT_, const.ATTR_,
ctypes.c_long(attr_name))
            #Because it's possible to create a gAttr Python object
#referencing an Attribute with variable scope, and vice-versa,
#do NOT assume the scope matches
#(Higher level code checks for that being a bad thing.)
self._cdf_file._call(
const.GET_, const.ATTR_NAME_, name,
const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
self._name = name.value.rstrip()
if scope.value == const.GLOBAL_SCOPE.value:
scope = True
elif scope.value == const.VARIABLE_SCOPE.value:
scope = False
else:
raise CDFError(const.BAD_SCOPE)
self._cdf_file.add_attr_to_cache(self._name, attr_name, scope)
def __getitem__(self, key):
"""Return a slice of Entries.
Because Attributes may be sparse, a multi-element slice will return
None for those elements which do not have associated Entries.
@param key: index or range of Entry number to return
@type key: slice or int
@return: a list of entries, appropriate type.
@raise IndexError: if L{key} is an int and that Entry number does not
exist.
"""
if key is Ellipsis:
key = slice(None, None, None)
if hasattr(key, 'indices'):
idx = range(*key.indices(self.max_idx() + 1))
return [self._get_entry(i) if self.has_entry(i) else None
for i in idx]
else:
if self.has_entry(key):
return self._get_entry(key)
else:
raise IndexError('list index ' + str(key) + ' out of range.')
def _check_other_entries(self, types):
"""Try to get the type of this entry from others in the Attribute
For zAttrs, checks if all other Entries are the same type, and at
least one doesn't match its zVar, i.e. Entry type dominates (otherwise
assumption is the Var type dominates).
For gAttrs, checks all other Entries, and gives priority to the
one that's earliest in the possible type list and exists in other
Entries.
This is only one component of Entry type guessing!
:param list types: CDF types that are candidates (match the data)
:return: The type discerned from other Entries, or None
"""
if self.ENTRY_ == const.zENTRY_:
#If everything else is the same entry type,
#and one is not the same as its var, probably
#all entries should be of that type
cand_et = None #The Entry type that might work
one_var_diff = False #One Var has a type different from Entry
for num in range(self.max_idx() + 1):
if not self.has_entry(num):
continue
vartype = self._cdf_file[num].type()
entrytype = self.type(num)
if vartype != entrytype:
one_var_diff = True
if cand_et is None:
if not entrytype in types:
return None #One var has Entry with "impossible" type
cand_et = entrytype
elif cand_et != entrytype:
return None #Two vars have Entries with different types
if one_var_diff and cand_et is not None:
return cand_et
else:
# Of those types which exist in other entries,
# find the one which is earliest
# in types, i.e. the preferred type
entrytypes = [self.type(num) for num in
range(self.max_idx() + 1)
if self.has_entry(num)]
entrytypes = [et for et in entrytypes if et in types]
if entrytypes:
return types[
min([types.index(et) for et in entrytypes])]
return None
def __setitem__(self, key, data):
"""Set a slice of Entries.
@param key: index or range of Entry numbers to set
@type key: slice or int
@param data: the data to set these entries to. Normally each entry should
be a sequence; if a scalar is provided, it is treated
as a single-element list.
@type data: scalar or list
@raise ValueError: if size of {data} does not match size of L{key}
@note: Attributes do not 'grow' or 'shrink' as entries are added
or removed. Indexes of entries never change and there is no
way to 'insert'.
"""
if key is Ellipsis:
key = slice(None, None, None)
if not hasattr(key, 'indices'):
#Single value, promote everything a dimension
idx = (key, key + 1, 1)
data = [data]
else:
idx = key.indices(self.max_idx() + 1)
if key.step is None or key.step > 0:
#Iterating forward, extend slice to match data
if len(data) > len(range(*idx)):
idx = (idx[0], idx[0] + idx[2] * len(data), idx[2])
#get, and check, types and sizes for all data
#checks first so don't have error after changing half the Entries
data_idx = -1
typelist = []
for i in range(*idx):
data_idx += 1
if data_idx >= len(data):
continue
datum = data[data_idx]
if datum is None:
                typelist.append((None, None, None))
continue
(dims, types, elements) = _Hyperslice.types(
datum, backward=self._cdf_file.backward)
if len(types) <= 0:
raise ValueError('Cannot find a matching CDF type.')
if len(dims) > 1:
raise ValueError('Entries must be scalar or 1D.')
elif len(dims) == 1 and isinstance(datum[0], str_classes):
raise ValueError('Entry strings must be scalar.')
entry_type = None
if self.has_entry(i): #If the entry already exists, match its type
entry_type = self.type(i)
if not entry_type in types:
entry_type = None
if entry_type is None: #Check other entries for this attribute
entry_type = self._check_other_entries(types)
if entry_type is None and self.ENTRY_ == const.zENTRY_:
#Fall back to zVar type
vartype = self._cdf_file[i].type()
if vartype in types:
entry_type = vartype
else:
entry_type = types[0]
elif entry_type is None:
entry_type = types[0]
if not entry_type in lib.numpytypedict:
raise ValueError('Cannot find a matching numpy type.')
typelist.append((dims, entry_type, elements))
data_idx = -1
for i in range(*idx):
data_idx += 1
if data_idx >= len(data) or data[data_idx] is None:
if self.has_entry(i):
del self[i]
continue
datum = data[data_idx]
(dims, entry_type, elements) = typelist[data_idx]
self._write_entry(i, datum, entry_type, dims, elements)
def __delitem__(self, key):
"""Delete a slice of Entries.
@param key: index or range of Entry numbers to delete
@type key: slice or int
@note: Attributes do not 'grow' or 'shrink' as entries are added
or removed. Indexes of entries never change and there is no
way to 'insert'.
"""
if key is Ellipsis:
key = slice(None, None, None)
if not hasattr(key, 'indices'):
idx = (key, key + 1, 1)
else:
idx = key.indices(self.max_idx() + 1)
for i in range(*idx):
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i),
const.DELETE_, self.ENTRY_)
def __iter__(self, current=0):
"""Iterates over all entries in this Attribute
Returns data from one entry at a time until reaches the end.
@note: Returned in entry-number order.
"""
while current <= self.max_idx():
if self.has_entry(current):
value = yield(self._get_entry(current))
if value != None:
current = value
current += 1
def __reversed__(self, current=None):
"""Iterates over all entries in this Attribute
Returns data from one entry at a time, starting at end and going
to beginning.
@note: Returned in entry-number order.
"""
if current is None:
current = self.max_idx()
while current >= 0:
if self.has_entry(current):
value = yield(self._get_entry(current))
if value != None:
current = value
current -= 1
def __len__(self):
"""Number of Entries for this Attr. NOT same as max Entry number.
@return: Number of Entries
@rtype: int
"""
count = ctypes.c_long(0)
self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count))
return count.value
def __repr__(self):
"""Returns representation of an attribute
Cannot return anything that can be eval'd to create a copy of the
        attribute, so just wrap the informal representation in angle brackets.
@return: all the data in this attribute
@rtype: str
"""
return '<\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the attribute
This is an 'informal' representation in that it cannot be evaluated
directly to create an L{Attr}.
@return: all the data in this attribute
@rtype: str
"""
if self._cdf_file._opened:
return '\n'.join([str(item) for item in self])
else:
if isinstance(self._name, str):
return 'Attribute "{0}" in closed CDF {1}'.format(
self._name, self._cdf_file.pathname)
else:
return 'Attribute "{0}" in closed CDF {1}'.format(
self._name.decode('ascii'),
self._cdf_file.pathname.decode('ascii'))
def insert(self, index, data):
"""Insert an entry at a particular number
Inserts entry at particular number while moving all subsequent
entries to one entry number later. Does not close gaps.
Parameters
==========
index : int
index where to put the new entry
data :
data for the new entry
"""
max_entry = self.max_idx()
if index > max_entry: #Easy case
self[index] = data
return
for i in range(max_entry, index - 1, -1):
if self.has_entry(i+1):
self.__delitem__(i+1)
if self.has_entry(i):
self.new(self.__getitem__(i), type=self.type(i), number=i+1)
self[index] = data
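    # Illustrative sketch (not part of the original module), assuming an
    # attribute whose Entries 0 and 1 currently hold 'a' and 'b': after
    # attr.insert(0, 'c') the Entries are 0='c', 1='a', 2='b'; pre-existing
    # gaps in the Entry numbering are preserved rather than closed.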
def append(self, data):
"""Add an entry to end of attribute
Puts entry after last defined entry (does not fill gaps)
Parameters
==========
data :
data for the new entry
"""
self[self.max_idx() + 1] = data
def _call(self, *args, **kwargs):
"""Select this CDF and Attr and call the CDF internal interface
@param args: Passed directly to the CDF library interface.
@type args: various, see :py:mod:`ctypes`.
@return: CDF status from the library
@rtype: ctypes.c_long
@note: Terminal NULL_ is automatically added to L{args}.
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
"""
return self._cdf_file._call(
const.SELECT_, const.ATTR_,
ctypes.c_long(self._cdf_file.attr_num(self._name)[0]),
*args, **kwargs)
def _entry_len(self, number):
"""Number of elements in an Entry
@param number: number of Entry
@type number: int
@return: number of elements
@rtype: int
"""
if not self.has_entry(number):
raise IndexError('list index ' + str(number) + ' out of range.')
count = ctypes.c_long(0)
self._call(
const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count))
return count.value
def type(self, number, new_type=None):
"""Find or change the CDF type of a particular Entry number
Parameters
==========
number : int
number of Entry to check or change
Other Parameters
================
new_type
type to change this Entry to, from :mod:`~pycdf.const`.
Omit to only check type.
Returns
=======
out : int
CDF variable type, see :mod:`~pycdf.const`
Notes
=====
If changing types, old and new must be equivalent, see CDF
User's Guide section 2.5.5 pg. 57
"""
if new_type != None:
if not hasattr(new_type, 'value'):
new_type = ctypes.c_long(new_type)
size = ctypes.c_long(self._entry_len(number))
status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.PUT_, self.ENTRY_DATASPEC_, new_type, size,
ignore=(const.NO_SUCH_ENTRY,))
if status == const.NO_SUCH_ENTRY:
raise IndexError('list index ' + str(number) + ' out of range.')
cdftype = ctypes.c_long(0)
status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype),
ignore=(const.NO_SUCH_ENTRY,))
if status == const.NO_SUCH_ENTRY:
raise IndexError('list index ' + str(number) + ' out of range.')
return cdftype.value
def has_entry(self, number):
"""Check if this attribute has a particular Entry number
Parameters
==========
number : int
number of Entry to check or change
Returns
=======
out : bool
True if ``number`` is a valid entry number; False if not
"""
status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_,
ctypes.c_long(number),
ignore=(const.NO_SUCH_ENTRY, ))
return not status == const.NO_SUCH_ENTRY
def max_idx(self):
"""Maximum index of Entries for this Attr
Returns
=======
out : int
maximum Entry number
"""
count = ctypes.c_long(0)
self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count))
return count.value
def new(self, data, type=None, number=None):
"""Create a new Entry in this Attribute
.. note:: If ``number`` is provided and an Entry with that number
already exists, it will be overwritten.
Parameters
==========
data
data to put in the Entry
Other Parameters
================
type : int
type of the new Entry, from :mod:`~pycdf.const`
(otherwise guessed from ``data``)
number : int
Entry number to write, default is lowest available number.
"""
if number is None:
number = 0
while self.has_entry(number):
number += 1
(dims, types, elements) = _Hyperslice.types(
data, backward=self._cdf_file.backward)
if type is None:
#Guess based on other entries
type = self._check_other_entries(types)
if type is None and self.ENTRY_ == const.zENTRY_:
#Try to match variable type
vartype = self._cdf_file[number].type()
if vartype in types:
type = vartype
if type is None:
type = types[0]
elif hasattr(type, 'value'):
type = type.value
self._write_entry(number, data, type, dims, elements)
def number(self):
"""Find the attribute number for this attribute
Returns
=======
out : int
attribute number
"""
no = ctypes.c_long(0)
self._cdf_file._call(const.GET_, const.ATTR_NUMBER_,
self._name, ctypes.byref(no))
return no.value
def global_scope(self):
"""Determine scope of this attribute.
Returns
=======
out : bool
True if global (i.e. gAttr), False if zAttr
"""
return self._cdf_file.attr_num(self._name)[1]
def rename(self, new_name):
"""Rename this attribute
Renaming a zAttribute renames it for *all* zVariables in this CDF!
Parameters
==========
new_name : str
the new name of the attribute
"""
try:
enc_name = new_name.encode('ascii')
except AttributeError:
enc_name = new_name
if len(enc_name) > const.CDF_ATTR_NAME_LEN256:
raise CDFError(const.BAD_ATTR_NAME)
self._call(const.PUT_, const.ATTR_NAME_, enc_name)
self._cdf_file.add_attr_to_cache(
enc_name,
*self._cdf_file.attr_num(self._name)) #still in cache
del self._cdf_file._attr_info[self._name]
self._name = enc_name
def _get_entry(self, number):
"""Read an Entry associated with this L{Attr}
@param number: number of Entry to return
@type number: int
@return: data from entry numbered L{number}
@rtype: list or str
"""
if not self.has_entry(number):
raise IndexError('list index ' + str(number) + ' out of range.')
#Make a big enough buffer
length = self._entry_len(number)
cdftype = self.type(number)
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
buff = numpy.empty((), 'S{0}'.format(length), order='C')
else:
if not cdftype in lib.numpytypedict:
raise CDFError(const.BAD_DATA_TYPE)
buff = numpy.empty((length,), lib.numpytypedict[cdftype],
order='C')
buff = numpy.require(buff, requirements=('C', 'A', 'W'))
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_DATA_,
buff.ctypes.data_as(ctypes.c_void_p))
#decode
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
if str == bytes or self._raw: #Py2k, leave as bytes
result = bytes(buff)
else: #Py3k, make unicode
result = str(numpy.char.array(buff).decode())
else:
if not self._raw:
if cdftype == const.CDF_EPOCH.value:
result = lib.v_epoch_to_datetime(buff)
elif cdftype == const.CDF_EPOCH16.value:
result = lib.v_epoch16_to_datetime(buff)
elif cdftype == const.CDF_TIME_TT2000.value:
result = lib.v_tt2000_to_datetime(buff)
else:
result = buff
else:
result = buff
if length == 1:
result = result[0]
return result
def _write_entry(self, number, data, cdf_type, dims, elements):
"""Write an Entry to this Attr.
@param number: number of Entry to write
@type number: int
@param data: data to write
@param cdf_type: the CDF type to write, from :py:mod:`pycdf.const`
@param dims: dimensions of L{data}
@type dims: list
@param elements: number of elements in L{data}, 1 unless it is a string
@type elements: int
"""
if len(dims) == 0:
n_write = 1
else:
n_write = dims[0]
if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.dtype('S' + str(elements)))
n_write = elements
elif cdf_type == const.CDF_EPOCH16.value:
if not self._raw:
try:
data = lib.v_datetime_to_epoch16(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_EPOCH.value:
if not self._raw:
try:
                    data = lib.v_datetime_to_epoch(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_TIME_TT2000.value:
if not self._raw:
try:
data = lib.v_datetime_to_tt2000(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.int64)
elif cdf_type in lib.numpytypedict:
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=lib.numpytypedict[cdf_type])
else:
raise CDFError(const.BAD_DATA_TYPE)
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type),
ctypes.c_long(n_write),
data.ctypes.data_as(ctypes.c_void_p))
def _delete(self):
"""Delete this Attribute
Also deletes all Entries associated with it.
"""
self._call(const.DELETE_, const.ATTR_)
self._cdf_file.clear_attr_from_cache(self._name)
self._name = None
class zAttr(Attr):
"""zAttribute for zVariables within a CDF.
.. warning::
Because zAttributes are shared across all variables in a CDF,
directly manipulating them may have unexpected consequences.
It is safest to operate on zEntries via :class:`zAttrList`.
.. note::
When accessing a zAttr, pyCDF exposes only the zEntry corresponding
to the associated zVariable.
See Also
========
:class:`Attr`
"""
ENTRY_ = const.zENTRY_
ENTRY_DATA_ = const.zENTRY_DATA_
SCOPE = const.VARIABLE_SCOPE
ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_
ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_
ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_
ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_
ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_
ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_
def insert(self, index, data):
"""Insert entry at particular index number
Since there can only be one zEntry per zAttr, this cannot be
implemented.
Raises
======
NotImplementedError : always
"""
raise NotImplementedError
def append(self, index, data):
"""Add entry to end of attribute list
Since there can only be one zEntry per zAttr, this cannot be
implemented.
Raises
======
NotImplementedError : always
"""
raise NotImplementedError
class gAttr(Attr):
"""Global Attribute for a CDF
Represents a CDF attribute, providing access to the gEntries in a format
that looks like a Python list. General list information is available in
the python docs:
`1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
`2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
`3 <http://docs.python.org/library/stdtypes.html#typesseq>`_.
Normally accessed by providing a key to a :class:`gAttrList`:
>>> attribute = cdffile.attrs['attribute_name']
>>> first_gentry = attribute[0]
Each element of the list is a single gEntry of the appropriate type.
The index to the elements is the gEntry number.
A gEntry may be either a single string or a 1D array of numerical type.
Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR)
with a single element are returned as scalars; multiple-element entries
are returned as a list. No provision is made for accessing below
the entry level; the whole list is returned at once (but Python's
slicing syntax can be used to extract individual items from that list.)
Multi-dimensional slicing is *not* supported; an entry with multiple
elements will have all elements returned (and can thus be sliced itself).
Example:
>>> first_three = attribute[5, 0:3] #will fail
>>> first_three = attribute[5][0:3] #first three elements of 5th Entry
gEntries are *not* necessarily contiguous; a gAttribute may have an
entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the
*number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined
gEntry number and :meth:`~Attr.has_entry` to determine if a particular
gEntry number exists. Iterating over all entries is also supported::
>>> entrylist = [entry for entry in attribute]
Deleting gEntries will leave a "hole":
>>> attribute[0:3] = [1, 2, 3]
>>> del attribute[1]
>>> attribute.has_entry(1)
False
>>> attribute.has_entry(2)
True
    >>> print(attribute[0:3])
[1, None, 3]
Multi-element slices over nonexistent gEntries will return ``None`` where
no entry exists. Single-element indices for nonexistent gEntries will
raise ``IndexError``. Assigning ``None`` to a gEntry will delete it.
When assigning to a gEntry, the type is chosen to match the data;
subject to that constraint, it will try to match
(in order):
#. existing gEntry of the same number in this gAttribute
#. other gEntries in this gAttribute
#. data-matching constraints described in :meth:`CDF.new`.
See Also
========
:class:`Attr`
"""
ENTRY_ = const.gENTRY_
ENTRY_DATA_ = const.gENTRY_DATA_
SCOPE = const.GLOBAL_SCOPE
ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_
ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_
ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_
ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_
ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_
ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_
class AttrList(MutableMapping):
"""Object representing a list of attributes.
.. warning::
This class should not be used directly, but only via its
subclasses, :class:`gAttrList` and :class:`zAttrList`.
Methods listed here are safe to use from the subclasses.
.. autosummary::
~AttrList.clone
~AttrList.copy
~AttrList.from_dict
~AttrList.new
~AttrList.rename
.. automethod:: clone
.. automethod:: copy
.. automethod:: from_dict
.. automethod:: new
.. automethod:: rename
"""
def __init__(self, cdf_file, special_entry=None):
"""Initialize the attribute collection
@param cdf_file: CDF these attributes are in
@type cdf_file: :py:class:`pycdf.CDF`
@param special_entry: callable which returns a "special" entry number,
used to limit results for zAttrs to those which match the zVar
(i.e. the var number)
@type special_entry: callable
"""
self._cdf_file = cdf_file
self.special_entry = special_entry
def __getitem__(self, name):
"""Find an Attribute by name
@param name: name of the Attribute to return
@type name: str
@return: attribute named L{name}
@rtype: L{Attr}
@raise KeyError: if there is no attribute named L{name}
@raise CDFError: other errors in CDF library
"""
try:
attrib = self.AttrType(self._cdf_file, name)
except CDFError:
(t, v, tb) = sys.exc_info()
if v.status == const.NO_SUCH_ATTR:
raise KeyError(name + ': ' + str(v))
else:
raise
if attrib.global_scope() != self.global_scope:
raise KeyError(name + ': no ' + self.attr_name + ' by that name.')
return attrib
def __setitem__(self, name, data):
"""Create an Attribute or change its entries
@param name: name of Attribute to change
@type name: str
@param data: Entries to populate this Attribute with.
Any existing Entries will be deleted!
Another C{Attr} may be specified, in which
case all its entries are copied.
@type data: scalar, list, or L{Attr}
"""
if isinstance(data, AttrList):
if name in self:
del self[name]
attr = self._get_or_create(name)
for entryno in range(data.max_idx()):
if data.has_entry(entryno):
attr.new(data[entryno], data.type(entryno), entryno)
else:
attr = self._get_or_create(name)
if isinstance(data, str_classes):
data = [data]
else:
try:
junk = len(data)
except TypeError:
data = [data]
attr[:] = data
del attr[len(data):]
def __delitem__(self, name):
"""Delete an Attribute (and all its entries)
@param name: name of Attribute to delete
@type name: str
"""
try:
attr = self.AttrType(self._cdf_file, name)
except CDFError:
(t, v, tb) = sys.exc_info()
if v.status == const.NO_SUCH_ATTR:
raise KeyError(name + ': ' + str(v))
else:
raise
if attr.global_scope() != self.global_scope:
raise KeyError(name + ': not ' + self.attr_name)
attr._delete()
def __iter__(self, current=0):
"""Iterates over all Attr in this CDF or variable
Returns name of one L{Attr} at a time until reaches the end.
@note: Returned in number order.
"""
count = ctypes.c_long(0)
self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
ctypes.byref(count))
while current < count.value:
candidate = self.AttrType(self._cdf_file, current)
if candidate.global_scope() == self.global_scope:
if self.special_entry is None or \
candidate.has_entry(self.special_entry()):
if str == bytes:
value = yield(candidate._name)
else:
value = yield(candidate._name.decode())
if value != None:
current = self[value].number()
current += 1
def __repr__(self):
"""Returns representation of attribute list
Cannot return anything that can be eval'd to create a copy of the
list, so just wrap the informal representation in angle brackets.
@return: all the data in this list of attributes
@rtype: str
"""
return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the attribute list
This is an 'informal' representation in that it cannot be evaluated
directly to create an L{AttrList}.
@return: all the data in this list of attributes
@rtype: str
"""
if self._cdf_file._opened:
return '\n'.join([key + ': ' + (
('\n' + ' ' * (len(key) + 2)).join(
[str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']'
for i in range(value.max_idx() + 1) if value.has_entry(i)])
if isinstance(value, Attr)
else str(value) +
' [' + lib.cdftypenames[self.type(key)] + ']'
)
for (key, value) in sorted(self.items())])
else:
if isinstance(self._cdf_file.pathname, str):
return 'Attribute list in closed CDF {0}'.format(
self._cdf_file.pathname)
else:
return 'Attribute list in closed CDF {0}'.format(
self._cdf_file.pathname.decode('ascii'))
def clone(self, master, name=None, new_name=None):
"""
Clones another attribute list, or one attribute from it, into this
list.
Parameters
==========
master : AttrList
the attribute list to copy from. This can be any dict-like object.
Other Parameters
================
name : str (optional)
name of attribute to clone (default: clone entire list)
new_name : str (optional)
name of the new attribute, default ``name``
"""
if name is None:
self._clone_list(master)
else:
self._clone_attr(master, name, new_name)
def copy(self):
"""
Create a copy of this attribute list
Returns
=======
out : dict
copy of the entries for all attributes in this list
"""
return dict((key, value[:] if isinstance(value, Attr) else value)
for (key, value) in self.items())
def new(self, name, data=None, type=None):
"""
Create a new Attr in this AttrList
Parameters
==========
name : str
name of the new Attribute
Other Parameters
================
data
data to put into the first entry in the new Attribute
type
CDF type of the first entry from :mod:`~pycdf.const`.
Only used if data are specified.
Raises
======
KeyError : if the name already exists in this list
"""
if name in self:
raise KeyError(name + ' already exists.')
#A zAttr without an Entry in this zVar will be a "get" not "create"
attr = self._get_or_create(name)
if data is not None:
if self.special_entry is None:
attr.new(data, type)
else:
attr.new(data, type, self.special_entry())
def rename(self, old_name, new_name):
"""
Rename an attribute in this list
Renaming a zAttribute renames it for *all* zVariables in this CDF!
Parameters
==========
old_name : str
the current name of the attribute
new_name : str
the new name of the attribute
"""
AttrList.__getitem__(self, old_name).rename(new_name)
def from_dict(self, in_dict):
"""
Fill this list of attributes from a dictionary
.. deprecated:: 0.1.5
Use :meth:`~pycdf.AttrList.clone` instead; it supports
cloning from dictionaries.
Parameters
==========
in_dict : dict
Attribute list is populated entirely from this dictionary;
all existing attributes are deleted.
"""
warnings.warn("from_dict is deprecated and will be removed. Use clone.",
DeprecationWarning)
for k in in_dict:
self[k] = in_dict[k]
for k in list(self):
if not k in in_dict:
del self[k]
def _clone_attr(self, master, name, new_name=None):
"""Clones a single attribute from one in this list or another
Copies data and types from the master attribute to the new one
@param master: attribute list to copy attribute from
@type master: L{AttrList}
@param name: name of attribute to copy
@type name: str
@param new_name: name of the new attribute, default L{name}
@type new_name: str
"""
if new_name is None:
new_name = name
self[new_name] = master[name]
def _clone_list(self, master):
"""Clones this attribute list from another
@param master: the attribute list to copy from
@type master: L{AttrList}
"""
for name in master:
self._clone_attr(master, name)
for name in list(self): #Can't iterate over a list we're changing
if not name in master:
del self[name]
def _get_or_create(self, name):
"""Retrieve L{Attr} or create it if it doesn't exist
@param name: name of the attribute to look up or create
@type name: str
@return: attribute with this name
@rtype: L{Attr}
"""
attr = None
try:
attr = self.AttrType(self._cdf_file, name)
except CDFError:
(t, v, tb) = sys.exc_info()
if v.status != const.NO_SUCH_ATTR:
raise
if attr is None:
attr = self.AttrType(self._cdf_file, name, True)
elif attr.global_scope() != self.global_scope:
raise KeyError(name + ': not ' + self.attr_name)
return attr
class gAttrList(AttrList):
"""
Object representing *all* the gAttributes in a CDF.
Normally accessed as an attribute of an open :class:`CDF`:
>>> global_attribs = cdffile.attrs
Appears as a dictionary: keys are attribute names; each value is an
attribute represented by a :class:`gAttr` object. To access the global
attribute TEXT:
>>> text_attr = cdffile.attrs['TEXT']
See Also
========
:class:`AttrList`
"""
AttrType = gAttr
attr_name = 'gAttribute'
global_scope = True
def __len__(self):
"""
Number of gAttributes in this CDF
Returns
=======
out : int
number of gAttributes in the CDF
"""
count = ctypes.c_long(0)
self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_,
ctypes.byref(count))
return count.value
class zAttrList(AttrList):
"""Object representing *all* the zAttributes in a zVariable.
Normally accessed as an attribute of a :class:`Var` in an open
CDF:
>>> epoch_attribs = cdffile['Epoch'].attrs
Appears as a dictionary: keys are attribute names, values are
the value of the zEntry associated with the appropriate zVariable.
Each vAttribute in a CDF may only have a *single* entry associated
with each variable. The entry may be a string, a single numerical value,
or a series of numerical values. Entries with multiple values are returned
as an entire list; direct access to the individual elements is not
possible.
Example: finding the first dependency of (ISTP-compliant) variable
``Flux``:
    >>> print(cdffile['Flux'].attrs['DEPEND_0'])
zAttributes are shared among zVariables, one zEntry allowed per zVariable.
(pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will
delete the underlying zAttribute.
zEntries are created and destroyed by the usual dict methods on the
zAttrlist:
>>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry
>>> del epoch_attribs['new_entry'] #delete the zEntry
The type of the zEntry is guessed from data provided. The type is chosen to
match the data; subject to that constraint, it will try to match
(in order):
#. existing zEntry corresponding to this zVar
#. other zEntries in this zAttribute
#. the type of this zVar
#. data-matching constraints described in :py:meth:`CDF.new`
See Also
========
:class:`AttrList`
"""
AttrType = zAttr
attr_name = 'zAttribute'
global_scope = False
def __init__(self, zvar):
"""Initialize the attribute collection
@param zvar: zVariable these attributes are in
        @type zvar: :py:class:`pycdf.Var`
"""
super(zAttrList, self).__init__(zvar.cdf_file, zvar._num)
self._zvar = zvar
def __getitem__(self, name):
"""Find an zEntry by name
@param name: name of the zAttribute to return
@type name: str
@return: attribute named L{name}
@rtype: L{zAttr}
@raise KeyError: if there is no attribute named L{name} associated
with this zVariable
@raise CDFError: other errors in CDF library
"""
attrib = super(zAttrList, self).__getitem__(name)
zvar_num = self._zvar._num()
if attrib.has_entry(zvar_num):
attrib._raw = self._zvar._raw
return attrib[zvar_num]
else:
raise KeyError(name + ': no such attribute for variable ' +
self._zvar.name())
def __delitem__(self, name):
"""Delete an zEntry by name
@param name: name of the zEntry to delete
@type name: str
@raise KeyError: if there is no attribute named L{name} associated
with this zVariable
@raise CDFError: other errors in CDF library
@note: If this is the only remaining entry, the Attribute will be
deleted.
"""
attrib = super(zAttrList, self).__getitem__(name)
zvar_num = self._zvar._num()
if not attrib.has_entry(zvar_num):
raise KeyError(str(name) + ': no such attribute for variable ' +
str(self._zvar._name))
del attrib[zvar_num]
if len(attrib) == 0:
attrib._delete()
def __setitem__(self, name, data):
"""Sets a zEntry by name
The type of the zEntry is guessed from L{data}. The type is chosen to
match the data; subject to that constraint, it will try to match
(in order):
1. existing zEntry corresponding to this zVar
2. other zEntries in this zAttribute
3. the type of this zVar
4. data-matching constraints described in L{_Hyperslice.types}
@param name: name of zAttribute; zEntry for this zVariable will be set
in zAttribute by this name
@type name: str
@raise CDFError: errors in CDF library
@raise ValueError: if unable to find a valid CDF type matching L{data},
or if L{data} is the wrong dimensions.
"""
try:
attr = super(zAttrList, self).__getitem__(name)
except KeyError:
attr = zAttr(self._cdf_file, name, True)
attr._raw = self._zvar._raw
attr[self._zvar._num()] = data
def __len__(self):
"""Number of zAttributes in this variable
@return: number of zAttributes in the CDF
which have entries for this variable.
@rtype: int
"""
length = 0
count = ctypes.c_long(0)
self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
ctypes.byref(count))
current = 0
while current < count.value:
candidate = zAttr(self._cdf_file, current)
if not candidate.global_scope():
if candidate.has_entry(self._zvar._num()):
length += 1
current += 1
return length
def type(self, name, new_type=None):
"""Find or change the CDF type of a zEntry in this zVar
@param name: name of the zAttr to check or change
@type name: str
@param new_type: type to change it to, see :py:mod:`pycdf.const`
@type new_type: ctypes.c_long
@return: CDF variable type, see :py:mod:`pycdf.const`
@rtype: int
@note: If changing types, old and new must be equivalent, see CDF
User's Guide section 2.5.5 pg. 57
"""
attrib = super(zAttrList, self).__getitem__(name)
zvar_num = self._zvar._num()
if not attrib.has_entry(zvar_num):
raise KeyError(name + ': no such attribute for variable ' +
self._zvar.name())
return attrib.type(zvar_num, new_type)
def _clone_attr(self, master, name, new_name=None):
"""Clones a single attribute from one in this list or another
Copies data and types from the master attribute to the new one
@param master: attribute list to copy attribute from
@type master: L{zAttrList}
@param name: name of attribute to copy
@type name: str
@param new_name: name of the new attribute, default L{name}
@type new_name: str
"""
if new_name is None:
new_name = name
if new_name in self:
del self[new_name]
self.new(new_name, master[name],
master.type(name) if hasattr(master, 'type') else None)
| 37.378262 | 118 | 0.572838 | [
"Unlicense"
] | cpiker/condaCDF | pycdf/__init__.py | 199,114 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating GCE container (Docker) deployments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import re
import enum
from googlecloudsdk.api_lib.compute import exceptions
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
USER_INIT_TEMPLATE = """#cloud-config
runcmd:
- ['/usr/bin/kubelet',
'--allow-privileged=%s',
'--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest',
'--manifest-url-header=Metadata-Flavor:Google',
'--config=/etc/kubernetes/manifests']
"""
MANIFEST_DISCLAIMER = """# DISCLAIMER:
# This container declaration format is not a public API and may change without
# notice. Please use gcloud command-line tool or Google Cloud Console to run
# Containers on Google Compute Engine.
"""
USER_DATA_KEY = 'user-data'
CONTAINER_MANIFEST_KEY = 'google-container-manifest'
GCE_CONTAINER_DECLARATION = 'gce-container-declaration'
STACKDRIVER_LOGGING_AGENT_CONFIGURATION = 'google-logging-enabled'
GKE_DOCKER = 'gci-ensure-gke-docker'
ALLOWED_PROTOCOLS = ['TCP', 'UDP']
# Prefix of all COS image major release names
COS_MAJOR_RELEASE_PREFIX = 'cos-stable-'
# Pin this version of gcloud to COS image major release version
COS_MAJOR_RELEASE = COS_MAJOR_RELEASE_PREFIX + '55'
COS_PROJECT = 'cos-cloud'
_MIN_PREFERRED_COS_VERSION = 63
# Translation from CLI to API wording
RESTART_POLICY_API = {
'never': 'Never',
'on-failure': 'OnFailure',
'always': 'Always'
}
class MountVolumeMode(enum.Enum):
READ_ONLY = 1,
READ_WRITE = 2,
def isReadOnly(self):
return self == MountVolumeMode.READ_ONLY
_DEFAULT_MODE = MountVolumeMode.READ_WRITE
def _GetUserInit(allow_privileged):
"""Gets user-init metadata value for COS image."""
allow_privileged_val = 'true' if allow_privileged else 'false'
return USER_INIT_TEMPLATE % (allow_privileged_val)
class Error(exceptions.Error):
"""Base exception for containers."""
class InvalidMetadataKeyException(Error):
"""InvalidMetadataKeyException is for not allowed metadata keys."""
def __init__(self, metadata_key):
super(InvalidMetadataKeyException, self).__init__(
'Metadata key "{0}" is not allowed when running containerized VM.'
.format(metadata_key))
class NoGceContainerDeclarationMetadataKey(Error):
"""Raised on attempt to update-container on instance without containers."""
def __init__(self):
super(NoGceContainerDeclarationMetadataKey, self).__init__(
"Instance doesn't have {} metadata key - it is not a container.".format(
GCE_CONTAINER_DECLARATION))
def ValidateUserMetadata(metadata):
"""Validates if user-specified metadata.
Checks if it contains values which may conflict with container deployment.
Args:
metadata: user-specified VM metadata.
Raises:
InvalidMetadataKeyException: if there is conflict with user-provided
metadata
"""
for entry in metadata.items:
if entry.key in [USER_DATA_KEY, CONTAINER_MANIFEST_KEY, GKE_DOCKER]:
raise InvalidMetadataKeyException(entry.key)
def CreateTagsMessage(messages, tags):
"""Create tags message with parameters for container VM or VM templates."""
if tags:
return messages.Tags(items=tags)
def GetLabelsMessageWithCosVersion(
labels, image_uri, resources, resource_class):
"""Returns message with labels for instance / instance template.
Args:
labels: dict, labels to assign to the resource.
image_uri: URI of image used as a base for the resource. The function
extracts COS version from the URI and uses it as a value of
`container-vm` label.
resources: object that can parse image_uri.
resource_class: class of the resource to which labels will be assigned.
Must contain LabelsValue class and
resource_class.LabelsValue must contain AdditionalProperty
class.
"""
cos_version = resources.Parse(
image_uri, collection='compute.images').Name().replace('/', '-')
if labels is None:
labels = {}
labels['container-vm'] = cos_version
additional_properties = [
resource_class.LabelsValue.AdditionalProperty(key=k, value=v)
for k, v in sorted(six.iteritems(labels))]
return resource_class.LabelsValue(additionalProperties=additional_properties)
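# Illustrative sketch (not part of the original module), using a hypothetical
# image URI: for
# '.../projects/cos-cloud/global/images/cos-stable-65-10323-69-0' the parsed
# image name is 'cos-stable-65-10323-69-0', so the returned LabelsValue
# carries 'container-vm': 'cos-stable-65-10323-69-0' alongside any
# user-supplied labels.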
class NoCosImageException(Error):
"""Raised when COS image could not be found."""
def __init__(self):
super(NoCosImageException, self).__init__(
'Could not find COS (Cloud OS) for release family \'{0}\''
.format(COS_MAJOR_RELEASE))
def ExpandCosImageFlag(compute_client):
"""Select a COS image to run Docker."""
compute = compute_client.apitools_client
images = compute_client.MakeRequests([(
compute.images,
'List',
compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT)
)])
return _SelectNewestCosImage(images)
def _SelectNewestCosImage(images):
"""Selects newest COS image from the list."""
cos_images = sorted([image for image in images
if image.name.startswith(COS_MAJOR_RELEASE)],
key=lambda x: times.ParseDateTime(x.creationTimestamp))
if not cos_images:
raise NoCosImageException()
return cos_images[-1].selfLink
def _ValidateAndParsePortMapping(port_mappings):
"""Parses and validates port mapping."""
ports_config = []
for port_mapping in port_mappings:
mapping_match = re.match(r'^(\d+):(\d+):(\S+)$', port_mapping)
if not mapping_match:
raise calliope_exceptions.InvalidArgumentException(
'--port-mappings',
'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.')
port, target_port, protocol = mapping_match.groups()
if protocol not in ALLOWED_PROTOCOLS:
raise calliope_exceptions.InvalidArgumentException(
'--port-mappings',
'Protocol should be one of [{0}]'.format(
', '.join(ALLOWED_PROTOCOLS)))
ports_config.append({
'containerPort': int(target_port),
'hostPort': int(port),
'protocol': protocol})
return ports_config
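# Illustrative sketch (not part of the original module): a single valid
# mapping parses into one ports_config entry.
# >>> _ValidateAndParsePortMapping(['8080:80:TCP'])
# [{'containerPort': 80, 'hostPort': 8080, 'protocol': 'TCP'}]
# A mapping such as '8080:80:ICMP' raises InvalidArgumentException because
# only TCP and UDP are allowed.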
def ExpandKonletCosImageFlag(compute_client):
"""Select a COS image to run Konlet.
This function scans three families in order:
- stable
- beta
- dev
looking for the first image with version at least _MIN_PREFERRED_COS_VERSION.
Args:
compute_client: ClientAdapter, The Compute API client adapter
Returns:
COS image at version _MIN_PREFERRED_COS_VERSION or later.
Raises:
NoCosImageException: No COS image at version at least
_MIN_PREFERRED_COS_VERSION was found. This should not happen if backend is
healthy.
"""
compute = compute_client.apitools_client
images = compute_client.MakeRequests(
[(compute.images,
'List',
compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))])
name_re_template = r'cos-{}-(\d+)-.*'
image_families = ['stable', 'beta', 'dev']
for family in image_families:
name_re = name_re_template.format(family)
def MakeCreateComparisonKey(name_re):
def CreateComparisonKey(image):
version = int(re.match(name_re, image.name).group(1))
timestamp = times.ParseDateTime(image.creationTimestamp)
return version, timestamp
return CreateComparisonKey
cos_images = sorted(
[image for image in images if re.match(name_re, image.name)],
key=MakeCreateComparisonKey(name_re))
if (cos_images and MakeCreateComparisonKey(name_re)(cos_images[-1])[0] >=
_MIN_PREFERRED_COS_VERSION):
return cos_images[-1].selfLink
raise NoCosImageException()
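# Illustrative sketch (not part of the original module), with hypothetical
# image names: 'cos-stable-65-10323-69-0' matches r'cos-stable-(\d+)-.*'
# with version 65 >= _MIN_PREFERRED_COS_VERSION, so the newest such stable
# image would be returned; if the newest stable image were, say, version 60,
# the beta and then dev families would be tried instead.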
def _ReadDictionary(filename):
# pylint:disable=line-too-long
r"""Read environment variable from file.
File format:
It is intended (but not guaranteed) to follow standard docker format
[](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)
but without capturing environment variables from host machine.
Lines starting by "#" character are comments.
Empty lines are ignored.
Below grammar production follow in EBNF format.
file = (whitespace* statement '\n')*
statement = comment
| definition
whitespace = ' '
| '\t'
comment = '#' [^\n]*
definition = [^#=\n] [^= \t\n]* '=' [^\n]*
Args:
filename: str, name of the file to read
Returns:
A dictionary mapping environment variable names to their values.
"""
env_vars = {}
if not filename:
return env_vars
with files.FileReader(filename) as f:
for i, line in enumerate(f):
# Strip whitespace at the beginning and end of line
line = line.strip()
# Ignore comments and empty lines
if len(line) <= 1 or line[0] == '#':
continue
# Find first left '=' character
assignment_op_loc = line.find('=')
if assignment_op_loc == -1:
raise calliope_exceptions.BadFileException(
'Syntax error in {}:{}: Expected VAR=VAL, got {}'.format(
filename, i, line))
env = line[:assignment_op_loc]
val = line[assignment_op_loc+1:]
if ' ' in env or '\t' in env:
raise calliope_exceptions.BadFileException(
'Syntax error in {}:{} Variable name cannot contain whitespaces,'
' got "{}"'.format(filename, i, env))
env_vars[env] = val
return env_vars
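# Illustrative sketch (not part of the original module): a hypothetical
# environment file containing
#   # build settings
#   FOO=bar
#   EMPTY=
# is read as {'FOO': 'bar', 'EMPTY': ''}; a line like 'BAD NAME=x' raises
# BadFileException because variable names cannot contain whitespace.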
def _GetHostPathDiskName(idx):
return 'host-path-{}'.format(idx)
def _GetTmpfsDiskName(idx):
return 'tmpfs-{}'.format(idx)
def _GetPersistentDiskName(idx):
return 'pd-{}'.format(idx)
def _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
used_names=None, disks=None):
"""Add volume specs from --container-mount-disk."""
used_names = used_names or []
disks = disks or []
idx = 0
for mount_disk in container_mount_disk:
while _GetPersistentDiskName(idx) in used_names:
idx += 1
device_name = mount_disk.get('name')
partition = mount_disk.get('partition')
def _GetMatchingVolume(device_name, partition):
for volume_spec in volumes:
pd = volume_spec.get('gcePersistentDisk', {})
if (pd.get('pdName') == device_name
and pd.get('partition') == partition):
return volume_spec
repeated = _GetMatchingVolume(device_name, partition)
if repeated:
name = repeated['name']
else:
name = _GetPersistentDiskName(idx)
used_names.append(name)
if not device_name:
# This should not be needed - any command that accepts container mount
# disks should validate that there is only one disk before calling this
# function.
if len(disks) != 1:
raise calliope_exceptions.InvalidArgumentException(
'--container-mount-disk',
'Must specify the name of the disk to be mounted unless exactly '
'one disk is attached to the instance.')
device_name = disks[0].get('name')
if disks[0].get('device-name', device_name) != device_name:
raise exceptions.InvalidArgumentException(
'--container-mount-disk',
'Must not have a device-name that is different from disk name if '
'disk is being attached to the instance and mounted to a container:'
' [{}]'.format(disks[0].get('device-name')))
volume_mounts.append({
'name': name,
'mountPath': mount_disk['mount-path'],
'readOnly': mount_disk.get('mode', _DEFAULT_MODE).isReadOnly()})
if repeated:
continue
volume_spec = {
'name': name,
'gcePersistentDisk': {
'pdName': device_name,
'fsType': 'ext4'}}
if partition:
volume_spec['gcePersistentDisk'].update({'partition': partition})
volumes.append(volume_spec)
idx += 1
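# Note added for clarity (not part of the original source): each container_mount_disk
# entry consumed above is a dict parsed from the --container-mount-disk flag, with a
# required 'mount-path' and optional 'name', 'partition' and 'mode' keys
# ('mode' defaults to _DEFAULT_MODE). Values below are hypothetical:
_EXAMPLE_CONTAINER_MOUNT_DISK_ENTRY = {'name': 'data-disk',
                                       'mount-path': '/mounted',
                                       'partition': 1}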
def _CreateContainerManifest(args, instance_name,
container_mount_disk_enabled=False,
container_mount_disk=None):
"""Create container manifest from argument namespace and instance name."""
container = {'image': args.container_image, 'name': instance_name}
if args.container_command is not None:
container['command'] = [args.container_command]
if args.container_arg is not None:
container['args'] = args.container_arg
container['stdin'] = args.container_stdin
container['tty'] = args.container_tty
container['securityContext'] = {'privileged': args.container_privileged}
env_vars = _ReadDictionary(args.container_env_file)
for env_var_dict in args.container_env or []:
for env, val in six.iteritems(env_var_dict):
env_vars[env] = val
if env_vars:
container['env'] = [{
'name': env,
'value': val
} for env, val in six.iteritems(env_vars)]
volumes = []
volume_mounts = []
for idx, volume in enumerate(args.container_mount_host_path or []):
volumes.append({
'name': _GetHostPathDiskName(idx),
'hostPath': {
'path': volume['host-path']
},
})
volume_mounts.append({
'name': _GetHostPathDiskName(idx),
'mountPath': volume['mount-path'],
'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
})
for idx, tmpfs in enumerate(args.container_mount_tmpfs or []):
volumes.append(
{'name': _GetTmpfsDiskName(idx), 'emptyDir': {'medium': 'Memory'}})
volume_mounts.append(
{'name': _GetTmpfsDiskName(idx), 'mountPath': tmpfs['mount-path']})
if container_mount_disk_enabled:
container_mount_disk = container_mount_disk or []
disks = (args.disk or []) + (args.create_disk or [])
_AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
disks=disks)
container['volumeMounts'] = volume_mounts
manifest = {
'spec': {
'containers': [container],
'volumes': volumes,
'restartPolicy': RESTART_POLICY_API[args.container_restart_policy]
}
}
return manifest
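# Illustrative sketch (added for clarity; not part of the original source): for a
# hypothetical instance "vm-1" created with only --container-image=gcr.io/proj/app
# and default flags, the manifest built above is roughly:
#   {'spec': {'containers': [{'image': 'gcr.io/proj/app', 'name': 'vm-1',
#                             'securityContext': {'privileged': False},
#                             'stdin': False, 'tty': False, 'volumeMounts': []}],
#             'volumes': [],
#             'restartPolicy': 'Always'}}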
def DumpYaml(data):
"""Dumps data dict to YAML in format expected by Konlet."""
return MANIFEST_DISCLAIMER + yaml.dump(data)
def _CreateYamlContainerManifest(args, instance_name,
container_mount_disk_enabled=False,
container_mount_disk=None):
"""Helper to create the container manifest."""
return DumpYaml(_CreateContainerManifest(
args, instance_name,
container_mount_disk_enabled=container_mount_disk_enabled,
container_mount_disk=container_mount_disk))
def CreateKonletMetadataMessage(messages, args, instance_name, user_metadata,
container_mount_disk_enabled=False,
container_mount_disk=None):
"""Helper to create the metadata for konlet."""
konlet_metadata = {
GCE_CONTAINER_DECLARATION:
_CreateYamlContainerManifest(
args, instance_name,
container_mount_disk_enabled=container_mount_disk_enabled,
container_mount_disk=container_mount_disk),
      # Since COS 69, writing logs for Container-VMs requires enabling the
      # Stackdriver Logging agent.
STACKDRIVER_LOGGING_AGENT_CONFIGURATION: 'true',
}
return metadata_utils.ConstructMetadataMessage(
messages, metadata=konlet_metadata, existing_metadata=user_metadata)
def UpdateInstance(holder, client, instance_ref, instance, args,
container_mount_disk_enabled=False,
container_mount_disk=None):
"""Update an instance and its container metadata."""
# find gce-container-declaration metadata entry
for metadata in instance.metadata.items:
if metadata.key == GCE_CONTAINER_DECLARATION:
UpdateMetadata(
holder, metadata, args, instance,
container_mount_disk_enabled=container_mount_disk_enabled,
container_mount_disk=container_mount_disk)
# update Google Compute Engine resource
operation = client.apitools_client.instances.SetMetadata(
client.messages.ComputeInstancesSetMetadataRequest(
metadata=instance.metadata, **instance_ref.AsDict()))
operation_ref = holder.resources.Parse(
operation.selfLink, collection='compute.zoneOperations')
operation_poller = poller.Poller(client.apitools_client.instances)
set_metadata_waiter = waiter.WaitFor(
operation_poller, operation_ref,
'Updating specification of container [{0}]'.format(
instance_ref.Name()))
if (instance.status ==
client.messages.Instance.StatusValueValuesEnum.TERMINATED):
return set_metadata_waiter
elif (instance.status ==
client.messages.Instance.StatusValueValuesEnum.SUSPENDED):
return _StopVm(holder, client, instance_ref)
else:
_StopVm(holder, client, instance_ref)
return _StartVm(holder, client, instance_ref)
raise NoGceContainerDeclarationMetadataKey()
def _StopVm(holder, client, instance_ref):
"""Stop the Virtual Machine."""
operation = client.apitools_client.instances.Stop(
client.messages.ComputeInstancesStopRequest(
**instance_ref.AsDict()))
operation_ref = holder.resources.Parse(
operation.selfLink, collection='compute.zoneOperations')
operation_poller = poller.Poller(client.apitools_client.instances)
return waiter.WaitFor(
operation_poller, operation_ref,
'Stopping instance [{0}]'.format(instance_ref.Name()))
def _StartVm(holder, client, instance_ref):
"""Start the Virtual Machine."""
operation = client.apitools_client.instances.Start(
client.messages.ComputeInstancesStartRequest(
**instance_ref.AsDict()))
operation_ref = holder.resources.Parse(
operation.selfLink, collection='compute.zoneOperations')
operation_poller = poller.Poller(client.apitools_client.instances)
return waiter.WaitFor(
operation_poller, operation_ref,
'Starting instance [{0}]'.format(instance_ref.Name()))
def UpdateMetadata(holder, metadata, args, instance,
container_mount_disk_enabled=False,
container_mount_disk=None):
"""Update konlet metadata entry using user-supplied data."""
# precondition: metadata.key == GCE_CONTAINER_DECLARATION
manifest = yaml.load(metadata.value)
if args.IsSpecified('container_image'):
manifest['spec']['containers'][0]['image'] = args.container_image
if args.IsSpecified('container_command'):
manifest['spec']['containers'][0]['command'] = [args.container_command]
if args.IsSpecified('clear_container_command'):
manifest['spec']['containers'][0].pop('command', None)
if args.IsSpecified('container_arg'):
manifest['spec']['containers'][0]['args'] = args.container_arg
if args.IsSpecified('clear_container_args'):
manifest['spec']['containers'][0].pop('args', None)
if args.container_privileged is True:
manifest['spec']['containers'][0]['securityContext']['privileged'] = True
if args.container_privileged is False:
manifest['spec']['containers'][0]['securityContext']['privileged'] = False
if container_mount_disk_enabled:
container_mount_disk = container_mount_disk or []
disks = instance.disks
else:
container_mount_disk = []
# Only need disks for updating the container mount disk.
disks = []
_UpdateMounts(holder, manifest, args.remove_container_mounts or [],
args.container_mount_host_path or [],
args.container_mount_tmpfs or [],
container_mount_disk,
disks)
_UpdateEnv(manifest,
itertools.chain.from_iterable(args.remove_container_env or []),
args.container_env_file, args.container_env or [])
if args.container_stdin is True:
manifest['spec']['containers'][0]['stdin'] = True
if args.container_stdin is False:
manifest['spec']['containers'][0]['stdin'] = False
if args.container_tty is True:
manifest['spec']['containers'][0]['tty'] = True
if args.container_tty is False:
manifest['spec']['containers'][0]['tty'] = False
if args.IsSpecified('container_restart_policy'):
manifest['spec']['restartPolicy'] = RESTART_POLICY_API[
args.container_restart_policy]
metadata.value = DumpYaml(manifest)
def _UpdateMounts(holder, manifest, remove_container_mounts,
container_mount_host_path, container_mount_tmpfs,
container_mount_disk, disks):
"""Updates mounts in container manifest."""
_CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
container_mount_tmpfs,
container_mount_disk=container_mount_disk)
used_names = [volume['name'] for volume in manifest['spec']['volumes']]
volumes = []
volume_mounts = []
next_volume_index = 0
for volume in container_mount_host_path:
while _GetHostPathDiskName(next_volume_index) in used_names:
next_volume_index += 1
name = _GetHostPathDiskName(next_volume_index)
next_volume_index += 1
volumes.append({
'name': name,
'hostPath': {
'path': volume['host-path']
},
})
volume_mounts.append({
'name': name,
'mountPath': volume['mount-path'],
'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
})
for tmpfs in container_mount_tmpfs:
while _GetTmpfsDiskName(next_volume_index) in used_names:
next_volume_index += 1
name = _GetTmpfsDiskName(next_volume_index)
next_volume_index += 1
volumes.append({'name': name, 'emptyDir': {'medium': 'Memory'}})
volume_mounts.append({'name': name, 'mountPath': tmpfs['mount-path']})
if container_mount_disk:
    # Convert to dicts to match the helper's expected input. Disks that were
    # already attached to the instance must have a device name matching the
    # disk name.
disks = [{'device-name': disk.deviceName,
'name': holder.resources.Parse(disk.source).Name()}
for disk in disks]
_AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
used_names=used_names, disks=disks)
manifest['spec']['containers'][0]['volumeMounts'].extend(volume_mounts)
manifest['spec']['volumes'].extend(volumes)
def _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
container_mount_tmpfs, container_mount_disk=None):
"""Remove all specified mounts from container manifest."""
container_mount_disk = container_mount_disk or []
  # mount paths whose corresponding volumeMounts should be removed
mount_paths_to_remove = remove_container_mounts[:]
for host_path in container_mount_host_path:
mount_paths_to_remove.append(host_path['mount-path'])
for tmpfs in container_mount_tmpfs:
mount_paths_to_remove.append(tmpfs['mount-path'])
for disk in container_mount_disk:
mount_paths_to_remove.append(disk['mount-path'])
# volumeMounts stored in this list are used
used_mounts = []
used_mounts_names = []
removed_mount_names = []
for mount in manifest['spec']['containers'][0].get('volumeMounts', []):
if mount['mountPath'] not in mount_paths_to_remove:
used_mounts.append(mount)
used_mounts_names.append(mount['name'])
else:
removed_mount_names.append(mount['name'])
# override volumeMounts
manifest['spec']['containers'][0]['volumeMounts'] = used_mounts
# garbage collect volumes which become orphaned, skip volumes orphaned before
# start of the procedure
used_volumes = []
for volume in manifest['spec'].get('volumes', []):
if (volume['name'] in used_mounts_names or
volume['name'] not in removed_mount_names):
used_volumes.append(volume)
# override volumes
manifest['spec']['volumes'] = used_volumes
def _UpdateEnv(manifest, remove_container_env, container_env_file,
container_env):
"""Update environment variables in container manifest."""
current_env = {}
for env_val in manifest['spec']['containers'][0].get('env', []):
current_env[env_val['name']] = env_val['value']
for env in remove_container_env:
current_env.pop(env, None)
current_env.update(_ReadDictionary(container_env_file))
for env_var_dict in container_env:
for env, val in six.iteritems(env_var_dict):
current_env[env] = val
if current_env:
manifest['spec']['containers'][0]['env'] = [{
'name': env,
'value': val
} for env, val in six.iteritems(current_env)]
| 34.512129 | 117 | 0.694275 | ["MIT"] | bopopescu/JobSniperRails | gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py | 25,608 | Python |
#!/usr/bin/evn python
# -*- coding: utf-8 -*-
# python version 2.7.6
import magic
mime = magic.Magic(mime=True)
print mime.from_file("/Users/mac/Documents/data/fastq/8.fastq")
| 19.777778 | 63 | 0.696629 | ["BSD-3-Clause"] | sdyz5210/python | files/judgeFileType.py | 178 | Python |
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
import numpy as np
import os.path as opth
import tqdm
import os
from sklearn.utils import shuffle
import argparse
HOME = os.path.expanduser('~')
os.environ["CUDA_VISIBLE_DEVICES"] = "2";
layers = tf.keras.layers
parser = argparse.ArgumentParser()
def define_generator():
def conv1d_block(filters, upsample=True, activation=tf.nn.relu, index=0):
if upsample:
model.add(layers.UpSampling1D(name="UpSampling" + str(index), size=2))
model.add(layers.Conv1D(filters=filters, kernel_size=5, padding='same', name="Conv1D" + str(index),
activation=activation))
model.add(layers.BatchNormalization())
model = tf.keras.models.Sequential(name="Generator")
model.add(layers.Dense(int(316), activation=tf.nn.relu, name="NoiseToSpatial")) #50
model.add(layers.BatchNormalization())
model.add(layers.Reshape((int(316),1)))
conv1d_block(filters=512, upsample=True, index=0)
conv1d_block(filters=512, upsample=True, index=1)
conv1d_block(filters=256, upsample=True, index=2)
conv1d_block(filters=256, upsample=True, index=3)
conv1d_block(filters=128, upsample=False, index=4)
conv1d_block(filters=128, upsample=False, index=5)
conv1d_block(filters=64, upsample=False, index=6)
conv1d_block(filters=64, upsample=False, index=7)
conv1d_block(filters=1, upsample=False, activation=tf.nn.tanh, index=8)
return model
class Discriminator:
def __init__(self):
self.tail = self._define_tail()
self.head = self._define_head()
def _define_tail(self, name="Discriminator"):
feature_model = tf.keras.models.Sequential(name=name)
def conv1d_dropout(filters, strides, index=0):
suffix = str(index)
feature_model.add(layers.Conv1D(filters=filters, strides=strides, name="Conv{}".format(suffix), padding='same',
kernel_size=5, activation=tf.nn.leaky_relu))
feature_model.add(layers.Dropout(name="Dropout{}".format(suffix), rate=0.3))
conv1d_dropout(filters=32, strides=2, index=5)
conv1d_dropout(filters=32, strides=2, index=6)
conv1d_dropout(filters=64, strides=2, index=0)
conv1d_dropout(filters=64, strides=2, index=1)
conv1d_dropout(filters=128, strides=2, index=2)
conv1d_dropout(filters=128, strides=2, index=3)
conv1d_dropout(filters=256, strides=1, index=4) #64
conv1d_dropout(filters=256, strides=1, index=7)
feature_model.add(layers.Flatten(name="Flatten")) # This is feature layer for FM loss !!
return feature_model
def _define_head(self):
head_model = tf.keras.models.Sequential(name="DiscriminatorHead")
head_model.add(layers.Dense(units=2048, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=2048, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=1024, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=512, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=args.num_classes, activation=None, name="Logits"))
return head_model
@property
def trainable_variables(self):
return self.tail.trainable_variables + self.head.trainable_variables
def __call__(self, x, *args, **kwargs):
features = self.tail(x, *args, **kwargs)
print(features.shape)
return self.head(features, *args, **kwargs), features
def accuracy(logits, labels):
preds = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.to_float(tf.equal(preds, labels)))
def main(args):
global best_acc
best_acc = 0
with tf.Graph().as_default():
print("Input data preprocessing...")
with tf.name_scope("DataPreprocess"):
r_train = 500.0 / 6.0
r_test = 100.0 / 6.0
nClass = 100 # 95 # 100
mon_instance = 2498.0 # 1000.0 # 300.0
unClass = 0 # 40000 # 30000
unmon_instance = unClass
dim = 5000
with tf.device('/cpu:0'):
(train_x, train_y, test_x_data, test_y_data) = split_awf_closed(r_train, r_test, nClass, mon_instance,
unmon_instance, dim)
def reshape_and_scale(x, img_shape=(-1, dim, 1)):
return x.reshape(img_shape).astype(np.float32)
train_x = reshape_and_scale(train_x)
test_x_data = reshape_and_scale(test_x_data)
# Use AWF2 for unlabled set
awf_data2 = np.load (HOME+'/datasets/awf2.npz', allow_pickle=True)
train_x_unlabeled = awf_data2['data']
train_y_unlabeled = awf_data2['labels']
train_x_unlabeled = reshape_and_scale(train_x_unlabeled)
X, y = shuffle(train_x, train_y)
print(X.shape)
print(y.shape)
print("Setup the input pipeline...")
with tf.name_scope("InputPipeline"):
train_x_labeled, train_y_labeled = [], []
for i in range(args.num_classes):
print(i)
train_x_labeled.append(X[y == i][:args.num_labeled_examples])
train_y_labeled.append(y[y == i][:args.num_labeled_examples])
train_x_labeled_data = np.concatenate(train_x_labeled)
train_y_labeled_data = np.concatenate(train_y_labeled)
train_x_unlabeled_data = train_x_unlabeled#np.concatenate(train_x_unlabeled)
train_y_unlabeled_data = train_y_unlabeled#np.concatenate(train_y_unlabeled)
train_x_unlabeled2, train_y_unlabeled2 = shuffle(train_x_unlabeled, train_y_unlabeled)
train_x_unlabeled2_data = train_x_unlabeled2#np.concatenate(train_x_unlabeled2)
train_y_unlabeled2_data = train_y_unlabeled2#np.concatenate(train_y_unlabeled2)
labeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
labeled_y = tf.placeholder(tf.int64, shape=[None])
unlabeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
unlabeled_y = tf.placeholder(tf.int64, shape=[None])
unlabeled_X2 = tf.placeholder(tf.float32, shape=[None, dim, 1])
unlabeled_y2 = tf.placeholder(tf.int64, shape=[None])
test_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
test_y = tf.placeholder(tf.int64, shape=[None])
train_labeled_dataset = tf.data.Dataset.from_tensor_slices((labeled_X, labeled_y)) \
.shuffle(buffer_size=len(train_x_labeled_data)) \
.repeat()
train_labeled_dataset = train_labeled_dataset.batch(args.batch_size)
iterator_labeled = train_labeled_dataset.make_initializable_iterator()
traces_lab, labels_lab = iterator_labeled.get_next()
train_unlabeled_dataset = tf.data.Dataset.from_tensor_slices(
(unlabeled_X, unlabeled_y, unlabeled_X2, unlabeled_y2)) \
.shuffle(buffer_size=len(train_x_labeled_data)) \
.repeat()
train_unlabeled_dataset = train_unlabeled_dataset.batch(args.batch_size)
iterator_unlabeled = train_unlabeled_dataset.make_initializable_iterator()
traces_unl, labels_unl, traces_unl2, labels_unl2 = iterator_unlabeled.get_next()
test_dataset = tf.data.Dataset.from_tensor_slices((test_X, test_y)) \
.repeat()
test_dataset = test_dataset.batch(args.batch_size)
iterator_test = test_dataset.make_initializable_iterator()
traces_test, labels_test = iterator_test.get_next()
with tf.name_scope("BatchSize"):
batch_size_tensor = tf.shape(traces_lab)[0]
z, z_perturbed = define_noise(batch_size_tensor,args)
with tf.name_scope("Generator"):
g_model = define_generator()
traces_fake = g_model(z)
traces_fake_perturbed = g_model(z_perturbed)
with tf.name_scope("Discriminator") as discriminator_scope:
d_model = Discriminator()
logits_fake, features_fake = d_model(traces_fake, training=True)
logits_fake_perturbed, _ = d_model(traces_fake_perturbed, training=True)
logits_real_unl, features_real_unl = d_model(traces_unl, training=True)
logits_real_lab, features_real_lab = d_model(traces_lab, training=True) # 1) For supervised loss
logits_train, _ = d_model(traces_lab, training=False)
with tf.name_scope("DiscriminatorLoss"):
loss_supervised = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels_lab, logits=logits_real_lab))
logits_sum_real = tf.reduce_logsumexp(logits_real_unl, axis=1)
logits_sum_fake = tf.reduce_logsumexp(logits_fake, axis=1)
loss_unsupervised = 0.5 * (
tf.negative(tf.reduce_mean(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_fake)))
loss_d = loss_supervised + loss_unsupervised
if args.man_reg:
loss_d += 1e-3 * tf.nn.l2_loss(logits_fake - logits_fake_perturbed) \
/ tf.to_float(batch_size_tensor)
with tf.name_scope("Train") as train_scope:
optimizer = tf.train.AdamOptimizer(args.lr * 0.25)
optimize_d = optimizer.minimize(loss_d, var_list=d_model.trainable_variables)
train_accuracy_op = accuracy(logits_train, labels_lab)
with tf.name_scope(discriminator_scope):
with tf.control_dependencies([optimize_d]):
logits_fake, features_fake = d_model(traces_fake, training=True)
logits_real_unl, features_real_unl = d_model(traces_unl2, training=True)
with tf.name_scope("GeneratorLoss"):
feature_mean_real = tf.reduce_mean(features_real_unl, axis=0)
feature_mean_fake = tf.reduce_mean(features_fake, axis=0)
# L1 distance of features is the loss for the generator
loss_g = tf.reduce_mean(tf.abs(feature_mean_real - feature_mean_fake))
with tf.name_scope(train_scope):
optimizer = tf.train.AdamOptimizer(args.lr, beta1=0.5)
train_op = optimizer.minimize(loss_g, var_list=g_model.trainable_variables)
with tf.name_scope(discriminator_scope):
with tf.name_scope("Test"):
logits_test, _ = d_model(traces_test, training=False)
test_accuracy_op = accuracy(logits_test, labels_test)
with tf.name_scope("Summaries"):
summary_op = tf.summary.merge([
tf.summary.scalar("LossDiscriminator", loss_d),
tf.summary.scalar("LossGenerator", loss_g),
tf.summary.scalar("ClassificationAccuracyTrain", train_accuracy_op),
tf.summary.scalar("ClassificationAccuracyTest", test_accuracy_op)])
writer = tf.summary.FileWriter(_next_logdir("tensorboard/wfi5_cw"))
print("Run training...")
steps_per_epoch = (len(train_x_labeled_data) + len(
train_x_unlabeled_data)) // args.batch_size
steps_per_test = test_x_data.shape[0] // args.batch_size
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(args.train_epochs):
losses_d, losses_g, accuracies = [], [], []
print("Epoch {}".format(epoch))
pbar = tqdm.trange(steps_per_epoch)
sess.run(iterator_labeled.initializer,
feed_dict={labeled_X: train_x_labeled_data, labeled_y: train_y_labeled_data})
sess.run(iterator_unlabeled.initializer,
feed_dict={unlabeled_X: train_x_unlabeled_data, unlabeled_y: train_y_unlabeled_data,
unlabeled_X2: train_x_unlabeled2_data, unlabeled_y2: train_y_unlabeled2_data})
sess.run(iterator_test.initializer, feed_dict={test_X: test_x_data, test_y: test_y_data})
for _ in pbar:
if step % 1000 == 0:
_, loss_g_batch, loss_d_batch, summ, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, summary_op, train_accuracy_op])
writer.add_summary(summ, global_step=step)
else:
_, loss_g_batch, loss_d_batch, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, train_accuracy_op])
pbar.set_description("Discriminator loss {0:.3f}, Generator loss {1:.3f}"
.format(loss_d_batch, loss_g_batch))
losses_d.append(loss_d_batch)
losses_g.append(loss_g_batch)
accuracies.append(accuracy_batch)
step += 1
print("Discriminator loss: {0:.4f}, Generator loss: {1:.4f}, "
"Train accuracy: {2:.4f}"
.format(np.mean(losses_d), np.mean(losses_g), np.mean(accuracies)))
accuracies = [sess.run(test_accuracy_op) for _ in range(steps_per_test)]
if np.mean (accuracies) > best_acc:
best_acc = np.mean (accuracies)
if epoch == (int(args.train_epochs)-1):
print ("Test accuracy: {0:.4f}".format (np.mean (accuracies)))
print ("Best accuracy: {0:.4f}".format (best_acc))
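# Note added for clarity (not part of the original script): the discriminator above
# uses the K+1-class semi-supervised GAN loss of Salimans et al. (2016), where
# D(x) = Z(x) / (Z(x) + 1) with Z(x) = sum_k exp(logit_k(x)), and the generator is
# trained by feature matching. A minimal restatement of that generator objective:
def _feature_matching_loss(features_real, features_fake):
    return tf.reduce_mean(tf.abs(tf.reduce_mean(features_real, axis=0) -
                                 tf.reduce_mean(features_fake, axis=0)))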
def define_noise(batch_size_tensor, args):
with tf.name_scope("LatentNoiseVector"):
z = tfd.Normal(loc=0.0, scale=args.stddev).sample(
sample_shape=(batch_size_tensor, args.z_dim_size))
z_perturbed = z + tfd.Normal(loc=0.0, scale=args.stddev).sample(
sample_shape=(batch_size_tensor, args.z_dim_size)) * 1e-5
return z, z_perturbed
def split_awf_closed(r_train, r_test, nClass, mon_instance, unmon_instance, dim):
mon_data = np.load(HOME+'/datasets/awf1.npz', allow_pickle=True)
mon_x = mon_data['feature']
    ## We uniformly sample instances at random from each monitored class
print('mon_instance',mon_instance)
print('unmon_instance',unmon_instance)
num_mtrain_instance = mon_instance * (r_train / (r_train + r_test)) ## number of monitored training instances for each class
mon_random = np.array(range(int(mon_instance)))
np.random.shuffle(mon_random)
mon_train_ins = mon_random[:int(num_mtrain_instance)] #1666
mon_test_ins = mon_random[int(num_mtrain_instance):]
print('mon_test_ins', len(mon_test_ins))
# Due to the memory error, initialize np arrays here first
train_feature = np.zeros((nClass*len(mon_train_ins), dim), dtype=int)
test_feature = np.zeros((nClass*len(mon_test_ins),dim), dtype=int)
print('test_feature', len(test_feature))
train_label = np.zeros((nClass*len(mon_train_ins),), dtype=int)
test_label = np.zeros((nClass*len(mon_test_ins),), dtype=int)
print(len(mon_train_ins))
print(len(mon_test_ins))
i = 0
mon_instance = int(mon_instance)
print('Monitored training set partitioning...')
print(nClass)
print(len(mon_train_ins))
for c in range(nClass):
c=int(c)
print(c)
for instance in mon_train_ins:
train_label[i] = c
train_feature[i] = mon_x[(c*mon_instance)+instance][:dim]
i += 1
print(i)
print('Monitored testing set partitioning...')
j = 0
for c in range(nClass):
c = int(c)
for instance in mon_test_ins:
test_label[j]=c
test_feature[j]=mon_x[(c*mon_instance)+instance][:dim]
j += 1
print(j)
print(j)
print('train_feature: ', len(train_feature))
print('train_label: ', len(train_label))
print('test_feature: ', len(test_feature))
print('test_label: ', len(test_label))
print('train_dim: ', len(train_feature[0]))
print('test_dim: ', len(test_feature[0]))
return train_feature, train_label, test_feature, test_label
def _next_logdir(path):
if not os.path.exists(path):
os.makedirs(path)
subdirs = [d for d in os.listdir(path) if opth.isdir(opth.join(path, d))]
logdir = opth.join(path, "run" + str(len(subdirs)).zfill(4))
if not os.path.exists(logdir):
os.makedirs(logdir)
return logdir
if __name__ == "__main__":
parser.add_argument ('--batch_size', required=False, default=32)
parser.add_argument ('--train_epochs', required=False, default=12)
parser.add_argument ('--lr', required=False, default=2e-4)
parser.add_argument ('--stddev', required=False, default=1e-2)
parser.add_argument ('--num_classes', required=False, default=100)
parser.add_argument ('--z_dim_size', required=False, default=100)
parser.add_argument ('--num_labeled_examples', required=False, default=5)
parser.add_argument ('--man_reg', required=False, default=True)
args = parser.parse_args ()
for i in range(5):
main(args)
| 44.868895 | 129 | 0.641744 | ["MIT"] | traffic-analysis/gandalf | wfi/cw/wfi-cw5.py | 17,454 | Python |
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import json
import os
_ENV_DIR = '/var/db/factory/umpire'
_CONFIG_PATH = os.path.join(_ENV_DIR, 'active_umpire.json')
def SaveNewActiveConfig(config):
"""Serialize and saves the configuration as new active config file."""
json_config = json.dumps(
config, indent=2, separators=(',', ': '), sort_keys=True) + '\n'
json_name = 'umpire.%s.json' % (
hashlib.md5(json_config.encode('utf-8')).hexdigest())
json_path = os.path.join('resources', json_name)
with open(os.path.join(_ENV_DIR, json_path), 'w') as f:
f.write(json_config)
os.unlink(_CONFIG_PATH)
os.symlink(json_path, _CONFIG_PATH)
def Migrate():
with open('/var/db/factory/umpire/active_umpire.json') as f:
config = json.load(f)
if 'rulesets' in config:
for r in config['rulesets']:
r.pop('match', None)
SaveNewActiveConfig(config)
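# Illustrative note (added for clarity; not part of the migration): a ruleset entry
# such as {'bundle_id': 'default', 'active': True, 'match': {...}} (keys are
# hypothetical) is rewritten without its 'match' block, and the whole config is
# re-saved under a content-hash file name with active_umpire.json re-symlinked to it.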
| 29.285714 | 72 | 0.702439 | ["BSD-3-Clause"] | arccode/factory | py/umpire/server/migrations/0010.py | 1,025 | Python |
'''
Created by auto_sdk on 2021.03.10
'''
from dingtalk.api.base import RestApi
class OapiCateringUnfreezeRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.order_id = None
self.rule_code = None
self.userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.catering.unfreeze'
| 21.235294 | 43 | 0.747922 | ["Apache-2.0"] | hth945/pytest | other/dingding/dingtalk/api/rest/OapiCateringUnfreezeRequest.py | 361 | Python |
# Autogenerated file.
from .client import MidiOutputClient # type: ignore
| 24.666667 | 51 | 0.797297 | ["MIT"] | microsoft/jacdac-python | jacdac/midi_output/__init__.py | 74 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.i18n import _
from nova import image
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
migrate_opt = cfg.IntOpt('migrate_max_retries',
default=-1,
help='Number of times to retry live-migration before failing. '
'If == -1, try until out of hosts. '
'If == 0, only try once, no retries.')
CONF = cfg.CONF
CONF.register_opt(migrate_opt)
class LiveMigrationTask(object):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit):
self.context = context
self.instance = instance
self.destination = destination
self.block_migration = block_migration
self.disk_over_commit = disk_over_commit
self.source = instance.host
self.migrate_data = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.image_api = image.API()
def execute(self):
self._check_instance_is_active()
self._check_host_is_up(self.source)
if not self.destination:
self.destination = self._find_destination()
else:
self._check_requested_destination()
# TODO(johngarbutt) need to move complexity out of compute manager
# TODO(johngarbutt) disk_over_commit?
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=self.migrate_data)
def rollback(self):
# TODO(johngarbutt) need to implement the clean up operation
# but this will make sense only once we pull in the compute
# calls, since this class currently makes no state changes,
# except to call the compute method, that has no matching
# rollback call right now.
raise NotImplementedError()
def _check_instance_is_active(self):
if self.instance.power_state not in (power_state.RUNNING,
power_state.PAUSED):
raise exception.InstanceInvalidState(
instance_uuid = self.instance.uuid,
attr = 'power_state',
state = self.instance.power_state,
method = 'live migrate')
def _check_host_is_up(self, host):
try:
service = objects.Service.get_by_compute_host(self.context, host)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=host)
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
self._check_compatible_with_source_hypervisor(self.destination)
self._call_livem_checks_on_host(self.destination)
def _check_destination_is_not_source(self):
if self.destination == self.source:
raise exception.UnableToMigrateToSelf(
instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
avail = self._get_compute_info(self.destination)['free_ram_mb']
mem_inst = self.instance.memory_mb
if not mem_inst or avail <= mem_inst:
instance_uuid = self.instance.uuid
dest = self.destination
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationPreCheckError(reason=reason % dict(
instance_uuid=instance_uuid, dest=dest, avail=avail,
mem_inst=mem_inst))
def _get_compute_info(self, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, host)
def _check_compatible_with_source_hypervisor(self, destination):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info['hypervisor_type']
destination_type = destination_info['hypervisor_type']
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info['hypervisor_version']
destination_version = destination_info['hypervisor_version']
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()
def _call_livem_checks_on_host(self, destination):
self.migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(self.context, self.instance,
destination, self.block_migration, self.disk_over_commit)
def _find_destination(self):
# TODO(johngarbutt) this retry loop should be shared
attempted_hosts = [self.source]
image = utils.get_image_from_system_metadata(
self.instance.system_metadata)
request_spec = scheduler_utils.build_request_spec(self.context, image,
[self.instance])
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
filter_properties = {'ignore_hosts': attempted_hosts}
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_properties)
host = self.scheduler_client.select_destinations(self.context,
request_spec, filter_properties)[0]['host']
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host)
except exception.Invalid as e:
LOG.debug("Skipping host: %(host)s because: %(e)s",
{"host": host, "e": e})
attempted_hosts.append(host)
host = None
return host
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.NoValidHost(reason=msg)
def execute(context, instance, destination,
block_migration, disk_over_commit):
task = LiveMigrationTask(context, instance,
destination,
block_migration,
disk_over_commit)
# TODO(johngarbutt) create a superclass that contains a safe_execute call
return task.execute()
| 41.948454 | 78 | 0.652249 | ["Apache-2.0"] | alvarolopez/nova | nova/conductor/tasks/live_migrate.py | 8,138 | Python |
# Problem: N soldiers stand in a circle. The first soldier holds a sword,
# kills the soldier next to him, and passes the sword to the next survivor.
# With 100 soldiers, the 99th kills the 100th and hands the sword back to
# the first, and the elimination continues around the circle until only one
# soldier remains. Print the survivor.
def josephus(people, step=2):
if step<=1:
print("Enter step value, greater than 1")
else:
step -= 1 # translated to zero-based indexing
kill = step # kill will hold the index of current person to die
while(len(people) > 1):
print(people.pop(kill)) # pop method removes the element from the list
kill = (kill + step) % len(people)
print(people[0], "is safe")
num = int(input("Enter the number of soldiers: "))
soldiers = [i for i in range(1, num+1)] # generates a list of 1..num
josephus(soldiers)
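# Worked example (added for clarity): josephus([1, 2, 3, 4, 5]) with the default
# step of 2 removes soldiers in the order 2, 4, 1, 5 and prints "3 is safe",
# matching the closed-form survivor 2*l + 1 for n = 2**m + l (here n=5, l=1).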
| 37.608696 | 76 | 0.678613 | ["MIT"] | GarvitArya/Python-Interview-Problems-for-Practice | josephus.py | 865 | Python |
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class BiEncoder(nn.Layer):
def __init__(self,question_encoder,context_encoder,dropout,output_emb_size = 768,state=None):
super(BiEncoder, self).__init__()
self.state = state
if self.state == None:
self.question_encoder = question_encoder
self.context_encoder = context_encoder
elif self.state == "FORQUESTION":
self.question_encoder = question_encoder
elif self.state == "FORCONTEXT":
self.context_encoder = context_encoder
self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=0.02))
self.emb_reduce_linear = paddle.nn.Linear(
768, output_emb_size, weight_attr=weight_attr)
def get_question_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.question_encoder(input_ids, token_type_ids, position_ids,attention_mask)
"""cls_embedding = self.emb_reduce_linear(cls_embedding)
cls_embedding = self.dropout(cls_embedding)
cls_embedding = F.normalize(cls_embedding, p=2, axis=-1)"""
return cls_embedding
def get_context_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.context_encoder(input_ids, token_type_ids, position_ids,attention_mask)
"""cls_embedding = self.emb_reduce_linear(cls_embedding)
cls_embedding = self.dropout(cls_embedding)
cls_embedding = F.normalize(cls_embedding, p=2, axis=-1)"""
return cls_embedding
def forward(self,
question_id,
question_segments,
question_attn_mask,
context_ids,
context_segments,
context_attn_mask,
):
question_pooled_out = self.get_question_pooled_embedding(question_id,question_segments,question_attn_mask)
context_pooled_out = self.get_context_pooled_embedding(context_ids,context_segments,context_attn_mask)
return question_pooled_out,context_pooled_out
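# Note added for clarity (not part of the original file): BiEncoderNllLoss below
# implements the in-batch-negatives objective, where every context in the batch acts
# as a negative for every other question. For a hypothetical batch of 4 questions,
# each paired with one positive and one hard-negative context (8 contexts, positive
# first in each pair), positive_idx_per_question would be [0, 2, 4, 6].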
class BiEncoderNllLoss(object):
def calc(self,
q_vectors,
ctx_vectors,
positive_idx_per_question,
loss_scale=None):
        # Score every question against every context in the batch (in-batch negatives).
        scores = paddle.matmul(q_vectors, paddle.transpose(ctx_vectors, [1, 0]))
        if len(q_vectors.shape) > 1:
            q_num = q_vectors.shape[0]
            scores = paddle.reshape(scores, [q_num, -1])
        softmax_scores = F.log_softmax(scores, axis=1)
        loss = F.nll_loss(softmax_scores,
                          paddle.to_tensor(positive_idx_per_question))
        max_score = paddle.max(softmax_scores, axis=1)
        correct_predictions_count = None
        if loss_scale:
            loss = loss * loss_scale
        return loss, correct_predictions_count
| 37.988235 | 114 | 0.633013 | ["Apache-2.0"] | Elvisambition/PaddleNLP | examples/semantic_indexing/biencoder_base_model.py | 3,229 | Python |
import sys
sys.path.append('../scripts')
from detect_duplicates import df
def test_nan_names():
assert df.name.isnull().sum() == 0
def test_dup_pid():
assert df.patient_id.duplicated().sum() == 0
def test_phone_dup():
    assert df.phone_number.duplicated().sum() == 0
| 23.583333 | 50 | 0.689046 | ["MIT"] | monkeyusage/duplicates | tests/test.py | 283 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SettlementbillOpenApiDTO import SettlementbillOpenApiDTO
class AlipayBossFncSettleSettlementbillCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncSettleSettlementbillCreateResponse, self).__init__()
self._result_set = None
@property
def result_set(self):
return self._result_set
@result_set.setter
def result_set(self, value):
if isinstance(value, SettlementbillOpenApiDTO):
self._result_set = value
else:
self._result_set = SettlementbillOpenApiDTO.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayBossFncSettleSettlementbillCreateResponse, self).parse_response_content(response_content)
if 'result_set' in response:
self.result_set = response['result_set']
| 33.466667 | 120 | 0.74004 | ["Apache-2.0"] | Anning01/alipay-sdk-python-all | alipay/aop/api/response/AlipayBossFncSettleSettlementbillCreateResponse.py | 1,004 | Python |
#!/usr/bin/python
import argparse
import subprocess
import json
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
args = parser.parse_args()
result = {"_meta": {"hostvars": {}}}
if args.list:
output = subprocess.check_output([
"cd ../terraform/stage; terraform show -json"
], shell=True)
data = json.loads(output)
group_list = set()
try:
for module in data["values"]["root_module"]["child_modules"]:
try:
for resource in module["resources"]:
if resource["type"] == "null_resource":
continue
group_name = resource["name"]
values = resource["values"]
host_name = values["name"]
ip = values["network_interface"][0]["nat_ip_address"]
if group_name not in result:
result[group_name] = {"hosts": []}
group_list.add(group_name)
result[group_name]["hosts"].append(host_name)
result["_meta"]["hostvars"][host_name] = {
"ansible_host": ip
}
except KeyError:
continue
result["all"] = {"children": list(group_list), "hosts": [], "vars": {}}
except KeyError:
pass
print(json.dumps(result))
else:
print(json.dumps(result))
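# Example of the emitted inventory (illustrative; group/host names and the address
# are hypothetical):
# {
#   "_meta": {"hostvars": {"app-1": {"ansible_host": "203.0.113.10"}}},
#   "app": {"hosts": ["app-1"]},
#   "all": {"children": ["app"], "hosts": [], "vars": {}}
# }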
| 28.94 | 79 | 0.520387 | ["MIT"] | Otus-DevOps-2020-08/ValeriyTyutyunnik_infra | ansible/environments/stage/dynamic_inventory.py | 1,447 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
"""ServiceAssociationLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceAssociationLinksListResult"
"""Gets a list of service association links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceAssociationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceAssociationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceAssociationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'} # type: ignore
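    # Illustrative usage sketch (not part of the generated file; client wiring is an
    # assumption): given a NetworkManagementClient for this API version,
    #   client.service_association_links.list(resource_group_name='my-rg',
    #                                         virtual_network_name='my-vnet',
    #                                         subnet_name='my-subnet')
    # returns a ServiceAssociationLinksListResult for the subnet.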
| 46.899083 | 223 | 0.689358 | ["MIT"] | 4thel00z/microsoft-crap-that-doesnt-work | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_service_association_links_operations.py | 5,112 | Python |
from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
from .util import power
from .util import root
| 26.8 | 34 | 0.825871 | ["Apache-2.0"] | EvanBianco/bruges | bruges/util/__init__.py | 402 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.securitycenter_v1beta1.proto import (
asset_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
finding_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
organization_settings_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
security_marks_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
source_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto",
package="google.cloud.securitycenter.v1beta1",
syntax="proto3",
serialized_pb=_b(
'\nFgoogle/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto\x12#google.cloud.securitycenter.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x35google/cloud/securitycenter_v1beta1/proto/asset.proto\x1a\x37google/cloud/securitycenter_v1beta1/proto/finding.proto\x1a\x45google/cloud/securitycenter_v1beta1/proto/organization_settings.proto\x1a>google/cloud/securitycenter_v1beta1/proto/security_marks.proto\x1a\x36google/cloud/securitycenter_v1beta1/proto/source.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"y\n\x14\x43reateFindingRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\nfinding_id\x18\x02 \x01(\t\x12=\n\x07\x66inding\x18\x03 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding"b\n\x13\x43reateSourceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12;\n\x06source\x18\x02 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source".\n\x1eGetOrganizationSettingsRequest\x12\x0c\n\x04name\x18\x01 \x01(\t" \n\x10GetSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xd1\x01\n\x12GroupAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12\x33\n\x10\x63ompare_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12-\n\tread_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\x08 \x01(\x05"\xa9\x01\n\x13GroupAssetsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x9e\x01\n\x14GroupFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x05 \x01(\t\x12\x11\n\tpage_size\x18\x06 \x01(\x05"\xab\x01\n\x15GroupFindingsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\xbd\x01\n\x0bGroupResult\x12T\n\nproperties\x18\x01 \x03(\x0b\x32@.google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry\x12\r\n\x05\x63ount\x18\x02 \x01(\x03\x1aI\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01"K\n\x12ListSourcesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"l\n\x13ListSourcesResponse\x12<\n\x07sources\x18\x01 \x03(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x80\x02\n\x11ListAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x10\x63ompare_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\nfield_mask\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x08 \x01(\t\x12\x11\n\tpage_size\x18\t \x01(\x05"\xd6\x03\n\x12ListAssetsResponse\x12\x65\n\x13list_assets_results\x18\x01 
\x03(\x0b\x32H.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05\x1a\xfc\x01\n\x10ListAssetsResult\x12\x39\n\x05\x61sset\x18\x01 \x01(\x0b\x32*.google.cloud.securitycenter.v1beta1.Asset\x12]\n\x05state\x18\x02 \x01(\x0e\x32N.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State"N\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06UNUSED\x10\x01\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x02\x12\x0b\n\x07REMOVED\x10\x03\x12\n\n\x06\x41\x43TIVE\x10\x04"\xcd\x01\n\x13ListFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nfield_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x06 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"\xb2\x01\n\x14ListFindingsResponse\x12>\n\x08\x66indings\x18\x01 \x03(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05"\x99\x01\n\x16SetFindingStateRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x05state\x18\x02 \x01(\x0e\x32\x32.google.cloud.securitycenter.v1beta1.Finding.State\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"*\n\x18RunAssetDiscoveryRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t"\x86\x01\n\x14UpdateFindingRequest\x12=\n\x07\x66inding\x18\x01 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xae\x01\n!UpdateOrganizationSettingsRequest\x12X\n\x15organization_settings\x18\x01 \x01(\x0b\x32\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x83\x01\n\x13UpdateSourceRequest\x12;\n\x06source\x18\x01 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xc9\x01\n\x1aUpdateSecurityMarksRequest\x12J\n\x0esecurity_marks\x18\x01 \x01(\x0b\x32\x32.google.cloud.securitycenter.v1beta1.SecurityMarks\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12.\n\nstart_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x80\x1c\n\x0eSecurityCenter\x12\xb0\x01\n\x0c\x43reateSource\x12\x38.google.cloud.securitycenter.v1beta1.CreateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"9\x82\xd3\xe4\x93\x02\x33")/v1beta1/{parent=organizations/*}/sources:\x06source\x12\xbf\x01\n\rCreateFinding\x12\x39.google.cloud.securitycenter.v1beta1.CreateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"E\x82\xd3\xe4\x93\x02?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\x07\x66inding\x12\x90\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\x01*\x12\xd7\x01\n\x17GetOrganizationSettings\x12\x43.google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{name=organizations/*/organizationSettings}\x12\xa2\x01\n\tGetSource\x12\x35.google.cloud.securitycenter.v1beta1.GetSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{name=organizations/*/sources/*}\x12\xbb\x01\n\x0bGroupAssets\x12\x37.google.cloud.securitycenter.v1beta1.GroupAssetsRequest\x1a\x38.google.cloud.securitycenter.v1beta1.GroupAssetsResponse"9\x82\xd3\xe4\x93\x02\x33"./v1beta1/{parent=organizations/*}/assets:group:\x01*\x12\xcd\x01\n\rGroupFindings\x12\x39.google.cloud.securitycenter.v1beta1.GroupFindingsRequest\x1a:.google.cloud.securitycenter.v1beta1.GroupFindingsResponse"E\x82\xd3\xe4\x93\x02?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\x01*\x12\xaf\x01\n\nListAssets\x12\x36.google.cloud.securitycenter.v1beta1.ListAssetsRequest\x1a\x37.google.cloud.securitycenter.v1beta1.ListAssetsResponse"0\x82\xd3\xe4\x93\x02*\x12(/v1beta1/{parent=organizations/*}/assets\x12\xc1\x01\n\x0cListFindings\x12\x38.google.cloud.securitycenter.v1beta1.ListFindingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.ListFindingsResponse"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{parent=organizations/*/sources/*}/findings\x12\xb3\x01\n\x0bListSources\x12\x37.google.cloud.securitycenter.v1beta1.ListSourcesRequest\x1a\x38.google.cloud.securitycenter.v1beta1.ListSourcesResponse"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{parent=organizations/*}/sources\x12\xb3\x01\n\x11RunAssetDiscovery\x12=.google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest\x1a\x1d.google.longrunning.Operation"@\x82\xd3\xe4\x93\x02:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\x01*\x12\xc6\x01\n\x0fSetFindingState\x12;.google.cloud.securitycenter.v1beta1.SetFindingStateRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"H\x82\xd3\xe4\x93\x02\x42"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\x01*\x12\x90\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\x01*\x12\xb6\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"K\x82\xd3\xe4\x93\x02\x45"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\x01*\x12\xc7\x01\n\rUpdateFinding\x12\x39.google.cloud.securitycenter.v1beta1.UpdateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"M\x82\xd3\xe4\x93\x02G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\x07\x66inding\x12\x8a\x02\n\x1aUpdateOrganizationSettings\x12\x46.google.cloud.securitycen
ter.v1beta1.UpdateOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"i\x82\xd3\xe4\x93\x02\x63\x32J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\x15organization_settings\x12\xb7\x01\n\x0cUpdateSource\x12\x38.google.cloud.securitycenter.v1beta1.UpdateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"@\x82\xd3\xe4\x93\x02:20/v1beta1/{source.name=organizations/*/sources/*}:\x06source\x12\xd0\x02\n\x13UpdateSecurityMarks\x12?.google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest\x1a\x32.google.cloud.securitycenter.v1beta1.SecurityMarks"\xc3\x01\x82\xd3\xe4\x93\x02\xbc\x01\x32\x45/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\x0esecurity_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\x0esecurity_marksB~\n\'com.google.cloud.securitycenter.v1beta1P\x01ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenterb\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STATE_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNUSED", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REMOVED", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACTIVE", index=4, number=4, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2754,
serialized_end=2832,
)
_sym_db.RegisterEnumDescriptor(_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE)
_CREATEFINDINGREQUEST = _descriptor.Descriptor(
name="CreateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding_id",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=699,
serialized_end=820,
)
_CREATESOURCEREQUEST = _descriptor.Descriptor(
name="CreateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.source",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=822,
serialized_end=920,
)
_GETORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="GetOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=922,
serialized_end=968,
)
_GETSOURCEREQUEST = _descriptor.Descriptor(
name="GetSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=970,
serialized_end=1002,
)
_GROUPASSETSREQUEST = _descriptor.Descriptor(
name="GroupAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.compare_duration",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.read_time",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_token",
index=5,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_size",
index=6,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1005,
serialized_end=1214,
)
_GROUPASSETSRESPONSE = _descriptor.Descriptor(
name="GroupAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1217,
serialized_end=1386,
)
_GROUPFINDINGSREQUEST = _descriptor.Descriptor(
name="GroupFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_token",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_size",
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1389,
serialized_end=1547,
)
_GROUPFINDINGSRESPONSE = _descriptor.Descriptor(
name="GroupFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1550,
serialized_end=1721,
)
_GROUPRESULT_PROPERTIESENTRY = _descriptor.Descriptor(
name="PropertiesEntry",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1840,
serialized_end=1913,
)
_GROUPRESULT = _descriptor.Descriptor(
name="GroupResult",
full_name="google.cloud.securitycenter.v1beta1.GroupResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="properties",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.properties",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="count",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.count",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_GROUPRESULT_PROPERTIESENTRY],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1724,
serialized_end=1913,
)
_LISTSOURCESREQUEST = _descriptor.Descriptor(
name="ListSourcesRequest",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_size",
index=2,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1915,
serialized_end=1990,
)
_LISTSOURCESRESPONSE = _descriptor.Descriptor(
name="ListSourcesResponse",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sources",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.sources",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1992,
serialized_end=2100,
)
_LISTASSETSREQUEST = _descriptor.Descriptor(
name="ListAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.compare_duration",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.field_mask",
index=5,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_token",
index=6,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_size",
index=7,
number=9,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2103,
serialized_end=2359,
)
_LISTASSETSRESPONSE_LISTASSETSRESULT = _descriptor.Descriptor(
name="ListAssetsResult",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="asset",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.asset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2580,
serialized_end=2832,
)
_LISTASSETSRESPONSE = _descriptor.Descriptor(
name="ListAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="list_assets_results",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.list_assets_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2362,
serialized_end=2832,
)
_LISTFINDINGSREQUEST = _descriptor.Descriptor(
name="ListFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.field_mask",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_token",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_size",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2835,
serialized_end=3040,
)
_LISTFINDINGSRESPONSE = _descriptor.Descriptor(
name="ListFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="findings",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.findings",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3043,
serialized_end=3221,
)
_SETFINDINGSTATEREQUEST = _descriptor.Descriptor(
name="SetFindingStateRequest",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3224,
serialized_end=3377,
)
_RUNASSETDISCOVERYREQUEST = _descriptor.Descriptor(
name="RunAssetDiscoveryRequest",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3379,
serialized_end=3421,
)
_UPDATEFINDINGREQUEST = _descriptor.Descriptor(
name="UpdateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.finding",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3424,
serialized_end=3558,
)
_UPDATEORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="UpdateOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="organization_settings",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.organization_settings",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3561,
serialized_end=3735,
)
_UPDATESOURCEREQUEST = _descriptor.Descriptor(
name="UpdateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.source",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3738,
serialized_end=3869,
)
_UPDATESECURITYMARKSREQUEST = _descriptor.Descriptor(
name="UpdateSecurityMarksRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="security_marks",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.security_marks",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3872,
serialized_end=4073,
)
_CREATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_CREATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_GROUPASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GROUPASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPASSETSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPRESULT_PROPERTIESENTRY.fields_by_name[
"value"
].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_GROUPRESULT_PROPERTIESENTRY.containing_type = _GROUPRESULT
_GROUPRESULT.fields_by_name["properties"].message_type = _GROUPRESULT_PROPERTIESENTRY
_LISTSOURCESRESPONSE.fields_by_name[
"sources"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_LISTASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LISTASSETSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"asset"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2._ASSET
)
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"state"
].enum_type = _LISTASSETSRESPONSE_LISTASSETSRESULT_STATE
_LISTASSETSRESPONSE_LISTASSETSRESULT.containing_type = _LISTASSETSRESPONSE
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE.containing_type = (
_LISTASSETSRESPONSE_LISTASSETSRESULT
)
_LISTASSETSRESPONSE.fields_by_name[
"list_assets_results"
].message_type = _LISTASSETSRESPONSE_LISTASSETSRESULT
_LISTASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTFINDINGSRESPONSE.fields_by_name[
"findings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_LISTFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SETFINDINGSTATEREQUEST.fields_by_name[
"state"
].enum_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING_STATE
)
_SETFINDINGSTATEREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_UPDATEFINDINGREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"organization_settings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS
)
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_UPDATESOURCEREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"security_marks"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS
)
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["CreateFindingRequest"] = _CREATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name["CreateSourceRequest"] = _CREATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"GetOrganizationSettingsRequest"
] = _GETORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["GetSourceRequest"] = _GETSOURCEREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsRequest"] = _GROUPASSETSREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsResponse"] = _GROUPASSETSRESPONSE
DESCRIPTOR.message_types_by_name["GroupFindingsRequest"] = _GROUPFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["GroupFindingsResponse"] = _GROUPFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["GroupResult"] = _GROUPRESULT
DESCRIPTOR.message_types_by_name["ListSourcesRequest"] = _LISTSOURCESREQUEST
DESCRIPTOR.message_types_by_name["ListSourcesResponse"] = _LISTSOURCESRESPONSE
DESCRIPTOR.message_types_by_name["ListAssetsRequest"] = _LISTASSETSREQUEST
DESCRIPTOR.message_types_by_name["ListAssetsResponse"] = _LISTASSETSRESPONSE
DESCRIPTOR.message_types_by_name["ListFindingsRequest"] = _LISTFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["ListFindingsResponse"] = _LISTFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["SetFindingStateRequest"] = _SETFINDINGSTATEREQUEST
DESCRIPTOR.message_types_by_name["RunAssetDiscoveryRequest"] = _RUNASSETDISCOVERYREQUEST
DESCRIPTOR.message_types_by_name["UpdateFindingRequest"] = _UPDATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateOrganizationSettingsRequest"
] = _UPDATEORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["UpdateSourceRequest"] = _UPDATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateSecurityMarksRequest"
] = _UPDATESECURITYMARKSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateFindingRequest = _reflection.GeneratedProtocolMessageType(
"CreateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a finding.
Attributes:
parent:
Resource name of the new finding's parent. Its format should
be "organizations/[organization\_id]/sources/[source\_id]".
finding_id:
Unique identifier provided by the client within the parent
scope. It must be alphanumeric and less than or equal to 32
characters and greater than 0 characters in length.
finding:
The Finding being created. The name and security\_marks will
be ignored as they are both output only fields on this
resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateFindingRequest)
),
)
_sym_db.RegisterMessage(CreateFindingRequest)
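# The generated docstring above describes the CreateFindingRequest fields; the
# commented sketch below shows one plausible way such a request could be built.
# It is kept as a comment so this auto-generated module's import-time behaviour
# is unchanged, and the organization/source IDs, finding_id and category value
# are purely hypothetical placeholders (only the field names come from the
# descriptor above).
#
#   example_create_finding_request = CreateFindingRequest(
#       # Format: "organizations/[organization_id]/sources/[source_id]"
#       parent="organizations/123/sources/456",
#       # Client-chosen ID: alphanumeric, 1-32 characters
#       finding_id="testfinding001",
#       finding=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.Finding(
#           category="MEDIUM_RISK_ONE",  # hypothetical category label
#       ),
#   )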
CreateSourceRequest = _reflection.GeneratedProtocolMessageType(
"CreateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a source.
Attributes:
parent:
Resource name of the new source's parent. Its format should be
"organizations/[organization\_id]".
source:
The Source being created, only the display\_name and
description will be used. All other fields will be ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateSourceRequest)
),
)
_sym_db.RegisterMessage(CreateSourceRequest)
GetOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"GetOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting organization settings.
Attributes:
name:
Name of the organization to get organization settings for. Its
format is
"organizations/[organization\_id]/organizationSettings".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(GetOrganizationSettingsRequest)
GetSourceRequest = _reflection.GeneratedProtocolMessageType(
"GetSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting a source.
Attributes:
name:
Relative resource name of the source. Its format is
"organizations/[organization\_id]/source/[source\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetSourceRequest)
),
)
_sym_db.RegisterMessage(GetSourceRequest)
GroupAssetsRequest = _reflection.GeneratedProtocolMessageType(
"GroupAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by assets.
Attributes:
parent:
Name of the organization to groupBy. Its format is
"organizations/[organization\_id]".
filter:
Expression that defines the filter to apply across assets. The
expression is a list of zero or more restrictions combined via
logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. The fields map to those defined in the Asset
resource. Examples include: - name -
security\_center\_properties.resource\_name -
resource\_properties.a\_property -
security\_marks.marks.marka The supported operators are: -
``=`` for all value types. - ``>``, ``<``, ``>=``, ``<=`` for
integer values. - ``:``, meaning substring matching, for
strings. The supported value types are: - string literals
in quotes. - integer literals without quotes. - boolean
literals ``true`` and ``false`` without quotes. For example,
``resource_properties.size = 100`` is a valid filter string.
group_by:
        Expression that defines which asset fields to use for
grouping. The string value should follow SQL syntax: comma
separated list of fields. For example: "security\_center\_prop
erties.resource\_project,security\_center\_properties.project"
. The following fields are supported when compare\_duration
is not set: - security\_center\_properties.resource\_project
- security\_center\_properties.resource\_type -
security\_center\_properties.resource\_parent The following
fields are supported when compare\_duration is set: -
security\_center\_properties.resource\_type
compare_duration:
When compare\_duration is set, the Asset's "state" property is
updated to indicate whether the asset was added, removed, or
remained present during the compare\_duration period of time
that precedes the read\_time. This is the time between
(read\_time - compare\_duration) and read\_time. The state
value is derived based on the presence of the asset at the two
points in time. Intermediate state changes between the two
times don't affect the result. For example, the results aren't
affected if the asset is removed and re-created again.
Possible "state" values when compare\_duration is specified:
- "ADDED": indicates that the asset was not present before
compare\_duration, but present at reference\_time. -
"REMOVED": indicates that the asset was present at the start
of compare\_duration, but not present at reference\_time. -
"ACTIVE": indicates that the asset was present at both the
start and the end of the time period defined by
compare\_duration and reference\_time. This field is
ignored if ``state`` is not a field in ``group_by``.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupAssetsResponse``;
indicates that this is a continuation of a prior
``GroupAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsRequest)
),
)
_sym_db.RegisterMessage(GroupAssetsRequest)
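# A minimal, commented sketch of a GroupAssetsRequest using the filter and
# group_by syntax described in the generated docstring above. The organization
# ID is a hypothetical placeholder, the filter string reuses the example the
# docstring itself calls valid, and the group_by field is taken from the
# documented supported list. Left commented so nothing executes at import time.
#
#   example_group_assets_request = GroupAssetsRequest(
#       parent="organizations/123",                    # hypothetical org ID
#       filter="resource_properties.size = 100",       # documented valid filter
#       group_by="security_center_properties.resource_type",
#       page_size=100,                                 # 1..1000, default 10
#   )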
GroupAssetsResponse = _reflection.GeneratedProtocolMessageType(
"GroupAssetsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for grouping by assets.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsResponse)
),
)
_sym_db.RegisterMessage(GroupAssetsResponse)
GroupFindingsRequest = _reflection.GeneratedProtocolMessageType(
"GroupFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by findings.
Attributes:
parent:
Name of the source to groupBy. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
groupBy across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
filter:
Expression that defines the filter to apply across findings.
The expression is a list of one or more restrictions combined
via logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. Examples include: - name -
source\_properties.a\_property - security\_marks.marks.marka
The supported operators are: - ``=`` for all value types. -
``>``, ``<``, ``>=``, ``<=`` for integer values. - ``:``,
meaning substring matching, for strings. The supported value
types are: - string literals in quotes. - integer literals
without quotes. - boolean literals ``true`` and ``false``
without quotes. For example, ``source_properties.size = 100``
is a valid filter string.
group_by:
        Expression that defines which asset fields to use for grouping
(including ``state``). The string value should follow SQL
syntax: comma separated list of fields. For example:
"parent,resource\_name". The following fields are supported:
- resource\_name - category - state - parent
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupFindingsResponse``;
indicates that this is a continuation of a prior
``GroupFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsRequest)
),
)
_sym_db.RegisterMessage(GroupFindingsRequest)
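# Commented sketch of grouping findings across every source of an organization,
# using the "-" wildcard source ID mentioned in the docstring above; the
# organization ID is hypothetical and the group_by fields come from the
# documented supported list (category, state).
#
#   example_group_findings_request = GroupFindingsRequest(
#       parent="organizations/123/sources/-",   # "-" groups across all sources
#       group_by="category,state",
#   )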
GroupFindingsResponse = _reflection.GeneratedProtocolMessageType(
"GroupFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
        __doc__="""Response message for grouping by findings.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsResponse)
),
)
_sym_db.RegisterMessage(GroupFindingsResponse)
GroupResult = _reflection.GeneratedProtocolMessageType(
"GroupResult",
(_message.Message,),
dict(
PropertiesEntry=_reflection.GeneratedProtocolMessageType(
"PropertiesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPRESULT_PROPERTIESENTRY,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry)
),
),
DESCRIPTOR=_GROUPRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the properties and count of a groupBy request.
Attributes:
properties:
Properties matching the groupBy fields in the request.
count:
Total count of resources for the given properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult)
),
)
_sym_db.RegisterMessage(GroupResult)
_sym_db.RegisterMessage(GroupResult.PropertiesEntry)
ListSourcesRequest = _reflection.GeneratedProtocolMessageType(
"ListSourcesRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing sources.
Attributes:
parent:
Resource name of the parent of sources to list. Its format
should be "organizations/[organization\_id]".
page_token:
The value returned by the last ``ListSourcesResponse``;
indicates that this is a continuation of a prior
``ListSources`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesRequest)
),
)
_sym_db.RegisterMessage(ListSourcesRequest)
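
# Illustrative sketch only: paging through sources with the ListSourcesRequest
# documented above. ``call_list_sources`` stands in for whatever actually
# issues the RPC (for example a gRPC stub method) and is an assumption, not
# something defined in this module.
def _example_page_through_sources(organization_id, call_list_sources):
    page_token = ""
    while True:
        request = ListSourcesRequest(
            parent="organizations/%s" % organization_id,
            page_token=page_token,
            page_size=100,
        )
        response = call_list_sources(request)
        for source in response.sources:
            print(source)
        page_token = response.next_page_token
        if not page_token:
            break
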
ListSourcesResponse = _reflection.GeneratedProtocolMessageType(
"ListSourcesResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing sources.
Attributes:
sources:
Sources belonging to the requested parent.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesResponse)
),
)
_sym_db.RegisterMessage(ListSourcesResponse)
ListAssetsRequest = _reflection.GeneratedProtocolMessageType(
"ListAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing assets.
Attributes:
parent:
Name of the organization assets should belong to. Its format
is "organizations/[organization\_id]".
filter:
Expression that defines the filter to apply across assets. The
expression is a list of zero or more restrictions combined via
logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. The fields map to those defined in the Asset
resource. Examples include: - name -
security\_center\_properties.resource\_name -
resource\_properties.a\_property -
security\_marks.marks.marka The supported operators are: -
``=`` for all value types. - ``>``, ``<``, ``>=``, ``<=`` for
integer values. - ``:``, meaning substring matching, for
strings. The supported value types are: - string literals
in quotes. - integer literals without quotes. - boolean
literals ``true`` and ``false`` without quotes. For example,
``resource_properties.size = 100`` is a valid filter string.
order_by:
Expression that defines what fields and order to use for
sorting. The string value should follow SQL syntax: comma
separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,resource\_properties.a\_property".
Redundant space characters in the syntax are insignificant.
"name desc,resource\_properties.a\_property" and " name desc ,
resource\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
compare_duration:
When compare\_duration is set, the ListAssetsResult's "state"
attribute is updated to indicate whether the asset was added,
removed, or remained present during the compare\_duration
period of time that precedes the read\_time. This is the time
between (read\_time - compare\_duration) and read\_time. The
state value is derived based on the presence of the asset at
the two points in time. Intermediate state changes between the
two times don't affect the result. For example, the results
aren't affected if the asset is removed and re-created again.
Possible "state" values when compare\_duration is specified:
- "ADDED": indicates that the asset was not present before
compare\_duration, but present at read\_time. - "REMOVED":
indicates that the asset was present at the start of
compare\_duration, but not present at read\_time. - "ACTIVE":
indicates that the asset was present at both the start and
the end of the time period defined by compare\_duration and
read\_time. If compare\_duration is not specified, then the
only possible state is "UNUSED", which indicates that the
asset is present at read\_time.
field_mask:
Optional. A field mask to specify the ListAssetsResult fields
to be listed in the response. An empty field mask will list
all fields.
page_token:
The value returned by the last ``ListAssetsResponse``;
indicates that this is a continuation of a prior
``ListAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsRequest)
),
)
_sym_db.RegisterMessage(ListAssetsRequest)
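
# Illustrative sketch only: building a ListAssetsRequest with the filter,
# order_by and compare_duration semantics documented above. The filter string
# and the 24-hour comparison window are placeholder assumptions.
def _example_build_list_assets_request(organization_id):
    from google.protobuf import duration_pb2

    return ListAssetsRequest(
        parent="organizations/%s" % organization_id,
        filter="resource_properties.size = 100",  # example filter from the docstring above
        order_by="name desc,resource_properties.a_property",
        # With compare_duration set, each result's state becomes ADDED/REMOVED/ACTIVE.
        compare_duration=duration_pb2.Duration(seconds=24 * 60 * 60),
        page_size=100,
    )
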
ListAssetsResponse = _reflection.GeneratedProtocolMessageType(
"ListAssetsResponse",
(_message.Message,),
dict(
ListAssetsResult=_reflection.GeneratedProtocolMessageType(
"ListAssetsResult",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSRESPONSE_LISTASSETSRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the Asset and its State.
Attributes:
asset:
Asset matching the search request.
state:
State of the asset.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult)
),
),
DESCRIPTOR=_LISTASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing assets.
Attributes:
list_assets_results:
Assets matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of assets matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse)
),
)
_sym_db.RegisterMessage(ListAssetsResponse)
_sym_db.RegisterMessage(ListAssetsResponse.ListAssetsResult)
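
# Illustrative sketch only: walking a ListAssetsResponse as documented above.
# Each ListAssetsResult pairs an asset with its state (meaningful when
# compare_duration was set on the request).
def _example_iterate_list_assets_response(list_assets_response):
    for result in list_assets_response.list_assets_results:
        print(result.state, result.asset)
    print("total matching assets:", list_assets_response.total_size)
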
ListFindingsRequest = _reflection.GeneratedProtocolMessageType(
"ListFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing findings.
Attributes:
parent:
Name of the source the findings belong to. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
list across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
filter:
Expression that defines the filter to apply across findings.
The expression is a list of one or more restrictions combined
via logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. Examples include: - name -
source\_properties.a\_property - security\_marks.marks.marka
The supported operators are: - ``=`` for all value types. -
``>``, ``<``, ``>=``, ``<=`` for integer values. - ``:``,
meaning substring matching, for strings. The supported value
types are: - string literals in quotes. - integer literals
without quotes. - boolean literals ``true`` and ``false``
without quotes. For example, ``source_properties.size = 100``
is a valid filter string.
order_by:
Expression that defines what fields and order to use for
sorting. The string value should follow SQL syntax: comma
separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,source\_properties.a\_property". Redundant
space characters in the syntax are insignificant. "name
desc,source\_properties.a\_property" and " name desc ,
source\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
field_mask:
Optional. A field mask to specify the Finding fields to be
listed in the response. An empty field mask will list all
fields.
page_token:
The value returned by the last ``ListFindingsResponse``;
indicates that this is a continuation of a prior
``ListFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsRequest)
),
)
_sym_db.RegisterMessage(ListFindingsRequest)
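
# Illustrative sketch only: a ListFindingsRequest that lists findings across
# all sources of an organization, using the ``-`` wildcard source id and the
# filter grammar documented above. The field-mask paths are assumptions about
# Finding field names (defined in finding_pb2, not here).
def _example_build_list_findings_request(organization_id):
    from google.protobuf import field_mask_pb2

    return ListFindingsRequest(
        parent="organizations/%s/sources/-" % organization_id,
        filter="source_properties.size = 100",  # example filter from the docstring above
        order_by="name desc",
        field_mask=field_mask_pb2.FieldMask(paths=["name", "state", "category"]),
        page_size=100,
    )
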
ListFindingsResponse = _reflection.GeneratedProtocolMessageType(
"ListFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing findings.
Attributes:
findings:
Findings matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of findings matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsResponse)
),
)
_sym_db.RegisterMessage(ListFindingsResponse)
SetFindingStateRequest = _reflection.GeneratedProtocolMessageType(
"SetFindingStateRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SETFINDINGSTATEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a finding's state.
Attributes:
name:
The relative resource name of the finding. See: https://cloud.
google.com/apis/design/resource\_names#relative\_resource\_nam
e Example: "organizations/123/sources/456/finding/789".
state:
The desired State of the finding.
start_time:
The time at which the updated state takes effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.SetFindingStateRequest)
),
)
_sym_db.RegisterMessage(SetFindingStateRequest)
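
# Illustrative sketch only: marking a finding inactive with the
# SetFindingStateRequest documented above. The enum value 2 is assumed to
# correspond to Finding.State.INACTIVE in finding_pb2; verify against that
# module before relying on it.
def _example_build_set_finding_state_request(finding_name):
    from google.protobuf import timestamp_pb2

    start_time = timestamp_pb2.Timestamp()
    start_time.GetCurrentTime()  # the state change takes effect now
    return SetFindingStateRequest(
        name=finding_name,  # e.g. "organizations/123/sources/456/findings/789"
        state=2,  # assumed: INACTIVE
        start_time=start_time,
    )
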
RunAssetDiscoveryRequest = _reflection.GeneratedProtocolMessageType(
"RunAssetDiscoveryRequest",
(_message.Message,),
dict(
DESCRIPTOR=_RUNASSETDISCOVERYREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for running asset discovery for an organization.
Attributes:
parent:
Name of the organization to run asset discovery for. Its
format is "organizations/[organization\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest)
),
)
_sym_db.RegisterMessage(RunAssetDiscoveryRequest)
UpdateFindingRequest = _reflection.GeneratedProtocolMessageType(
"UpdateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating or creating a finding.
Attributes:
finding:
The finding resource to update or create if it does not
already exist. parent, security\_marks, and update\_time will
be ignored. In the case of creation, the finding id portion
of the name must be alphanumeric and less than or equal to 32
characters and greater than 0 characters in length.
update_mask:
The FieldMask to use when updating the finding resource. This
field is ignored if the finding does not already exist and the
finding is created.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateFindingRequest)
),
)
_sym_db.RegisterMessage(UpdateFindingRequest)
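
# Illustrative sketch only: the "create if it does not already exist" use of
# UpdateFindingRequest described above. Only the finding name is populated
# here; a real finding would also carry category, source properties, etc.
# (fields defined in finding_pb2, not in this module). When updating an
# existing finding, update_mask.paths should also list the changed fields.
def _example_build_upsert_finding_request(source_name, finding_id):
    request = UpdateFindingRequest()
    # The finding id portion must be alphanumeric and 1-32 characters long.
    request.finding.name = "%s/findings/%s" % (source_name, finding_id)
    return request
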
UpdateOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"UpdateOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating an organization's settings.
Attributes:
organization_settings:
The organization settings resource to update.
update_mask:
The FieldMask to use when updating the settings resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(UpdateOrganizationSettingsRequest)
UpdateSourceRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a source.
Attributes:
source:
The source resource to update.
update_mask:
The FieldMask to use when updating the source resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSourceRequest)
),
)
_sym_db.RegisterMessage(UpdateSourceRequest)
UpdateSecurityMarksRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSecurityMarksRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESECURITYMARKSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a SecurityMarks resource.
Attributes:
security_marks:
The security marks resource to update.
update_mask:
The FieldMask to use when updating the security marks
resource.
start_time:
The time at which the updated SecurityMarks take effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest)
),
)
_sym_db.RegisterMessage(UpdateSecurityMarksRequest)
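
# Illustrative sketch only: attaching a user-defined mark to an asset with the
# UpdateSecurityMarksRequest documented above. The securityMarks resource name
# pattern follows the HTTP binding below; the mark key/value and the mask path
# are placeholder assumptions (``marks`` is assumed to be a string map on
# SecurityMarks in security_marks_pb2).
def _example_build_update_security_marks_request(asset_name):
    from google.protobuf import field_mask_pb2

    request = UpdateSecurityMarksRequest(
        update_mask=field_mask_pb2.FieldMask(paths=["marks.environment"])
    )
    request.security_marks.name = "%s/securityMarks" % asset_name
    request.security_marks.marks["environment"] = "production"
    return request
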
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n'com.google.cloud.securitycenter.v1beta1P\001ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenter"
),
)
_GROUPRESULT_PROPERTIESENTRY.has_options = True
_GROUPRESULT_PROPERTIESENTRY._options = _descriptor._ParseOptions(
descriptor_pb2.MessageOptions(), _b("8\001")
)
_SECURITYCENTER = _descriptor.ServiceDescriptor(
name="SecurityCenter",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter",
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=4076,
serialized_end=7660,
methods=[
_descriptor.MethodDescriptor(
name="CreateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateSource",
index=0,
containing_service=None,
input_type=_CREATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023")/v1beta1/{parent=organizations/*}/sources:\006source'
),
),
),
_descriptor.MethodDescriptor(
name="CreateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateFinding",
index=1,
containing_service=None,
input_type=_CREATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\007finding'
),
),
),
_descriptor.MethodDescriptor(
name="GetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetIamPolicy",
index=2,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GetOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetOrganizationSettings",
index=3,
containing_service=None,
input_type=_GETORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{name=organizations/*/organizationSettings}"
),
),
),
_descriptor.MethodDescriptor(
name="GetSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetSource",
index=4,
containing_service=None,
input_type=_GETSOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{name=organizations/*/sources/*}"
),
),
),
_descriptor.MethodDescriptor(
name="GroupAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupAssets",
index=5,
containing_service=None,
input_type=_GROUPASSETSREQUEST,
output_type=_GROUPASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023"./v1beta1/{parent=organizations/*}/assets:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GroupFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupFindings",
index=6,
containing_service=None,
input_type=_GROUPFINDINGSREQUEST,
output_type=_GROUPFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="ListAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListAssets",
index=7,
containing_service=None,
input_type=_LISTASSETSREQUEST,
output_type=_LISTASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002*\022(/v1beta1/{parent=organizations/*}/assets"
),
),
),
_descriptor.MethodDescriptor(
name="ListFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListFindings",
index=8,
containing_service=None,
input_type=_LISTFINDINGSREQUEST,
output_type=_LISTFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{parent=organizations/*/sources/*}/findings"
),
),
),
_descriptor.MethodDescriptor(
name="ListSources",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListSources",
index=9,
containing_service=None,
input_type=_LISTSOURCESREQUEST,
output_type=_LISTSOURCESRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{parent=organizations/*}/sources"
),
),
),
_descriptor.MethodDescriptor(
name="RunAssetDiscovery",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.RunAssetDiscovery",
index=10,
containing_service=None,
input_type=_RUNASSETDISCOVERYREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetFindingState",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetFindingState",
index=11,
containing_service=None,
input_type=_SETFINDINGSTATEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002B"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetIamPolicy",
index=12,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="TestIamPermissions",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.TestIamPermissions",
index=13,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST,
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002E"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="UpdateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateFinding",
index=14,
containing_service=None,
input_type=_UPDATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\007finding"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateOrganizationSettings",
index=15,
containing_service=None,
input_type=_UPDATEORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002c2J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\025organization_settings"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSource",
index=16,
containing_service=None,
input_type=_UPDATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002:20/v1beta1/{source.name=organizations/*/sources/*}:\006source"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSecurityMarks",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSecurityMarks",
index=17,
containing_service=None,
input_type=_UPDATESECURITYMARKSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002\274\0012E/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\016security_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\016security_marks"
),
),
),
],
)
_sym_db.RegisterServiceDescriptor(_SECURITYCENTER)
DESCRIPTOR.services_by_name["SecurityCenter"] = _SECURITYCENTER
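
# Illustrative sketch only: how the service descriptor registered above is
# typically consumed at runtime. The companion module name
# (securitycenter_service_pb2_grpc), its SecurityCenterStub class and the API
# endpoint follow standard protoc/gRPC conventions and are assumptions rather
# than definitions made in this file; real calls also need Google auth
# credentials attached to the channel, which is omitted here.
def _example_list_sources_over_grpc(organization_id):
    import grpc

    from google.cloud.securitycenter_v1beta1.proto import (
        securitycenter_service_pb2_grpc,
    )

    channel = grpc.secure_channel(
        "securitycenter.googleapis.com:443", grpc.ssl_channel_credentials()
    )
    stub = securitycenter_service_pb2_grpc.SecurityCenterStub(channel)
    request = ListSourcesRequest(parent="organizations/%s" % organization_id)
    return stub.ListSources(request)
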
# @@protoc_insertion_point(module_scope)
| 38.48368 | 10,592 | 0.64356 | [
"Apache-2.0"
] | Abd-Elrazek/google-cloud-python | securitycenter/google/cloud/securitycenter_v1beta1/proto/securitycenter_service_pb2.py | 116,721 | Python |