#!/usr/bin/env bash
ls *.log | xargs grep "\[test\]" | sed -e 's/:INFO:/ /g' | grep "AUC-PR" | awk '{ print $4 "\t" $1 }'
#!/bin/sh
if [ ! -f docker-compose.yml ]; then
touch docker-compose.yml
fi
cat >| docker-compose.yml << EOF
version: "3.7"
services:
  nginx:
    image: appsmith/appsmith-editor
    env_file: ./docker.env
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./data/nginx/app.conf.template:/nginx.conf.template
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
    command: "/bin/sh -c 'while :; do sleep 6h & wait \$\${!}; nginx -s reload; done & /start-nginx.sh'"
    depends_on:
      - appsmith-internal-server
    networks:
      - appsmith
  certbot:
    image: certbot/certbot
    volumes:
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait \$\${!}; done;'"
    networks:
      - appsmith
  appsmith-internal-server:
    image: appsmith/appsmith-server:latest
    env_file:
      - ./docker.env
      - ./encryption.env
    expose:
      - "8080"
    links:
      - mongo
    depends_on:
      - mongo
    networks:
      - appsmith
  mongo:
    image: mongo
    expose:
      - "27017"
    environment:
      - MONGO_INITDB_DATABASE=appsmith
      - MONGO_INITDB_ROOT_USERNAME=$mongo_root_user
      - MONGO_INITDB_ROOT_PASSWORD=$mongo_root_password
    volumes:
      - ./data/mongo/db:/data/db
      - ./data/mongo/init.js:/docker-entrypoint-initdb.d/init.js:ro
    networks:
      - appsmith
  redis:
    image: redis
    expose:
      - "6379"
    networks:
      - appsmith
networks:
  appsmith:
    driver: bridge
EOF
|
rm -R crypto-config/
cryptogen generate --config=crypto-config.yaml
rm -R config/
configtxgen -profile ehrOrgOrdererGenesis -outputBlock ./config/genesis.block -channelID ehrchannel
configtxgen -profile ehrOrgChannel -outputCreateChannelTx ./config/ehrchannel.tx -channelID ehrchannel
|
#!/usr/bin/env bash
set -e
aws --profile muumuus s3 sync --delete _site/ s3://muumu.us
|
# http://fastlane.tools
# fastlane.sh
function setup-fastlane() {
gem install fastlane -NV
}
if [ -x "$(command -v fastlane)" ]; then
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
fi
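# Note: setup-fastlane is defined above but never invoked here. A minimal usage
# sketch (an assumption, not part of the original flow) would be to install
# fastlane only when the binary is missing:
#   if ! command -v fastlane >/dev/null 2>&1; then setup-fastlane; fi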
|
function setup_using_base_dir() {
# Declare all variables local so as not to mess with the outside env in any way
local fzf_base
local fzf_shell
local fzfdirs
local dir
test -d "${FZF_BASE}" && fzf_base="${FZF_BASE}"
if [[ -z "${fzf_base}" ]]; then
fzfdirs=(
"${HOME}/.fzf"
"/usr/local/opt/fzf"
"/usr/share/fzf"
)
for dir in ${fzfdirs}; do
if [[ -d "${dir}" ]]; then
fzf_base="${dir}"
break
fi
done
if [[ -z "${fzf_base}" ]]; then
if (( ${+commands[brew]} )) && dir="$(brew --prefix fzf 2>/dev/null)"; then
if [[ -d "${dir}" ]]; then
fzf_base="${dir}"
fi
fi
fi
fi
if [[ -d "${fzf_base}" ]]; then
# Fix fzf shell directory for Archlinux package
if [[ ! -d "${fzf_base}/shell" ]] && [[ -f /etc/arch-release ]]; then
fzf_shell="${fzf_base}"
else
fzf_shell="${fzf_base}/shell"
fi
# Setup fzf binary path
if ! (( ${+commands[fzf]} )) && [[ ! "$PATH" == *$fzf_base/bin* ]]; then
export PATH="$PATH:$fzf_base/bin"
fi
# Auto-completion
if [[ ! "$DISABLE_FZF_AUTO_COMPLETION" == "true" ]]; then
[[ $- == *i* ]] && source "${fzf_shell}/completion.zsh" 2> /dev/null
fi
# Key bindings
if [[ ! "$DISABLE_FZF_KEY_BINDINGS" == "true" ]]; then
source "${fzf_shell}/key-bindings.zsh"
fi
else
return 1
fi
}
function setup_using_debian_package() {
(( $+commands[dpkg] )) && dpkg -s fzf &> /dev/null
if (( $? )); then
# Either not a debian based distro, or no fzf installed. In any case skip ahead
return 1
fi
# NOTE: There is no need to configure PATH for debian package, all binaries
# are installed to /usr/bin by default
local completions="/usr/share/zsh/vendor-completions/_fzf"
local key_bindings="/usr/share/doc/fzf/examples/key-bindings.zsh"
# Auto-completion
if [[ $- == *i* ]] && [[ ! "$DISABLE_FZF_AUTO_COMPLETION" == "true" ]]; then
source $completions 2> /dev/null
fi
# Key bindings
if [[ ! "$DISABLE_FZF_KEY_BINDINGS" == "true" ]]; then
source $key_bindings
fi
return 0
}
function indicate_error() {
print "[oh-my-zsh] fzf plugin: Cannot find fzf installation directory.\n"\
"Please add \`export FZF_BASE=/path/to/fzf/install/dir\` to your .zshrc" >&2
}
# Check for the Debian package first, because it is the quickest to detect.
# Indicate to the user that no fzf installation was found if nothing worked.
setup_using_debian_package || setup_using_base_dir || indicate_error
unset -f setup_using_debian_package setup_using_base_dir indicate_error
|
#!/bin/bash
# tdnn_lstm_1l is the same as tdnn_lstm_1b, but with the per-frame dropout
# added with location 4 in LSTM layer, see paper:
# http://www.danielpovey.com/files/2017_interspeech_dropout.pdf
# ./local/chain/compare_wer_general.sh tdnn_lstm_1b_ld5_sp tdnn_lstm_1l_ld5_sp
# System tdnn_lstm_1b_ld5_sp tdnn_lstm_1l_ld5_sp
# WER on train_dev(tg) 13.06 12.41
# WER on train_dev(fg) 12.13 11.59
# WER on eval2000(tg) 15.1 14.8
# WER on eval2000(fg) 13.9 13.5
# Final train prob -0.047 -0.069
# Final valid prob -0.093 -0.095
# Final train prob (xent) -0.735 -0.913
# Final valid prob (xent) -1.0151 -1.0820
# exp/chain/tdnn_lstm_1b_ld5_sp: num-iters=327 nj=3..16 num-params=39.6M dim=40+100->6074 combine=-0.062->-0.061 xent:train/valid[217,326,final]=(-0.877,-0.741,-0.735/-1.08,-1.02,-1.02) logprob:train/valid[217,326,final]=(-0.063,-0.048,-0.047/-0.095,-0.093,-0.093)
# exp/chain/tdnn_lstm_1l_ld5_sp: num-iters=327 nj=3..16 num-params=39.6M dim=40+100->6074 combine=-0.088->-0.084 xent:train/valid[217,326,final]=(-3.32,-0.961,-0.913/-3.40,-1.13,-1.08) logprob:train/valid[217,326,final]=(-0.176,-0.072,-0.069/-0.198,-0.097,-0.095)
set -e
# configs for 'chain'
stage=12
train_stage=-10
get_egs_stage=-10
speed_perturb=true
dir=exp/chain/tdnn_lstm_1l # Note: _sp will get added to this if $speed_perturb == true.
decode_iter=
decode_dir_affix=
# training options
leftmost_questions_truncate=-1
chunk_width=150
chunk_left_context=40
chunk_right_context=0
xent_regularize=0.025
label_delay=5
dropout_schedule='0,0@0.20,0.3@0.50,0'
# decode options
extra_left_context=50
extra_right_context=0
frames_per_chunk=
remove_egs=false
common_egs_dir=
affix=
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.
suffix=
if [ "$speed_perturb" == "true" ]; then
suffix=_sp
fi
dir=$dir${affix:+_$affix}
if [ $label_delay -gt 0 ]; then dir=${dir}_ld$label_delay; fi
dir=${dir}$suffix
train_set=train_nodup$suffix
ali_dir=exp/tri4_ali_nodup$suffix
treedir=exp/chain/tri5_7d_tree$suffix
lang=data/lang_chain_2y
# if we are using the speed-perturbed data we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
--speed-perturb $speed_perturb \
--generate-alignments $speed_perturb || exit 1;
if [ $stage -le 9 ]; then
# Get the alignments as lattices (gives the CTC training more freedom).
# use the same num-jobs as the alignments
nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1;
steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
data/lang exp/tri4 exp/tri4_lats_nodup$suffix
rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space
fi
if [ $stage -le 10 ]; then
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [note, it really has two states.. the first one is only repeated
# once, the second one has zero or more repeats.]
rm -rf $lang
cp -r data/lang $lang
silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi
if [ $stage -le 11 ]; then
# Build a tree using our new topology.
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--leftmost-questions-truncate $leftmost_questions_truncate \
--context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 7000 data/$train_set $lang $ali_dir $treedir
fi
if [ $stage -le 12 ]; then
echo "$0: creating neural net configs using the xconfig parser";
num_targets=$(tree-info $treedir/tree |grep num-pdfs|awk '{print $2}')
learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input
# please note that it is important to have input layer with the name=input
# as the layer immediately preceding the fixed-affine-layer to enable
# the use of short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
# the first splicing is moved before the lda layer, so no splicing here
relu-renorm-layer name=tdnn1 dim=1024
relu-renorm-layer name=tdnn2 input=Append(-1,0,1) dim=1024
relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024
# check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=true
relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024
lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=true
relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024
lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=true
## adding the layers for chain branch
output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
# adding the layers for xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. we use
# 0.5 / args.xent_regularize as the learning rate factor- the factor of
# 0.5 / args.xent_regularize is suitable as it means the xent
# final-layer learns at a rate independent of the regularization
# constant; and the 0.5 was tuned so as to make the relative progress
# similar in the xent and regular final layers.
output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
if [ $stage -le 13 ]; then
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
fi
steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize $xent_regularize \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.00005 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--trainer.num-chunk-per-minibatch 64 \
--trainer.frames-per-iter 1200000 \
--trainer.max-param-change 2.0 \
--trainer.num-epochs 4 \
--trainer.optimization.shrink-value 0.99 \
--trainer.optimization.num-jobs-initial 3 \
--trainer.optimization.num-jobs-final 16 \
--trainer.optimization.initial-effective-lrate 0.001 \
--trainer.optimization.final-effective-lrate 0.0001 \
--trainer.optimization.momentum 0.0 \
--trainer.deriv-truncate-margin 8 \
--egs.stage $get_egs_stage \
--egs.opts "--frames-overlap-per-eg 0" \
--egs.chunk-width $chunk_width \
--egs.chunk-left-context $chunk_left_context \
--egs.chunk-right-context $chunk_right_context \
--trainer.dropout-schedule $dropout_schedule \
--egs.dir "$common_egs_dir" \
--cleanup.remove-egs $remove_egs \
--feat-dir data/${train_set}_hires \
--tree-dir $treedir \
--lat-dir exp/tri4_lats_nodup$suffix \
--dir $dir || exit 1;
fi
if [ $stage -le 14 ]; then
# Note: it might appear that this $lang directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
utils/mkgraph.sh --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg
fi
decode_suff=sw1_tg
graph_dir=$dir/graph_sw1_tg
if [ $stage -le 15 ]; then
[ -z $extra_left_context ] && extra_left_context=$chunk_left_context;
[ -z $extra_right_context ] && extra_right_context=$chunk_right_context;
[ -z $frames_per_chunk ] && frames_per_chunk=$chunk_width;
iter_opts=
if [ ! -z $decode_iter ]; then
iter_opts=" --iter $decode_iter "
fi
for decode_set in train_dev eval2000; do
(
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--nj 50 --cmd "$decode_cmd" $iter_opts \
--extra-left-context $extra_left_context \
--extra-right-context $extra_right_context \
--frames-per-chunk "$frames_per_chunk" \
--online-ivector-dir exp/nnet3/ivectors_${decode_set} \
$graph_dir data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_${decode_suff} || exit 1;
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_sw1_{tg,fsh_fg} || exit 1;
fi
) &
done
fi
wait;
exit 0;
|
#!/bin/bash
# Trained weights: resnet18_resa_culane_20211016.pt
exp_name=resnet18_resa_culane
url=tcp://localhost:12345
# Training
python -m torch.distributed.launch --nproc_per_node=4 --use_env main_landec.py --epochs=12 --lr=0.06 --warmup-steps=500 --batch-size=5 --workers=4 --dataset=culane --method=resa --backbone=resnet18 --world-size=4 --dist-url=${url} --exp-name=${exp_name}
# Predicting lane points for testing
python main_landec.py --state=2 --batch-size=20 --continue-from=${exp_name}.pt --dataset=culane --method=resa --backbone=resnet18 --exp-name=${exp_name}
# Testing with official scripts
./autotest_culane.sh ${exp_name} test
|
#! /bin/bash
done=false
host=127.0.0.1
port=8080
serviceName=$1
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
main() {
declare -a validate
while [[ "$done" = false ]]; do
healthPath="actuator/health"
urlCheck=http://${host}:${port}/${serviceName}/${healthPath}
if [[ "$serviceName" = "zuul" ]]; then
urlCheck=http://${host}:${port}/${healthPath}
fi
local statusCode=$(checkStatus $urlCheck)
if [ "${statusCode}" == "200" ]; then
done=true
else
done=false
fi
if [[ "$done" = true ]]; then
echo "${serviceName} connected"
break;
fi
#curl -q http://${1?}:8080/health >& /dev/null && curl -q http://${1?}:8081/health >& /dev/null && curl -q http://${1?}:8082/health >& /dev/null
echo -n .
sleep 5
local statusContainer=$(checkContainerIsRunning $serviceName)
if [ "${statusContainer}" == "0" ]; then
echo "${serviceName} is stopped"
break;
fi
done
}
checkContainerIsRunning() {
cd $DIR/../docker
if [ -z "$(docker ps -q --no-trunc | grep "$(docker-compose ps -q "${1}")")" ]; then
echo 0
else
echo 1
fi
cd $DIR
}
checkStatus() {
local status="$(curl -Is ${1} | head -1)"
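# Illustrative note: curl -Is returns a status line like "HTTP/1.1 200 OK";
# after the word-splitting below, validate[1] holds the status code ("200").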
declare -a validate
for i in ${status[@]}
do
validate+=($i)
done
echo ${validate[1]}
}
main
#! /bin/bash
# Script For Building Android arm64 Kernel
#
# Copyright (c) 2018-2020 Panchajanya1999 <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#Kernel building script
# Function to show an informational message
msg() {
echo -e "\e[1;32m$*\e[0m"
}
err() {
echo -e "\e[1;41m$*\e[0m"
exit 1
}
##------------------------------------------------------##
##----------Basic Information, COMPULSORY---------------##
# The default directory where the kernel should be placed
KERNEL_DIR="$(pwd)"
# The name of the Kernel, to name the ZIP
ZIPNAME="Tea-Kernel"
# The name of the device for which the kernel is built
MODEL="xiaomi redmi note 7"
# The codename of the device
DEVICE="lavender"
# The defconfig which should be used. Get it from config.gz from
# your device or check source
DEFCONFIG=tea-old_defconfig
# Specify compiler.
# 'clang' or 'gcc'
COMPILER=clang
# Clean the source prior to building. 1 is NO(default) | 0 is YES
INCREMENTAL=1
# Push ZIP to Telegram. 1 is YES | 0 is NO(default)
PTTG=1
if [ $PTTG = 1 ]
then
# Set Telegram Chat ID
CHATID="-1001171905830"
fi
# Generate a full DEFCONFIG prior to building. 1 is YES | 0 is NO(default)
DEF_REG=0
# Build dtbo.img (select this only if your source has support to building dtbo.img)
# 1 is YES | 0 is NO(default)
BUILD_DTBO=0
# Sign the zipfile
# 1 is YES | 0 is NO
SIGN=0
# Silence the compilation
# 1 is YES | 0 is NO(default)
SILENCE=0
# Debug purposes. Send logs for every successful build
# 1 is YES | 0 is NO(default)
LOG_DEBUG=0
##------------------------------------------------------##
##---------Do Not Touch Anything Beyond This------------##
# Check if we are using a dedicated CI ( Continuous Integration ), and
# set KBUILD_BUILD_VERSION and KBUILD_BUILD_HOST and CI_BRANCH
## Set defaults first
DISTRO=$(cat /etc/issue)
KBUILD_BUILD_HOST=Arch
CI_BRANCH=$(git rev-parse --abbrev-ref HEAD)
token=$TELEGRAM_TOKEN
export KBUILD_BUILD_HOST CI_BRANCH
## Check for CI
if [ -n "$CI" ]
then
if [ -n "$CIRCLECI" ]
then
export KBUILD_BUILD_VERSION=$CIRCLE_BUILD_NUM
export KBUILD_BUILD_HOST="Vcyzteen"
export CI_BRANCH=$CIRCLE_BRANCH
fi
if [ -n "$DRONE" ]
then
export KBUILD_BUILD_VERSION=$DRONE_BUILD_NUMBER
export KBUILD_BUILD_HOST=Vcyzteen
export CI_BRANCH=$DRONE_BRANCH
else
echo "Not presetting Build Version"
fi
fi
#Check Kernel Version
KERVER=$(make kernelversion)
# Set a commit head
COMMIT_HEAD=$(git log --oneline -1)
# Set Date
DATE=$(TZ=Asia/Jakarta date +"%Y%m%d-%T")
# Now it's time for other stuff like cloning, exporting, etc.
clone() {
echo " "
msg "|| Cloning GCC 9.3.0 baremetal ||"
git clone --depth=1 https://github.com/mvaisakh/gcc-arm64 -b gcc-master $KERNEL_DIR/gcc64
git clone --depth=1 https://github.com/mvaisakh/gcc-arm -b gcc-master $KERNEL_DIR/gcc32
git clone --depth=1 https://github.com/kdrag0n/proton-clang -b master $KERNEL_DIR/proton
GCC64_DIR=$KERNEL_DIR/gcc64
GCC32_DIR=$KERNEL_DIR/gcc32
PROTON_DIR=$KERNEL_DIR/proton
msg "|| Cloning Anykernel ||"
git clone --depth 1 --no-single-branch https://github.com/Calliope-K/AnyKernel3 -b master-x00td
cp -af AnyKernel3/anykernel-real.sh AnyKernel3/anykernel.sh
sed -i "s/kernel.string=.*/kernel.string=$ZIPNAME by Tea-Project/g" AnyKernel3/anykernel.sh
}
##------------------------------------------------------##
exports() {
export KBUILD_BUILD_USER="Arch"
export ARCH=arm64
export SUBARCH=arm64
KBUILD_COMPILER_STRING=$("$PROTON_DIR"/bin/clang --version | head -n 1)
PATH=$PROTON_DIR/bin/:$PROTON_DIR/bin/:/usr/bin:$PATH
export PATH KBUILD_COMPILER_STRING
export BOT_MSG_URL="https://api.telegram.org/bot$token/sendMessage"
export BOT_BUILD_URL="https://api.telegram.org/bot$token/sendDocument"
PROCS=$(nproc --all)
export PROCS
}
##---------------------------------------------------------##
tg_post_msg() {
curl -s -X POST "$BOT_MSG_URL" -d chat_id="-1001171905830" \
-d "disable_web_page_preview=true" \
-d "parse_mode=html" \
-d text="$1"
}
##----------------------------------------------------------------##
tg_post_build() {
# Post the MD5 checksum along with the file, for convenience
MD5CHECK=$(md5sum "$1" | cut -d' ' -f1)
# Show the checksum along with the caption
curl --progress-bar -F document=@"$1" "$BOT_BUILD_URL" \
-F chat_id="$2" \
-F "disable_web_page_preview=true" \
-F "parse_mode=html" \
-F caption="$3 | <b>MD5 Checksum : </b><code>$MD5CHECK</code>"
}
##----------------------------------------------------------##
build_kernel() {
if [ $INCREMENTAL = 0 ]
then
msg "|| Cleaning Sources ||"
make clean && make mrproper && rm -rf out
fi
if [ "$PTTG" = 1 ]
then
tg_post_msg "<b>🔨 $KBUILD_BUILD_VERSION CI Build Triggered</b>%0A<b>Docker OS: </b><code>$DISTRO</code>%0A<b>Kernel Version : </b><code>$KERVER</code>%0A<b>Date : </b><code>$(TZ=Asia/Jakarta date)</code>%0A<b>Device : </b><code>$MODEL [$DEVICE]</code>%0A<b>Pipeline Host : </b><code>$KBUILD_BUILD_HOST</code>%0A<b>Host Core Count : </b><code>$PROCS</code>%0A<b>Compiler Used : </b><code>$KBUILD_COMPILER_STRING</code>%0a<b>Branch : </b><code>$CI_BRANCH</code>%0A<b>Top Commit : </b><a href='$DRONE_COMMIT_LINK'><code>$COMMIT_HEAD</code></a>%0A<b>Status : </b>#Nightly" "$CHATID"
fi
make O=out $DEFCONFIG
if [ $DEF_REG = 1 ]
then
cp .config arch/arm64/configs/$DEFCONFIG
git add arch/arm64/configs/$DEFCONFIG
git commit -m "$DEFCONFIG: Regenerate
This is an auto-generated commit"
fi
BUILD_START=$(date +"%s")
if [ $COMPILER = "clang" ]
then
MAKE+=(
ARCH=arm64 \
CC=clang \
AR=llvm-ar \
NM=llvm-nm \
OBJCOPY=llvm-objcopy \
OBJDUMP=llvm-objdump \
STRIP=llvm-strip \
LD=ld.lld \
CROSS_COMPILE=aarch64-linux-gnu- \
CROSS_COMPILE_ARM32=arm-linux-gnueabi-
)
fi
if [ $SILENCE = "1" ]
then
MAKE+=( -s )
fi
msg "|| Started Compilation ||"
export CROSS_COMPILE_ARM32=$PROTON_DIR/bin/arm-linux-gnueabi-
export CROSS_COMPILE=$PROTON_DIR/bin/aarch64-linux-gnu-
make -j"$PROCS" O=out \
ARCH=arm64 \
CC=clang \
AR=llvm-ar \
NM=llvm-nm \
OBJCOPY=llvm-objcopy \
OBJDUMP=llvm-objdump \
STRIP=llvm-strip \
LD=ld.lld \
CROSS_COMPILE=aarch64-linux-gnu- \
CROSS_COMPILE_ARM32=arm-linux-gnueabi-
BUILD_END=$(date +"%s")
DIFF=$((BUILD_END - BUILD_START))
if [ -f "$KERNEL_DIR"/out/arch/arm64/boot/Image.gz-dtb ]
then
msg "|| Kernel successfully compiled ||"
if [ $BUILD_DTBO = 1 ]
then
msg "|| Building DTBO ||"
tg_post_msg "<code>Building DTBO..</code>" "$CHATID"
python2 "$KERNEL_DIR/scripts/ufdt/libufdt/utils/src/mkdtboimg.py" \
create "$KERNEL_DIR/out/arch/arm64/boot/dtbo.img" --page_size=4096 "$KERNEL_DIR/out/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dtbo"
fi
gen_zip
else
if [ "$PTTG" = 1 ]
then
tg_post_msg "<b>❌ Build failed to compile after $((DIFF / 60)) minute(s) and $((DIFF % 60)) seconds</b>" "$CHATID"
fi
fi
}
##--------------------------------------------------------------##
gen_zip() {
msg "|| Zipping into a flashable zip ||"
mv "$KERNEL_DIR"/out/arch/arm64/boot/Image.gz-dtb AnyKernel3/Image.gz-dtb
if [ $BUILD_DTBO = 1 ]
then
mv "$KERNEL_DIR"/out/arch/arm64/boot/dtbo.img AnyKernel3/dtbo.img
fi
cd AnyKernel3 || exit
zip -r9 $ZIPNAME-$DEVICE-"$DATE" * -x .git README.md
## Prepare a final zip variable
ZIP_FINAL="$ZIPNAME-$DEVICE-$DATE.zip"
if [ "$PTTG" = 1 ]
then
tg_post_build "$ZIP_FINAL" "$CHATID" "✅ Build took : $((DIFF / 60)) minute(s) and $((DIFF % 60)) second(s)"
fi
cd ..
}
clone
exports
build_kernel
if [ $LOG_DEBUG = "1" ]
then
tg_post_build "error.log" "$CHATID" "Debug Mode Logs"
fi
##----------------*****-----------------------------##
|
#!/bin/bash
##
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
set -e
set -x
if [ "${TEST_TYPE}" = "unit" ]; then
npm run test
if [[ "${TRAVIS_PULL_REQUEST}" = "false" && "${TRAVIS_BRANCH}" != "master" ]]; then
./scripts/run-sauce-tests.sh;
fi
fi
set +x
set +e
|
#!/usr/bin/env bash
# ----------------------------------------------------------------------------
# (C) Copyright IBM Corp. 2021
#
# SPDX-License-Identifier: Apache-2.0
# ----------------------------------------------------------------------------
# export patient with a _type of Patient
# 0 - Set the tenant id and password
export TENANT_ID=''
export PASS=''
# 1 - Create an Export Request
curl -k -u "fhiruser:${PASS}" -H "Content-Type: application/fhir+json" -X GET \
'https://localhost:9443/fhir-server/api/v4/Patient/$export?_outputFormat=application/fhir%2Bndjson&_type=Patient' \
-v --header "X-FHIR-TENANT-ID: ${TENANT_ID}"
# 2 - Get the Content-Location and the Job
# < HTTP/2 202
# < content-location: https://localhost:9443/fhir-server/api/v4/$bulkdata-status?job=eikWD%2BJszJJ_DkN6HWMLYQ
# 3 - Set the Job Id and execute the request
# Repeat until 200(OK)
curl --location --request GET 'https://localhost:9443/fhir-server/api/v4/$bulkdata-status?job=ZxApgK6zl4MyFudmamTsLQ' \
--header 'Content-Type: application/fhir+json' -k \
-u "fhiruser:${PASS}" -v --header "X-FHIR-TENANT-ID: ${TENANT_ID}"
# 4 - Check the file that is output.
#! /bin/bash
# strict mode
set -euo pipefail
IFS=$'\n\t'
cd ./java/
echo -e "\nRunning FLY Graph tests and code coverage"
mvn -T $(nproc) clean package
|
#!/bin/bash
if [ "${TRAVIS_OS_NAME}" = "linux" ]; then
wget -nv https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
chmod +x miniconda.sh
./miniconda.sh -b
PATH=/home/travis/miniconda3/bin:${PATH}
fi
conda update --yes conda
conda create --yes -n test python="${PYTHON_VERSION}"
|
pkill docker
ip link set dev docker0 down
brctl delbr docker0
iptables -t nat -F POSTROUTING
brctl addbr bridge0
ip addr add 10.0.0.100/24 dev bridge0
ip link set dev bridge0 up
echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker
service docker start
update-ca-certificates
mkdir /disks
mkdir /files
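# Create 4 node directories, each backed by 5 sparse 100G files attached as
# loop devices, formatted as XFS and mounted under /disks/<node>/uuid-sd<x>1
# (these are later bind-mounted into the ECS containers below via -v /disks/<n>:/disks).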
for i in {1..4}; do
mkdir /disks/$i
mkdir /files/$i
for j in {1..5}; do
l=`echo $j | tr 12345 bcdef`
truncate -s 100G /files/$i/$j
mknod /dev/loop$i$j b 7 $i$j
chown --reference=/dev/loop0 /dev/loop$i$j
chmod --reference=/dev/loop0 /dev/loop$i$j
losetup /dev/loop$i$j /files/$i/$j
mkfs.xfs -f /dev/loop$i$j
mkdir /disks/$i/uuid-sd${l}1
mount /dev/loop$i$j /disks/$i/uuid-sd${l}1
/additional-prep.sh /dev/loop$i$j
rm /disks/$i/uuid-sd${l}1/0009
done
done
chmod -R 777 /disks
docker run -d -e SS_GENCONFIG=1 --name=ecs1 --hostname=ecs1.localdomain -p 80:80 -p 443:443 -p 4443:4443 -p 8443:8443 -p 9020:9020 -p 9021:9021 -p 9022:9022 -p 9023:9023 -p 9024:9024 -p 9025:9025 -v /network.json.10.0.0.1:/host/data/network.json -v /seeds:/host/files/seeds -v /partitions.json:/data/partitions.json -v /disks/1:/disks -v /var/log/ecs/1:/opt/storageos/logs djannot/ecs:v2.0HF2
sleep 5
docker exec -i -t ecs1 chmod -R 777 /host
docker run -d -e SS_GENCONFIG=1 --name=ecs2 --hostname=ecs2.localdomain -v /network.json.10.0.0.2:/host/data/network.json -v /seeds:/host/files/seeds -v /partitions.json:/data/partitions.json -v /disks/2:/disks -v /var/log/ecs/2:/opt/storageos/logs djannot/ecs:v2.0HF2
sleep 5
docker exec -i -t ecs2 chmod -R 777 /host
docker run -d -e SS_GENCONFIG=1 --name=ecs3 --hostname=ecs3.localdomain -v /network.json.10.0.0.3:/host/data/network.json -v /seeds:/host/files/seeds -v /partitions.json:/data/partitions.json -v /disks/3:/disks -v /var/log/ecs/3:/opt/storageos/logs djannot/ecs:v2.0HF2
sleep 5
docker exec -i -t ecs3 chmod -R 777 /host
docker run -d -e SS_GENCONFIG=1 --name=ecs4 --hostname=ecs4.localdomain -v /network.json.10.0.0.4:/host/data/network.json -v /seeds:/host/files/seeds -v /partitions.json:/data/partitions.json -v /disks/4:/disks -v /var/log/ecs/4:/opt/storageos/logs djannot/ecs:v2.0HF2
sleep 5
docker exec -i -t ecs4 chmod -R 777 /host
|
export TANGLE_COO_BOOTSTRAP_WAIT=10 # We will wait 10 seconds for coordinator bootstrap
#!/bin/bash
usage() {
PROG=$( basename "$0" )
echo "usage: ${PROG} <config file>"
}
SCRIPT_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=/dev/null
source "${SCRIPT_DIR}/config.sh"
if [ -z "$1" ]; then
usage
exit 1
fi
if [ -z "${DATA_DIR}" ]; then
echo 'DATA_DIR configuration variable was not set. Exiting!'
exit 2
fi
PREFS_DIR="${DATA_DIR}/.java"
CONFIG_DIR="${DATA_DIR}/configs"
CONFIG_FILE=$( basename "$1" )
CONFIG_PATH="${CONFIG_DIR}/${CONFIG_FILE}"
if [ ! -d "${PREFS_DIR}" ]; then
echo "Java preferences directory ${PREFS_DIR} not found...exiting"
exit 3
fi
if [ ! -f "${CONFIG_PATH}" ]; then
echo "Config file ${CONFIG_PATH} not found...exiting"
exit 4
fi
if [ "$TERM" != 'dumb' ]; then
TTY='-it'
fi
if [ ! -w "${DOCKER_SOCKET}" ]; then
SUDO='sudo'
fi
$SUDO docker run $TTY --rm \
--hostname gcds \
-v "${CONFIG_DIR}":/gcds/configs \
-v "${PREFS_DIR}":/root/.java \
"${GCDS_IMAGE}" \
/gcds/encrypt-util -c "/gcds/configs/${CONFIG_FILE}"
|
#!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xeo pipefail
shopt -s nullglob
is_pod_ready() {
[[ "$(kubectl get po "$1" --namespace ${NAMESPACE} -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" == 'True' ]]
}
for test in /tests/*; do
testrunner "--test_spec=${test}"
done
|
#!/bin/bash
xset s noblank
xset s off
xset -dpms
cd bin/Debug
./PixelBlanket.exe
|
#!/usr/bin/env bash
# Try to install composer dev dependencies
cd /data/vendor/simplesamlphp/simplesamlphp/modules/silauth
COMPOSER_ROOT_VERSION=dev-develop composer install --no-interaction --optimize-autoloader --no-scripts --no-progress
# If that failed, exit.
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
# Try to run database migrations
./src/yii migrate --interactive=0
# If they failed, exit.
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
# Run the feature tests
./vendor/bin/behat --config=features/behat.yml
# If they failed, exit.
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
# Switch back to the folder we were in.
cd -
# Run the unit tests
cd /data/vendor/simplesamlphp/simplesamlphp/modules/silauth/src/tests
../../vendor/bin/phpunit .
# If they failed, exit.
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
# Switch back to the folder we were in.
cd -
|
#!/bin/bash
# Make sure remote images are pulled
# TODO: Use environment for image host and tags
docker pull ghcr.io/xant-tv/ecumene/ecumene-core:latest
docker pull ghcr.io/xant-tv/ecumene/ecumene-nginx:latest
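# A possible shape for the TODO above (IMAGE_HOST/IMAGE_TAG are assumed names,
# not part of this repo's tooling):
#   IMAGE_HOST=${IMAGE_HOST:-ghcr.io/xant-tv/ecumene}
#   IMAGE_TAG=${IMAGE_TAG:-latest}
#   docker pull "${IMAGE_HOST}/ecumene-core:${IMAGE_TAG}"
#   docker pull "${IMAGE_HOST}/ecumene-nginx:${IMAGE_TAG}"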
# Compose command to deploy on remote host
docker-compose -f docker-compose.yml up -d --no-build
docker run -d -v "/opt/oracle/network/admin:/opt/oracle/network/admin" --env-file .env --name ecumene-bot ghcr.io/xant-tv/ecumene/ecumene-core:latest bot
#! /bin/bash
mongoimport --username=admin --password=admin --authenticationDatabase=admin --host mongodb --db test --collection options --type json --file /mongo_seed/options.json --jsonArray
mongoimport --username=admin --password=admin --authenticationDatabase=admin --host mongodb --db test --collection phoneBook --type json --file /mongo_seed/phoneBook.json --jsonArray
|
#!/usr/bin/env bash
# This needs to be run in the build tree, which is normally ray/python/ray/core
# Cause the script to exit if a single command fails.
set -e
set -x
# Get the directory in which this script is executing.
SCRIPT_DIR="`dirname \"$0\"`"
RAY_ROOT="$SCRIPT_DIR/../../.."
# Makes $RAY_ROOT an absolute path.
RAY_ROOT="`( cd \"$RAY_ROOT\" && pwd )`"
if [ -z "$RAY_ROOT" ] ; then
exit 1
fi
# Ensure we're in the right directory.
if [ ! -d "$RAY_ROOT/python" ]; then
echo "Unable to find root Ray directory. Has this script moved?"
exit 1
fi
CORE_DIR="$RAY_ROOT/python/ray/core"
REDIS_DIR="$CORE_DIR/src/common/thirdparty/redis/src"
REDIS_MODULE="$CORE_DIR/src/common/redis_module/libray_redis_module.so"
STORE_EXEC="$CORE_DIR/src/plasma/plasma_store"
VALGRIND_CMD="valgrind --track-origins=yes --leak-check=full --show-leak-kinds=all --leak-check-heuristics=stdstring --error-exitcode=1"
if [[ "${RAY_USE_NEW_GCS}" = "on" ]]; then
REDIS_SERVER="$CORE_DIR/src/credis/redis/src/redis-server"
CREDIS_MODULE="$CORE_DIR/src/credis/build/src/libmember.so"
LOAD_MODULE_ARGS="--loadmodule ${CREDIS_MODULE} --loadmodule ${REDIS_MODULE}"
else
REDIS_SERVER="${REDIS_DIR}/redis-server"
LOAD_MODULE_ARGS="--loadmodule ${REDIS_MODULE}"
fi
echo "$STORE_EXEC"
echo "${REDIS_SERVER} --loglevel warning ${LOAD_MODULE_ARGS} --port 6379"
echo "$REDIS_DIR/redis-cli -p 6379 shutdown"
# Allow cleanup commands to fail.
killall plasma_store || true
$REDIS_DIR/redis-cli -p 6379 shutdown || true
sleep 1s
${REDIS_SERVER} --loglevel warning ${LOAD_MODULE_ARGS} --port 6379 &
sleep 1s
# Run tests.
$VALGRIND_CMD $CORE_DIR/src/ray/object_manager/object_manager_test $STORE_EXEC
sleep 1s
$VALGRIND_CMD $CORE_DIR/src/ray/object_manager/object_manager_stress_test $STORE_EXEC
$REDIS_DIR/redis-cli -p 6379 shutdown
sleep 1s
# Include raylet integration test once it's ready.
# $CORE_DIR/src/ray/raylet/object_manager_integration_test $STORE_EXEC
|
#!/bin/bash
#SBATCH --job-name=/data/unibas/boittier/test-neighbours
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --partition=short
#SBATCH --output=/data/unibas/boittier/test-neighbours_%A-%a.out
hostname
# Path to scripts and executables
cubefit=/home/unibas/boittier/fdcm_project/mdcm_bin/cubefit.x
fdcm=/home/unibas/boittier/fdcm_project/fdcm.x
ars=/home/unibas/boittier/fdcm_project/ARS.py
# Variables for the job
n_steps=0
n_charges=24
scan_name=SCAN_amide1.pdb-
suffix=.xyz.chk
cubes_dir=/data/unibas/boittier/fdcm/amide/scan-large
output_dir=/data/unibas/boittier/test-neighbours
frames=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/frames.txt
initial_fit=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/24_charges_refined.xyz
initial_fit_cube=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/amide1.pdb.chk
prev_frame=21
start_frame=22
next_frame=23
acd=/home/unibas/boittier/fdcm_project/0_fit.xyz.acd
start=$start_frame
next=$next_frame
dir='frame_'$next
output_name=$output_dir/$dir/$dir'-'$start'-'$next'.xyz'
initial_fit=$output_dir/"frame_"$start/"frame_"$start'-'$prev_frame'-'$start'.xyz'
# Go to the output directory
mkdir -p $output_dir
cd $output_dir
mkdir -p $dir
cd $dir
# Do Initial Fit
# for initial fit
esp1=$cubes_dir/$scan_name$start$suffix'.p.cube'
dens1=$cubes_dir/$scan_name$start$suffix'.d.cube'
esp=$cubes_dir/$scan_name$next$suffix'.p.cube'
dens=$cubes_dir/$scan_name$next$suffix'.d.cube'
# adjust reference frame
python $ars -charges $initial_fit -pcube $dens1 -pcube2 $dens -frames $frames -output $output_name -acd $acd > $output_name.ARS.log
# do gradient descent fit
$fdcm -xyz $output_name.global -dens $dens -esp $esp -stepsize 0.2 -n_steps $n_steps -learning_rate 0.5 -output $output_name > $output_name.GD.log
# adjust reference frame
python $ars -charges $output_name -pcube $esp -pcube2 $esp -frames $frames -output $output_name -acd $acd > $output_name.ARS-2.log
# make a cube file for the fit
$cubefit -v -generate -esp $esp -dens $dens -xyz refined.xyz > $output_name.cubemaking.log
# do analysis
$cubefit -v -analysis -esp $esp -esp2 $n_charges'charges.cube' -dens $dens > $output_name.analysis.log
echo $PWD
sbatch /home/unibas/boittier/fdcm_project/job_files/test-neighbours/frame_22_25.sh
sbatch /home/unibas/boittier/fdcm_project/job_files/test-neighbours/frame_22_41.sh
sbatch /home/unibas/boittier/fdcm_project/job_files/test-neighbours/frame_23_24.sh
|
if [[ -n $SSH_CONNECTION ]]; then
export PS1='%m:%3~$(git_info_for_prompt)%# '
else
export PS1='%3~$(git_info_for_prompt)%# '
fi
# export TERM='xterm-color'
# export LSCOLORS='cat ~/.dir_colors'
# export LSCOLORS="exfxcxdxbxegedabagacad"
export CLICOLOR=true
# export ZLSCOLORS="${LS_COLORS}"
fpath=($ZSH/zsh/functions $fpath)
autoload -U $ZSH/zsh/functions/*(:t)
HISTFILE=~/.zsh_history
# HISTSIZE=1000
# SAVEHIST=1000
HISTSIZE=99999
# HISTFILESIZE=999999
SAVEHIST=$HISTSIZE
setopt NO_BG_NICE # don't nice background tasks
setopt NO_HUP
setopt NO_LIST_BEEP
setopt LOCAL_OPTIONS # allow functions to have local options
setopt LOCAL_TRAPS # allow functions to have local traps
setopt HIST_VERIFY
# setopt SHARE_HISTORY # share history between sessions ???
setopt EXTENDED_HISTORY # add timestamps to history
setopt PROMPT_SUBST
setopt CORRECT
setopt COMPLETE_IN_WORD
setopt IGNORE_EOF
setopt APPEND_HISTORY # adds history
setopt INC_APPEND_HISTORY # adds history incrementally
setopt HIST_IGNORE_ALL_DUPS # don't record dupes in history
setopt HIST_REDUCE_BLANKS
zle -N newtab
bindkey '^[^[[D' backward-word
bindkey '^[^[[C' forward-word
bindkey '^[[5D' beginning-of-line
bindkey '^[[5C' end-of-line
bindkey '^[[3~' delete-char
bindkey '^[^N' newtab
bindkey '^?' backward-delete-char
# GRC colorizes nifty unix tools all over the place
if gls &>/dev/null
then
source `brew --prefix`/etc/grc.bashrc
fi
# Color grep results
export GREP_OPTIONS='--color=auto'
# key bindings
bindkey ' ' magic-space # also do history expansion on space
export EDITOR=vi
export LETTER_OPENER=1
|
#!/bin/bash
unset GREP_OPTIONS
# Black='\033[1;90m' # Black
Nc='\033[0m'
Red='\033[1;91m' # Red
# Green='\033[1;92m' # Green
# Yellow='\033[1;93m' # Yellow
# Blue='\033[1;94m' # Blue
# Purple='\033[1;95m' # Purple
# Cyan='\033[1;96m' # Cyan
# White='\033[1;97m' # White
snodes=$1
cnodes=$2
protocol=$3
shard_size=$4
c_shard_size=$(echo "$snodes / $shard_size" | bc)
c_shard_size=$(echo "$cnodes / $c_shard_size" | bc)
bsize=$5
run=$6
folder=$7
total_nodes=$((cnodes + snodes - 1))
avg_lt=0.0
lt_cnt=0
avg_clt=0
clt_cnt=0
avg_lt_cnt=0
avg_msg=0
msg_cnt=0
shard_server_counter=0
shard_server_tp=0
shard_server_ctp=0
shards_server_tp=()
shards_server_ctp=()
shard_cl_counter=0
shard_cl_tp=0
shard_cl_ctp=0
shards_cl_tp=()
shards_cl_ctp=()
shard_lt=0
shard_clt=0
avg_shard_lt=0
shards_lt=()
shards_clt=()
avg_shards_lt=()
if [ "$(uname)" == "Darwin" ]; then
flags="-E"
else
flags="-P"
fi
if [ ! -z "$folder" ]; then
cd $folder
else
cd results/
fi
echo "Throughputs:"
for i in $(seq 0 $(($total_nodes))); do
if [ "$i" -lt "$snodes" ]; then
temp=$(tail -10 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^tput=.{1,13}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
temp3=$(tail -10 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^cput=.{1,13}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
# temp2=$(tail -74 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} 'msg_send_cnt=.{1,13}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
if [ -z "$temp" ]; then
temp=0
fi
if [ -z "$temp3" ]; then
temp3=0
fi
shard_server_counter=$(($shard_server_counter + 1))
shard_server_tp=$(($shard_server_tp + $temp))
shard_server_ctp=$(($shard_server_ctp + $temp3))
#avg_msg=$(($avg_msg + $temp2))
#msg_cnt=$(($msg_cnt + 1))
if [[ $shard_server_counter -eq $shard_size ]]; then
shards_server_tp=("${shards_server_tp[@]}" "$shard_server_tp")
shards_server_ctp=("${shards_server_ctp[@]}" "$shard_server_ctp")
shard_server_counter=0
shard_server_tp=0
shard_server_ctp=0
fi
j=$i
if [ $j -lt 10 ]; then
j="0$j"
fi
echo -e "$j: ${Red}${temp}${Nc}\t${Red}${temp3}${Nc}"
else
temp=$(tail -10 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^tput=.{1,13}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
temp2=$(tail -10 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^cput=.{1,13}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
if [ -z "$temp" ]; then
temp=0
fi
if [ -z "$temp2" ]; then
temp2=0
fi
shard_cl_counter=$(($shard_cl_counter + 1))
shard_cl_tp=$(($shard_cl_tp + $temp))
shard_cl_ctp=$(($shard_cl_ctp + $temp2))
#avg_msg=$(($avg_msg + $temp2))
#msg_cnt=$(($msg_cnt + 1))
if [[ $shard_cl_counter -eq $c_shard_size ]]; then
shards_cl_tp=("${shards_cl_tp[@]}" "$shard_cl_tp")
shards_cl_ctp=("${shards_cl_ctp[@]}" "$shard_cl_ctp")
shard_cl_counter=0
shard_cl_tp=0
shard_cl_ctp=0
fi
j=$i
if [ $j -lt 10 ]; then
j="0$j"
fi
echo -e "$j: ${Red}${temp}${Nc}\t${Red}${temp2}${Nc}"
fi
done
echo "Latencies:"
for i in $(seq $snodes $(($total_nodes))); do
temp=$(tail -11 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^Latency=.{1,13}' | grep -o ${flags} '\d+\.\d+')
temp2=$(tail -11 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^CLatency=.{1,13}' | grep -o ${flags} '\d+\.\d+')
temp3=$(tail -50 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} '^AVG Latency:.{1,13}' | grep -o ${flags} '\d+\.\d+')
if [ ! -z "$temp" ]; then
shard_lt=$(echo "scale=3;$shard_lt + $temp" | bc)
lt_cnt=$(($lt_cnt + 1))
else
temp="NAN"
fi
if [ ! -z "$temp2" ]; then
shard_clt=$(echo "scale=3;$shard_clt + $temp2" | bc)
clt_cnt=$(($clt_cnt + 1))
else
temp2="NAN"
fi
if [ ! -z "$temp3" ]; then
avg_shard_lt=$(echo "scale=3;$avg_shard_lt + $temp3" | bc)
avg_lt_cnt=$(($avg_lt_cnt + 1))
else
temp3="NAN"
fi
shard_cl_counter=$(($shard_cl_counter + 1))
echo -e "$i: ${Red}${temp}${Nc}\t${Red}${temp2}${Nc}"
if [[ $shard_cl_counter -eq $c_shard_size ]]; then
if [ $lt_cnt -ne 0 ]; then
t=$(echo "scale=3;$shard_lt / $lt_cnt" | bc)
else
t=0
fi
if [ $clt_cnt -ne 0 ]; then
t2=$(echo "scale=3;$shard_clt / $clt_cnt" | bc)
else
t2=0
fi
if [ $avg_lt_cnt -ne 0 ]; then
t3=$(echo "scale=3;$avg_shard_lt / $avg_lt_cnt" | bc)
else
t3=0
fi
shards_lt=("${shards_lt[@]}" "$t")
shards_clt=("${shards_clt[@]}" "$t2")
avg_shards_lt=("${avg_shards_lt[@]}" "$t3")
shard_cl_counter=0
shard_lt=0
shard_clt=0
avg_shard_lt=0
lt_cnt=0
clt_cnt=0
avg_lt_cnt=0
fi
done
echo
echo "idle times:"
for i in $(seq 0 $(($snodes - 1))); do
echo "I Node: $i"
times=($(tail -50 s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} "idle_time_worker.{1,10}" | grep -o ${flags} '\d+\.\d+'))
for ((j = 0; j < ${#times[@]}; j++)); do
# echo -e "Worker THD ${j}: ${Red}${times[$j]}${Nc}"
echo -en "${Red}${times[$j]}${Nc} "
done
echo
done
echo "Memory:"
for i in $(seq 0 $total_nodes); do
lines=10
if [ $i -lt $snodes ]; then
lines=20
fi
mem=$(tail -${lines} s${snodes}_c${cnodes}_results_${protocol}_b${bsize}_run${run}_node${i}.out | grep -o ${flags} 's_mem_usage=.{1,7}' | grep -o ${flags} '=\d+' | grep -o ${flags} '\d+')
[ ! -z "$mem" ] || mem=0
echo "$i: $(echo "$mem / 1000" | bc)"
done
echo
# ----------------------------------- Servers
total_server_tp=0
avg_server_ctp=0
counter=0
echo "Shards Servers TP:"
for t in ${shards_server_tp[@]}; do
shard_avg_tp=$(echo "$t / $shard_size" | bc)
total_server_tp=$(($total_server_tp + $shard_avg_tp))
echo "${counter}: $(echo -e ${Red}${shard_avg_tp}${Nc})"
counter=$(($counter + 1))
done
counter=0
echo "Shards Servers CTP:"
for t in ${shards_server_ctp[@]}; do
shard_avg_tp=$(echo "$t / $shard_size" | bc)
avg_server_ctp=$(($avg_server_ctp + $shard_avg_tp))
echo "${counter}: $(echo -e ${Red}${shard_avg_tp}${Nc})"
if [ $shard_avg_tp -gt 0 ]; then
counter=$(($counter + 1))
fi
done
echo
if [ $counter -eq 0 ]; then
counter=1
fi
echo "total avg servers TP: $(echo -e ${Red}${total_server_tp}${Nc})"
echo "total avg servers CTP: $(echo -e ${Red}$(echo "scale=0;$avg_server_ctp / $counter" | bc)${Nc})"
echo "Final avg servers TP: $(echo -e ${Red}$(echo "scale=0;$avg_server_ctp / $counter + $total_server_tp" | bc)${Nc})"
echo "--------------------------------------------"
# ----------------------------------- Clients
total_cl_tp=0
total_cl_ctp=0
counter=0
echo -e "\nShards Clients TP:"
for t in ${shards_cl_tp[@]}; do
total_cl_tp=$(($total_cl_tp + $t))
echo -e "${counter}: $(echo -e ${Red}${t}${Nc})"
counter=$(($counter + 1))
done
counter=0
echo "Shards Clients CTP:"
for t in ${shards_cl_ctp[@]}; do
total_cl_ctp=$(($total_cl_ctp + ${t}))
echo -e "${counter}: $(echo -e ${Red}${t}${Nc})"
counter=$(($counter + 1))
done
echo "total avg clients TP: $(echo -e ${Red}${total_cl_tp}${Nc})"
echo "total avg clients CTP: $(echo -e ${Red}${total_cl_ctp}${Nc})"
echo "Final avg clients TP: $(echo -e ${Red}$(echo "scale=0;$total_cl_tp + $total_cl_ctp" | bc)${Nc})"
echo "--------------------------------------------"
# ----------------------------------- Latency
avg_latency=0
avg_latency_cnt=0
avg_clatency=0
avg_clatency_cnt=0
avg_avg_latency=0
avg_avg_latency_cnt=0
echo -e "\nShards Latency:"
for t in ${shards_lt[@]}; do
if [ ! "$t" = "0" ]; then
avg_latency=$(echo "$avg_latency + $t" | bc)
echo -e "${avg_latency_cnt}: $(echo -e ${Red}${t}${Nc})"
avg_latency_cnt=$(($avg_latency_cnt + 1))
else
echo -e "${avg_latency_cnt}: $(echo -e ${Red}NAN${Nc})"
fi
done
echo "Shards Cross Latency:"
for t in ${shards_clt[@]}; do
if [ ! "$t" = "0" ]; then
avg_clatency=$(echo "$avg_clatency + ${t}" | bc)
echo -e "${avg_clatency_cnt}: $(echo -e ${Red}${t}${Nc})"
avg_clatency_cnt=$(($avg_clatency_cnt + 1))
else
echo -e "${avg_clatency_cnt}: $(echo -e ${Red}NAN${Nc})"
fi
done
echo "Shards AVG Latency:"
for t in ${avg_shards_lt[@]}; do
if [ ! "$t" = "0" ]; then
avg_avg_latency=$(echo "$avg_avg_latency + ${t}" | bc)
echo -e "${avg_avg_latency_cnt}: $(echo -e ${Red}${t}${Nc})"
avg_avg_latency_cnt=$(($avg_avg_latency_cnt + 1))
else
echo -e "${avg_avg_latency_cnt}: $(echo -e ${Red}NAN${Nc})"
fi
done
echo "total avg Intra Latency: $(echo -e ${Red}$(echo "scale=3;$avg_latency / $avg_latency_cnt" | bc)${Nc})"
if [ $avg_clatency_cnt -ne 0 ]; then
echo "total avg Cross Latency: $(echo -e ${Red}$(echo "scale=3;$avg_clatency / $avg_clatency_cnt" | bc)${Nc})"
fi
echo "Final avg Latency: $(echo -e ${Red}$(echo "scale=3;($avg_latency + $avg_clatency) / ($avg_clatency_cnt + $avg_latency_cnt)" | bc)${Nc})"
|
#!/usr/bin/env bash
: ${NAMESPACE:="elastifile-csi"}
: ${LOGDIR:="/tmp/csi-logs-"$(date +%s)}
: ${TARBALL:="/tmp/csi-logs.tgz"}
: ${LOGSCRIPT:="${LOGDIR}/fetch-logs.sh"}
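# The ": ${VAR:=default}" lines above make every setting overridable from the
# environment, e.g. (illustrative values): NAMESPACE=my-csi-ns TARBALL=/tmp/my-logs.tgz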
mkdir -p ${LOGDIR}
pushd ${LOGDIR}
export NAMESPACE
kubectl get pod -n ${NAMESPACE} -o go-template='{{range .items}}{{$podName := .metadata.name}}{{range .status.containerStatuses}}{{if ge .restartCount 1}}{{print "kubectl logs -p " $podName " -c " .name " -n $NAMESPACE > " $podName "--" .name "-prev.log\n"}}{{end}}{{end}}{{end}}' > ${LOGSCRIPT}
kubectl get pod -n ${NAMESPACE} -o go-template='{{range .items}}{{$podName := .metadata.name}}{{range .status.containerStatuses}}{{print "kubectl logs " $podName " -c " .name " -n $NAMESPACE > " $podName "--" .name ".log\n"}}{{end}}{{end}}' >> ${LOGSCRIPT}
bash -x ${LOGSCRIPT}
cd ..
tar czvf ${TARBALL} $(basename ${LOGDIR})
popd
rm -rf ${LOGDIR}
echo Done - logs were saved as ${TARBALL}
|
#!/usr/bin/env bash
VENVNAME=Wine_env
python3 -m venv $VENVNAME
source $VENVNAME/bin/activate
pip install --upgrade pip
pip install ipython
pip install jupyter
python -m ipykernel install --user --name=$VENVNAME
test -f requirements.txt && pip install -r requirements.txt
python -m spacy download en_core_web_sm
deactivate
echo "build $VENVNAME"
|
#!/bin/bash
browserify -r buffer-browserify > buffer.js
echo ';module.exports=require("buffer-browserify")' >> buffer.js
|
#!/bin/bash
#$ -hold_jid 559321,559322,559323,559324,559325,559326,559327,559328,559329,559330,559331,559332,559333,559334,559335,559336,559337,559338,559339,559340,559341,559342,559343,559344,559345,559346,559347,559348,559349,559350,559351,559352,559353,559354,559355,559356,559357,559358,559359,559360,559361,559362,559363,559364,559365,559366,559367,559368,559369,559370
set -e
set -o pipefail
umask 0002
#### SET THE STAGE
SCRATCH_DIR=/scratch/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00_181_temp$$
GSTORE_DIR=/srv/gstore/projects
INPUT_DATASET=/srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00/input_dataset.tsv
LAST_JOB=FALSE
echo "Job runs on `hostname`"
echo "at $SCRATCH_DIR"
mkdir $SCRATCH_DIR || exit 1
cd $SCRATCH_DIR || exit 1
source /usr/local/ngseq/etc/lmod_profile
module add Tools/EAGLE/1.1.1b
#### NOW THE ACTUAL JOBS STARTS
eagle --version
eagle-rc --ngi --ref1=/srv/GT/reference/Finger_millet/KEN/DENOVO_v2.0_A_subgenome/Sequence/WholeGenomeFasta/genome.fa --ref2=/srv/GT/reference/Finger_millet/KEN/DENOVO_v2.0_B_subgenome/Sequence/WholeGenomeFasta/genome.fa --bam1=/srv/gstore/projects/p1634/BWA_Reseq_low_151_200_on_A_2020-03-21--17-38-05/181.bam --bam2=/srv/gstore/projects/p1634/BWA_Reseq_low_151_200_on_B_2020-03-17--16-08-12/181.bam -o 181 > 181.sort.stdout.log 2> 181.sort.errout.log
mv 1811.ref.bam 181_A_ref.bam
mv 1811.alt.bam 181_A_alt.bam
mv 1811.unk.bam 181_A_unk.bam
mv 1811.mul.bam 181_A_mul.bam
mv 1812.ref.bam 181_B_ref.bam
mv 1812.alt.bam 181_B_alt.bam
mv 1812.unk.bam 181_B_unk.bam
mv 1812.mul.bam 181_B_mul.bam
echo 'Supercalifragilisticexpialidocious!!' > 181_dummy.txt
#### JOB IS DONE, WE PUT THINGS IN PLACE AND CLEAN UP
g-req -w copy 181_A_ref.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_A_alt.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_A_unk.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_A_mul.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_B_ref.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_B_alt.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_B_unk.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_B_mul.bam /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181.sort.stdout.log /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181.sort.errout.log /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
g-req -w copy 181_dummy.txt /srv/gstore/projects/p1634/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00
cd /scratch
rm -rf /scratch/EAGLERC_Reseq_low_151_200_2020-03-23--08-27-00_181_temp$$ || exit 1
|
#!/bin/bash
if [[ ! $BUILD_RUNTIME =~ cloud ]]; then
echo "Build runtime is not cloud runtime."
exit 0
fi
# Packages
PACKAGES=(
cloud-init
)
if [[ $BUILD_GUEST_OS =~ centos ]]; then
yum install -y ${PACKAGES[@]}
elif [[ $BUILD_GUEST_OS =~ debian ]]; then
apt-get install -y -qq ${PACKAGES[@]}
fi
mkdir -p /etc/cloud
cat > /etc/cloud/cloud.cfg <<"EOF"
users:
  - default
disable_root: true
preserve_hostname: false
# The modules that run in the 'init' stage
cloud_init_modules:
  - migrator
  - bootcmd
  - write-files
  - resizefs
  - set_hostname
  - update_hostname
  - update_etc_hosts
  - ca-certs
  - rsyslog
  - users-groups
  - ssh
# The modules that run in the 'config' stage
cloud_config_modules:
  # Emit the cloud config ready event
  # this can be used by upstart jobs for 'start on cloud-config'.
  - emit_upstart
  - mounts
  - ssh-import-id
  - locale
  - set-passwords
  - grub-dpkg
  - apt-pipelining
  - apt-configure
  - package-update-upgrade-install
  - landscape
  - timezone
  - puppet
  - chef
  - salt-minion
  - mcollective
  - disable-ec2-metadata
  - runcmd
  - byobu
# The modules that run in the 'final' stage
cloud_final_modules:
  - rightscale_userdata
  - scripts-per-once
  - scripts-per-boot
  - scripts-per-instance
  - scripts-user
  - ssh-authkey-fingerprints
  - keys-to-console
  - phone-home
  - final-message
  - power-state-change
system_info:
  distro: debian
  default_user:
    name: admin
    lock_passwd: True
    shell: /bin/bash
    sudo: ['ALL=(ALL) NOPASSWD:ALL']
    groups: [sudo]
  paths:
    cloud_dir: /var/lib/cloud/
    templates_dir: /etc/cloud/templates/
    upstart_dir: /etc/init/
  package_mirrors:
    - arches: [default]
      failsafe:
        primary: http://ftp.debian.org/debian
EOF
CONSOLE="console=ttyS0,115200n8 console=tty0"
sed -i -e "s/\(GRUB_CMDLINE_LINUX.*\)\"/\1 $CONSOLE\"/" /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
dracut --force --add-drivers xen_blkfront /boot/initramfs-$(uname -r).img
rm /etc/hosts
|
#!/bin/sh
# replace config default values with those provided via environment variables
for env in `printenv`
do
if [ "$env" != "${env#DIGILIB_}" ]
then
# split the env variable at the "=" character and
# save the tokens in $f1 and $f2
IFS='=' read -r f1 f2 <<EOF
$env
EOF
# turn the key into lower-case and replace underscore with hyphen
KEY=$(echo ${f1} | cut -c9- | tr '[:upper:]' '[:lower:]' | tr '_' '-')
VALUE=${f2}
# replace existing default values in digilib-config.xml
sed -i -e "/name=\"$KEY\"/ s#value=\"[^\"]*\"#value=\"$VALUE\"#" ${JETTY_WEBAPPS}/ROOT/WEB-INF/digilib-config.xml
# add new entries to digilib-config.xml
if ! grep $KEY -q ${JETTY_WEBAPPS}/ROOT/WEB-INF/digilib-config.xml
then
sed -i -e "/<\/digilib-config>/i \ \ <parameter name=\"$KEY\" value=\"$VALUE\"/>" ${JETTY_WEBAPPS}/ROOT/WEB-INF/digilib-config.xml
fi
fi
done
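# Example of the mapping above (hypothetical variable): DIGILIB_BASEDIR_LIST=/docs/images
# becomes the key "basedir-list", so an existing
#   <parameter name="basedir-list" value="..."/>
# entry is rewritten to value="/docs/images", or a new entry is appended.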
# run the command given in the Dockerfile at CMD
exec /docker-entrypoint.sh
#!/bin/bash
until nc -z ${RABBIT_HOST} ${RABBIT_PORT}; do
echo "$(date) - waiting for rabbitmq..."
sleep 1
done
until nc -z ${REDIS_HOST} ${REDIS_PORT}; do
echo "$(date) - waiting for redis..."
sleep 1
done
n=15
while [ $n -gt 0 ]
do
echo "Wait for kafka $n more times."
n=$(( n-1 ))
sleep 10
done
while python check_kafka.py; do echo 'connecting to kafka...'; sleep 10; done;
nameko run --config config.yml services
|
#!/usr/bin/env bash
# ./scripts/install-operator-sdk.sh
#
#
# Installs Operator SDK if not installed. An optional parameter specifies the
# version of Operator SDK to install. Defaults to the value of the environment
# variable OPERATOR_SDK_VERSION and, if that is not set, the value of the
# DEFAULT_OPERATOR_SDK_VERSION variable.
#
# NOTE: uses `sudo mv` to relocate a downloaded binary to /code-generator/bin/operator-sdk
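# Usage sketch (both forms are supported by the argument/env handling below):
#   ./scripts/install-operator-sdk.sh 1.17.0
#   OPERATOR_SDK_VERSION=1.17.0 ./scripts/install-operator-sdk.sh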
set -eo pipefail
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
ROOT_DIR="$SCRIPTS_DIR/.."
DEFAULT_OPERATOR_SDK_BIN_PATH="$ROOT_DIR/../code-generator/bin"
OPERATOR_SDK_BIN_PATH=${OPERATOR_SDK_BIN_PATH:-$DEFAULT_OPERATOR_SDK_BIN_PATH}
DEFAULT_OPERATOR_SDK_VERSION="1.17.0"
source "${SCRIPTS_DIR}/lib/common.sh"
__operator_sdk_version="${1}"
if [ "x${__operator_sdk_version}" == "x" ]; then
__operator_sdk_version=${OPERATOR_SDK_VERSION:-$DEFAULT_OPERATOR_SDK_VERSION}
fi
if ! is_installed ${OPERATOR_SDK_BIN_PATH}/operator-sdk; then
__platform=$(uname | tr '[:upper:]' '[:lower:]')
__tmp_install_dir=$(mktemp -d -t install-operator-sdk-XXX)
__operator_sdk_url="https://github.com/operator-framework/operator-sdk/releases/download/v${__operator_sdk_version}/operator-sdk_${__platform}_amd64"
__install_dir=${OPERATOR_SDK_BIN_PATH}
# If __install_dir does not exist, create it
[[ -d $__install_dir ]] || mkdir -p "$__install_dir"
__install_path="$__install_dir/operator-sdk"
echo -n "installing operator-sdk from ${__operator_sdk_url} ... "
curl -sq -L ${__operator_sdk_url} --output ${__tmp_install_dir}/operator-sdk_${__platform}_amd64
chmod +x ${__tmp_install_dir}/operator-sdk_${__platform}_amd64
sudo mv "${__tmp_install_dir}/operator-sdk_${__platform}_amd64" "$__install_path"
echo "ok."
fi
|
#!/bin/bash
#
# Regenerate python code from OTLP protos in
# https://github.com/open-telemetry/opentelemetry-proto
#
# To use, update PROTO_REPO_BRANCH_OR_COMMIT variable below to a commit hash or
# tag in the opentelemetry-proto repo that you want to build off of. Then, just run
# this script to update the proto files. Commit the changes as well as any
# fixes needed in the OTLP exporter.
#
# Optional envars:
# PROTO_REPO_DIR - the path to an existing checkout of the opentelemetry-proto repo
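# Example invocation (illustrative; the script filename is assumed, and an
# existing checkout is reused instead of the /tmp default below):
#   PROTO_REPO_DIR=~/src/opentelemetry-proto ./proto_codegen.sh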
# Pinned commit/branch/tag for the current version used in opentelemetry-proto python package.
PROTO_REPO_BRANCH_OR_COMMIT="v0.6.0"
set -e
PROTO_REPO_DIR=${PROTO_REPO_DIR:-"/tmp/opentelemetry-proto"}
# root of opentelemetry-python repo
repo_root="$(git rev-parse --show-toplevel)"
venv_dir="/tmp/proto_codegen_venv"
# run on exit even if crash
cleanup() {
echo "Deleting $venv_dir"
rm -rf $venv_dir
}
trap cleanup EXIT
echo "Creating temporary virtualenv at $venv_dir using $(python3 --version)"
python3 -m venv $venv_dir
source $venv_dir/bin/activate
python -m pip install \
-c $repo_root/dev-requirements.txt \
grpcio-tools mypy-protobuf
# Clone the proto repo if it doesn't exist
if [ ! -d "$PROTO_REPO_DIR" ]; then
git clone https://github.com/open-telemetry/opentelemetry-proto.git $PROTO_REPO_DIR
fi
# Pull in changes and switch to requested branch
(
cd $PROTO_REPO_DIR
git fetch --all
git checkout $PROTO_REPO_BRANCH_OR_COMMIT
# pull if PROTO_REPO_BRANCH_OR_COMMIT is not a detached head
git symbolic-ref -q HEAD && git pull --ff-only || true
)
cd $repo_root/opentelemetry-proto/src
# clean up old generated code
find opentelemetry/ -regex ".*_pb2.*\.pyi?" -exec rm {} +
# generate proto code for all protos
all_protos=$(find $PROTO_REPO_DIR/ -iname "*.proto")
python -m grpc_tools.protoc \
-I $PROTO_REPO_DIR \
--python_out=. \
--mypy_out=. \
$all_protos
# generate grpc output only for protos with service definitions
service_protos=$(grep -REl "service \w+ {" $PROTO_REPO_DIR/opentelemetry/)
python -m grpc_tools.protoc \
-I $PROTO_REPO_DIR \
--python_out=. \
--mypy_out=. \
--grpc_python_out=. \
$service_protos
|
#!/bin/sh
set -e
SCRIPT=`basename "$0"`
# NOTE: git is required, but it should already be preinstalled on Ubuntu 16.04
#echo "[INFO] [${SCRIPT}] Setup git"
#sudo apt install -y git
# Using Docker CE directly provided by Docker
echo "[INFO] [${SCRIPT}] Setup docker"
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
sudo usermod -aG docker ubuntu |
#!/bin/sh
HGROOT=`hg root`
ASTYLE=astyle
echo "Formatting with astyle"
FILES=$(hg st -macn | grep -E "\.(cpp|h)$" | xargs -i echo $HGROOT/{})
echo "Reformatting the following files:"
echo $FILES
if [ "$FILES" ]; then
echo $FILES | xargs $ASTYLE -n --style=linux --indent=tab --indent-switches --indent-namespaces
else
echo "No files to be reformatted"
fi
|
#!/usr/bin/env bash
echo 'Start preparing the directories to be packaged'
echo 'Cleaning the dist directory, script: \\rm -rf ./dist/ && mkdir dist'
\rm -rf ./dist/
mkdir dist
echo 'script:\\cp -R ./node_modules/ ./dist/node_modules/ && \\cp -R ./bin/ ./dist/bin/ && \\cp -R ./public/ ./dist/public/ && \\cp -R ./server/ ./dist/server/ && \\cp -R ./commons/ ./dist/commons/'
\cp -R ./node_modules/ ./dist/node_modules/ && \cp -R -P ./bin/ ./dist/bin/ && \cp -R ./public/ ./dist/public/ && \cp -R ./server/ ./dist/server/ && \cp -R ./commons/ ./dist/commons/
echo 'script:rm -rf ./dist/public/dll/ ./dist/server/templates/'
rm -rf ./dist/public/dll/ ./dist/server/templates/
echo 'Granting permissions on the directory, script: chmod -R +x ./dist/'
chmod -R +x ./dist/
echo 'Finished preparing the directories to be packaged' |
#! /bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -u
cd /build
wget https://shellcheck.storage.googleapis.com/shellcheck-v0.6.0.linux.x86_64.tar.xz
tar -xf shellcheck-v0.6.0.linux.x86_64.tar.xz
install -o 0 -g 0 -m 0755 shellcheck-v0.6.0/shellcheck /usr/local/bin/shellcheck
rm -rf shellcheck-v0.6.0 shellcheck-v0.6.0.linux.x86_64.tar.xz
|
if [[ -z "${SQLALCHEMY_DATABASE_PASSWORD}" ]]; then
echo "SQLALCHEMY_DATABASE_PASSWORD is not set"
else
docker create -v /var/lib/postgresql/data --name postgres9.6-data busybox
docker run --name local-postgres-9.6 -p 5432:5432 -e POSTGRES_PASSWORD=$SQLALCHEMY_DATABASE_PASSWORD --volumes-from postgres9.6-data -d postgres:9.6
fi
|
#!/bin/bash
docker run -it --rm --name builder-go -v /Users/kyle/src:/opt/src builder-go bash
say -v Karen Exiting... And removing the builder. go. container.
|
#! /bin/bash
echo -e "Enter a characters = \c"
read char
case $char in
[a-z] )
echo "You entered lower case character.";;
[A-Z] )
echo "You entered upper case character.";;
[0-9] )
echo "You entered digits";;
? )
echo "You entered special character";;
* )
echo "You entered unknown characters";;
esac
|
tick="✓"
cross="✗"
step_log() {
message=$1
printf "\n\033[90;1m==> \033[0m\033[37;1m%s\033[0m\n" "$message"
}
add_log() {
mark=$1
subject=$2
message=$3
if [ "$mark" = "$tick" ]; then
printf "\033[32;1m%s \033[0m\033[34;1m%s \033[0m\033[90;1m%s\033[0m\n" "$mark" "$subject" "$message"
else
printf "\033[31;1m%s \033[0m\033[34;1m%s \033[0m\033[90;1m%s\033[0m\n" "$mark" "$subject" "$message"
fi
}
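# Illustrative usage of the helpers above (extension names and messages are
# examples only):
#   add_log "$tick" "xdebug" "Enabled"
#   add_log "$cross" "imagick" "Could not install imagick"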
step_log "Setup PHP and Composer"
version=$1
export HOMEBREW_NO_INSTALL_CLEANUP=TRUE
brew tap shivammathur/homebrew-php >/dev/null 2>&1
brew install shivammathur/php/php@"$1" composer >/dev/null 2>&1
brew link --force --overwrite php@"$1" >/dev/null 2>&1
ini_file=$(php -d "date.timezone=UTC" --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||" | sed "s/ //g")
echo "date.timezone=UTC" >> "$ini_file"
ext_dir=$(php -i | grep "extension_dir => /usr" | sed -e "s|.*=>\s*||")
sudo chmod 777 "$ini_file"
mkdir -p "$(pecl config-get ext_dir)"
composer global require hirak/prestissimo >/dev/null 2>&1
semver=$(php -v | head -n 1 | cut -f 2 -d ' ')
add_log "$tick" "PHP" "Installed PHP $semver"
add_log "$tick" "Composer" "Installed"
add_extension() {
extension=$1
install_command=$2
prefix=$3
if ! php -m | grep -i -q "$extension" && [ -e "$ext_dir/$extension.so" ]; then
echo "$prefix=$extension" >>"$ini_file" && add_log $tick "$extension" "Enabled"
elif php -m | grep -i -q "$extension"; then
add_log "$tick" "$extension" "Enabled"
elif ! php -m | grep -i -q "$extension"; then
exists=$(curl -sL https://pecl.php.net/json.php?package="$extension" -w "%{http_code}" -o /dev/null)
if [ "$exists" = "200" ]; then
(
eval "$install_command" && \
add_log "$tick" "$extension" "Installed and enabled"
) || add_log "$cross" "$extension" "Could not install $extension on PHP $semver"
else
if ! php -m | grep -i -q "$extension"; then
add_log "$cross" "$extension" "Could not find $extension for PHP $semver on PECL"
fi
fi
fi
} |
#This shell script runs the program that parses out
#what music I played in a given month, as captured by iTunes. See format below
#TODO:
#Create a program to update the music played index.
#Automate the process of getting the iTunes file to the server
MonthPrep=$(date +"%m")
Month=`expr $MonthPrep - 1`
Year=$(date +"%Y")
perl /[PATH]/iTunesPlayListing.pl $Month $Year
|
echo "HELLO"
|
#!/usr/bin/env bash
set -eux
sudo curl -LO https://go.dev/dl/go1.18.darwin-amd64.tar.gz
sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xzf go1.18.darwin-amd64.tar.gz
sudo mv /usr/local/go/bin/go /usr/local/bin/go
sudo mv /usr/local/go/bin/gofmt /usr/local/bin/gofmt
|
#!/usr/bin/env bash
#############################################################################
# Copyright (c) 2017-2018 SiteWare Corp. All right reserved
#############################################################################
UNUSED_USER_ID=21338
UNUSED_GROUP_ID=21337
UNUSED_DOCKER_GROUP_ID=21336
export DEBIAN_FRONTEND=noninteractive
# Find the package manager, Ubuntu uses apt-get, AML uses yum
(type apt-get &> /dev/null) && DOCKER_PKGUPD="apt-get -y update"
(type apt-get &> /dev/null) && DOCKER_PKGMGR="apt-get -y install"
(type yum &> /dev/null) && DOCKER_PKGUPD="true"
(type yum &> /dev/null) && DOCKER_PKGMGR="yum -y install"
# The ensure_group_id_is_free and ensure_user_id_is_free functions come from here
# https://github.com/schmidigital/permission-fix/blob/master/LICENSE
# MIT License
function ensure_group_id_is_free() {
local DOCKER_GROUP=$1
local HOST_GROUP_ID=$2
local UNUSED_ID=$3
echo "EGIF: Check if group with ID $HOST_GROUP_ID already exists"
DOCKER_GROUP_OLD=`getent group $HOST_GROUP_ID | cut -d: -f1`
if [ -z "$DOCKER_GROUP_OLD" ]; then
echo "EGIF: Group ID is free. Good."
elif [ x"$DOCKER_GROUP_OLD" = x"DOCKER_GROUP" ]; then
echo "EGIF: Group ID is taken by the right group"
else
echo "EGIF: Group ID is already taken by group: $DOCKER_GROUP_OLD"
echo "EGIF: Changing the ID of $DOCKER_GROUP_OLD group to $UNUSED_GROUP_ID"
groupmod -o -g $UNUSED_ID $DOCKER_GROUP_OLD
fi
#echo "Changing the ID of $DOCKER_GROUP group to $HOST_GROUP_ID"
#groupmod -o -g $HOST_GROUP_ID $DOCKER_GROUP || true
echo "EGIF: Finished"
echo "EGIF: -- -- -- -- --"
}
function ensure_user_id_is_free() {
local DOCKER_USER=$1
local HOST_USER_ID=$2
local UNUSED_ID=$3
# Setting User Permissions
echo "EUIF: Check if user with ID $HOST_USER_ID already exists"
DOCKER_USER_OLD=`getent passwd $HOST_USER_ID | cut -d: -f1`
if [ -z "$DOCKER_USER_OLD" ]; then
echo "EUIF: User ID is free. Good."
elif [ x"$DOCKER_USER_OLD" = x"DOCKER_USER" ]; then
echo "EUIF: USER ID is taken by the right user"
else
echo "EUIF: User ID is already taken by user: $DOCKER_USER_OLD"
echo "EUIF: Changing the ID of $DOCKER_USER_OLD to 21337"
usermod -o -u $UNUSED_ID $DOCKER_USER_OLD
fi
#echo "Changing the ID of $DOCKER_USER user to $HOST_USER_ID"
#usermod -o -u $HOST_USER_ID $DOCKER_USER || true
echo "EUIF: Finished"
}
if [ x"$USER" != x"" ] ; then
(type yum &> /dev/null) && $DOCKER_PKGMGR shadow-utils # for usermod etc
(type sudo &> /dev/null) || ($DOCKER_PKGUPD && $DOCKER_PKGMGR sudo)
if [ x"$GROUP_ID" != x"" -a x"$(getent group $GROUP_ID | cut -d: -f1)" != x"$USER" ] ; then
ensure_group_id_is_free $USER $GROUP_ID $UNUSED_GROUP_ID
(type yum &> /dev/null) && groupadd --gid $GROUP_ID $USER
(type apt-get &> /dev/null) && addgroup --gid $GROUP_ID $USER
fi
if [ x"$USER_ID" != x"" -a x"$(getent passwd $USER_ID | cut -d: -f1)" != x"$USER" ] ; then
ensure_user_id_is_free $USER $USER_ID $UNUSED_USER_ID
(type yum &> /dev/null) && adduser \
--no-create-home --uid $USER_ID --gid $GROUP_ID $USER
(type apt-get &> /dev/null) && adduser \
--disabled-password \
--no-create-home \
--gecos '' \
--uid $USER_ID \
--ingroup $USER $USER
echo "$USER ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/$USER
fi
fi
if [ x"$DOCKER_GROUP_ID" != x"" -a \
x"$(getent group $HOST_USER_ID | cut -d: -f1)" != x"docker" ] ; then
ensure_group_id_is_free docker $DOCKER_GROUP_ID $UNUSED_DOCKER_GROUP_ID
(type yum &> /dev/null) && groupadd --gid $DOCKER_GROUP_ID docker
(type apt-get &> /dev/null) && addgroup --gid $DOCKER_GROUP_ID docker
usermod -aG docker $USER
fi
if [ x"$SD2_EP_SSH" = x"1" ]; then
(type sshd &> /dev/null) || ($DOCKER_PKGUPD && $DOCKER_PKGMGR openssh-server)
(type yum &> /dev/null) && service sshd start
(type apt-get &> /dev/null) && service ssh start
fi
if [ -n "$SD2_EP_TZ" ] ; then
export TZ=$SD2_EP_TZ
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ > /etc/timezone
fi
[ -n "$SD2_EP_SCRIPT" ] && $SD2_EP_SCRIPT
if [ x"$SD2_EP_SHELL" = x"1" ]; then
sudo -i -u $USER
fi
if [ x"$SD2_EP_DAEMON" = x"1" ]; then
echo Sleeping for ever...
sleep infinity
fi
|
#!/bin/bash
script_name=$0
script_abs=$(readlink -f "$0")
script_dir=$(dirname $script_abs)
BOLT_ROOT=${script_dir}/..
CXX=$1
AR=$2
STRIP=$3
build_dir=$4
use_mali=$5
use_debug=$6
use_android=$7
use_android_log=$8
use_ios=$9
use_openmp=${10}
allSrcs=""
skip_list=()
srcs=""
searchFiles() {
srcs=""
for line in ${allSrcs}
do
skip=false
for str in "${skip_list[@]}"
do
if [[ ${line} =~ ${str} ]];
then
skip=true
break
fi
done
if [[ ${skip} == false ]]
then
srcs="${srcs} ${build_dir}/${line}"
fi
done
}
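# Illustrative behaviour (descriptive only): with allSrcs listing every *.o
# under ${build_dir} and skip_list=("tests" "examples"), searchFiles fills
# ${srcs} with only those object files whose paths match none of the skip patterns.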
if [ $use_ios == "OFF" ];
then
allSrcs=`find ${build_dir} -name "*.o" -printf "%P\n"`
skip_list=("static" "model_tools" "tests" "tools" "examples" "flow" "data_loader")
searchFiles
jniLibrarySrcs="${srcs} \
${build_dir}/model_tools/src/CMakeFiles/model_tools.dir/model_tools.cpp.o"
fi
allSrcs=`find ${build_dir} -name "*.o" -printf "%P\n"| grep "static.dir"`
skip_list=("tests" "tools" "examples" "BoltModel_Jni" "flow" "data_loader")
searchFiles
staticLibrarySrcs="${srcs} \
${build_dir}/model_tools/src/CMakeFiles/model_tools_static.dir/model_tools.cpp.o"
allSrcs=`find ${build_dir} -name "*.o" -printf "%P\n"`
skip_list=("static" "tests" "tools" "examples" "BoltModel_Jni" "flow" "data_loader")
searchFiles
sharedLibrarySrcs="${srcs} \
${build_dir}/model_tools/src/CMakeFiles/model_tools_static.dir/model_tools.cpp.o"
if [ -f "${build_dir}/common/gcl/tools/kernel_source_compile/libkernelsource.so" ] && [ $use_mali == "ON" ];
then
gclLibrarySrcs="${build_dir}/common/gcl/tools/kernel_source_compile/CMakeFiles/kernelsource.dir/src/cl/gcl_kernel_source.cpp.o \
${build_dir}/common/gcl/tools/kernel_source_compile/CMakeFiles/kernelsource.dir/src/cl/inline_cl_source.cpp.o \
${build_dir}/common/gcl/tools/kernel_source_compile/CMakeFiles/kernelsource.dir/src/option/gcl_kernel_option.cpp.o \
${build_dir}/common/gcl/tools/kernel_source_compile/CMakeFiles/kernelsource.dir/src/option/inline_cl_option.cpp.o"
jniLibrarySrcs="${jniLibrarySrcs} ${gclLibrarySrcs}"
staticLibrarySrcs="${staticLibrarySrcs} ${gclLibrarySrcs}"
sharedLibrarySrcs="${sharedLibrarySrcs} ${gclLibrarySrcs}"
fi
if [ -f "${BOLT_ROOT}/third_party/arm_llvm/opencl/lib64/libOpenCL.so" ] && [ $use_mali == "ON" ];
then
cp ${BOLT_ROOT}/third_party/arm_llvm/opencl/lib64/libOpenCL.so ${build_dir}
${STRIP} ${build_dir}/libOpenCL.so || exit 1
fi
if [ -f "${build_dir}/libbolt.a" ];
then
rm -rf ${build_dir}/libbolt.a
fi
if [ -f "${build_dir}/libbolt.so" ];
then
rm -rf ${build_dir}/libbolt.so
fi
if [ -f "${build_dir}/libbolt.dylib" ];
then
rm -rf ${build_dir}/libbolt.dylib
fi
if [ -f "${build_dir}/libBoltModel.so" ];
then
rm -rf ${build_dir}/libBoltModel.so
fi
lib=""
if [ $use_android_log == "ON" ] && [ $use_android == "ON" ];
then
lib="${lib} -llog"
fi
if [ $use_openmp == "ON" ];
then
lib="${lib} -fopenmp"
fi
if [ -f "${build_dir}/common/gcl/tools/kernel_source_compile/libkernelsource.so" ] && [ $use_mali == "ON" ];
then
${STRIP} ${build_dir}/common/gcl/tools/kernel_source_compile/libkernelsource.so || exit 1
lib="${lib} -L${BOLT_ROOT}/third_party/arm_llvm/opencl/lib64 -lOpenCL"
fi
if [ $use_ios == "ON" ];
then
${CXX} -shared -o ${build_dir}/libbolt.dylib ${sharedLibrarySrcs} ${lib} || exit 1
else
${CXX} -shared -o ${build_dir}/libBoltModel.so ${jniLibrarySrcs} ${lib} -Wl,-soname,libBoltModel.so || exit 1
${CXX} -shared -o ${build_dir}/libbolt.so ${sharedLibrarySrcs} ${lib} -Wl,-soname,libbolt.so || exit 1
fi
${AR} -rc ${build_dir}/libbolt.a ${staticLibrarySrcs} || exit 1
if [ $use_debug == "OFF" ];
then
if [ $use_ios == "OFF" ];
then
${STRIP} ${build_dir}/libBoltModel.so || exit 1
fi
if [ $use_ios == "OFF" ];
then
${STRIP} ${build_dir}/libbolt.so || exit 1
${STRIP} -g -S -d --strip-debug --strip-unneeded ${build_dir}/libbolt.a || exit 1
fi
fi
|
sudo apt-get update
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-add-repository 'deb https://apt.dockerproject.org/repo ubuntu-xenial main'
sudo apt-get update
sudo apt-get install -y docker-engine
sudo usermod -aG docker $(whoami)
exec sudo su ${USER}
sudo curl -o /usr/local/bin/docker-compose -L "https://github.com/docker/compose/releases/download/1.11.2/docker-compose-$(uname -s)-$(uname -m)"
sudo chmod +x /usr/local/bin/docker-compose
|
#!/usr/bin/env bash
set -e # halt script on error
cd _sass/
bourbon install
cd ../../
bundle exec jekyll build
|
#!/bin/bash
# This script parses the command line parameters passed by runCust,
# maps them to the corresponding DispNet training script parameters and launches that task
# The last line of runCust should be: bash $CONFIG_FILE --data-dir $DATA_DIR --log-dir $LOG_DIR
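# Example invocation (hypothetical paths, for illustration only):
#   bash run_dispnet_training_philly.sh --data-dir /philly/data --log-dir /philly/logs --config-file-dir /philly/config --model-dir /philly/models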
# Parse the command line parameters
# that runCust will give out
DATA_DIR=NONE
LOG_DIR=NONE
CONFIG_DIR=NONE
MODEL_DIR=NONE
# Parsing command line arguments:
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-h|--help)
echo "Usage: run_dispnet_training_philly.sh [run_options]"
echo "Options:"
echo " -d|--data-dir <path> - directory path to input data (default NONE)"
echo " -l|--log-dir <path> - directory path to save the log files (default NONE)"
echo " -p|--config-file-dir <path> - directory path to config file directory (default NONE)"
echo " -m|--model-dir <path> - directory path to output model file (default NONE)"
exit 1
;;
-d|--data-dir)
DATA_DIR="$2"
shift # pass argument
;;
-p|--config-file-dir)
CONFIG_DIR="$2"
shift # pass argument
;;
-m|--model-dir)
MODEL_DIR="$2"
shift # pass argument
;;
-l|--log-dir)
LOG_DIR="$2"
shift
;;
*)
        echo "Unknown option $key"
;;
esac
shift # past argument or value
done
# Prints out the arguments that were passed into the script
echo "DATA_DIR=$DATA_DIR"
echo "LOG_DIR=$LOG_DIR"
echo "CONFIG_DIR=$CONFIG_DIR"
echo "MODEL_DIR=$MODEL_DIR"
# Run training on philly
# Add the root folder of the code to the PYTHONPATH
export PYTHONPATH=$PYTHONPATH:$CONFIG_DIR
# Run the actual job
python $CONFIG_DIR/examples/AnytimeNetwork/resnet-ann.py \
--data_dir=$DATA_DIR \
--log_dir=$LOG_DIR \
--model_dir=$MODEL_DIR \
--ds_name=cifar10 \
-f=9 \
--opt_at=-1 \
-n=9 \
-c=16 \
-s=1 \
--samloss=6 \
--batch_size=128 \
--exp_gamma=0.3 --sum_rand_ratio=2 --last_reward_rate=0.8 \
|
#!/bin/sh
# Generate the file NoC.vhdl according to the network size
PORTTYPE_VERTICAL="P_PORT_VERTICAL"
PORTTYPE_VERTICAL_BACK="P_PORT_VERTICAL_BACK"
PORTTYPE_HORIZONTAL="P_PORT_HORIZONTAL"
PORTTYPE_HORIZONTAL_BACK="P_PORT_HORIZONTAL_BACK"
PORTTYPE_LOCAL="P_PORT_LOCAL"
ADDRESSTYPE="Address"
NODETYPE="NOCUNIT"
CLOCK="CLOCK_50"
RESET="RST_N"
# $1 value
# $2 len
print_bin ()
{
n="$1"
bit=""
len="$2"
while [ "$len" -gt 0 ]
do
bit="$(( n&1 ))$bit";
: $(( n >>= 1 ))
: $(( len -= 1 ))
done
printf "$bit"
}
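# e.g. "print_bin 5 4" prints 0101 (the value 5 as a 4-bit binary string)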
if [ $# -ne 2 ]
then
echo "Usage: $0 <width> <height>"
exit 1
fi
# ---------------------------------
# single core
# ---------------------------------
if [ $1 -eq 1 -a $2 -eq 1 ]
then
cat <<EOL
library ieee;
use ieee.std_logic_1164.all;
use work.constants.all;
use work.LibNode.all;
use work.libproc.all;
entity NoC is
port(LEDR : out std_logic_vector(63 downto 0));
end;
architecture STRUCTURE of NoC is
component sim_clock
port(
rst_n : out std_logic;
clk : out std_logic
);
end component;
signal CLOCK_50 : std_logic;
signal RST_N : std_logic;
signal BufferToProcessor : P_PORT_BUFFER;
signal ProcessorToBuffer : P_PORT_BUFFER;
signal ProcessorRequest : REQUEST_PORT;
signal SendBufferFull : std_logic;
begin
CLOCK : sim_clock port map(
rst_n => RST_N,
clk => CLOCK_50
);
BufferToProcessor.Data <= (others => '0');
BufferToProcessor.Address <= (others => '0');
BufferToProcessor.DataAvailable <= '0';
SendBufferFull <= '0';
LEDR(0) <= ProcessorToBuffer.DataAvailable;
LEDR(63 downto 1) <= ProcessorToBuffer.Data(63 downto 1);
cpu_inst : cpu_top
generic map ( id => 0, count => 1, nocdim => x"0001000100010001" )
port map (
clk => CLOCK_50,
rst_n => RST_N,
nbi => BufferToProcessor,
nbo => ProcessorToBuffer,
nbr => ProcessorRequest,
nbsf => SendBufferFull
);
end;
EOL
exit
fi
XMAX=$(( $1 - 1))
YMAX=$(( $2 - 1))
# How many bits are required? ceil(log2(width)) for X and ceil(log2(height)) for Y
XBITS=1
max=2
while [ "$1" -gt "$max" ]
do
max=$(( $max * 2 ))
XBITS=$(( $XBITS + 1 ))
done
YBITS=1
max=2
while [ "$2" -gt "$max" ]
do
max=$(( $max * 2 ))
YBITS=$(( $YBITS + 1 ))
done
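# e.g. a 4x4 network yields XBITS=2 and YBITS=2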
cat <<EOL
library IEEE;
use IEEE.STD_LOGIC_1164.all;
use WORK.CONSTANTS.all;
use WORK.LIBNODE.all;
entity NoC is
port(--RST_N : in std_logic;
--CLOCK_50 : in std_logic;
LEDR : out std_logic_vector(64 downto 0));
end;
architecture STRUCTURE of NoC is
component NOCUNIT
generic(id : integer; count : integer; nocdim : std_logic_vector(63 downto 0));
port(Clk : in std_logic;
rst_n : in std_logic;
LED : out std_logic;
NorthIn : in P_PORT_VERTICAL_BACK;
NorthOut : out P_PORT_VERTICAL;
SouthIn : in P_PORT_VERTICAL;
SouthOut : out P_PORT_VERTICAL_BACK;
EastIn : in P_PORT_HORIZONTAL_BACK;
EastOut : out P_PORT_HORIZONTAL;
WestIn : in P_PORT_HORIZONTAL;
WestOut : out P_PORT_HORIZONTAL_BACK;
CoreAddress : in Address
);
end component;
EOL
for y in $(seq 0 $YMAX)
do
for x in $(seq 0 $XMAX)
do
printf "\n-------------------------------------------------"
printf "\n--SIGNALS for NODE "
print_bin $y $YBITS
print_bin $x $XBITS
printf "\n\n"
printf "SIGNAL N_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "EAST_OUT\t\t\t:%s;\n" $PORTTYPE_HORIZONTAL
printf "SIGNAL N_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "NORTH_OUT\t\t\t:%s;\n" $PORTTYPE_VERTICAL
printf "SIGNAL N_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "WEST_OUT\t\t\t:%s;\n" $PORTTYPE_HORIZONTAL_BACK
printf "SIGNAL N_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "SOUTH_OUT\t\t\t:%s;\n" $PORTTYPE_VERTICAL_BACK
printf "SIGNAL N_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "CORE_ADDRESS\t\t\t:%s;\n" $ADDRESSTYPE
done
done
cat <<EOL
-- added
component sim_clock
port(
rst_n : out std_logic;
clk : out std_logic
);
end component;
SIGNAL CLOCK_50 : std_logic;
SIGNAL RST_N : std_logic;
--
begin
-- added
CLOCK : sim_clock
port map(
rst_n => RST_N,
clk => CLOCK_50
);
--
EOL
YBASE=`echo 2 ^ $XBITS | bc`
NNODES=$(( $1 * $2 ))
for y in $(seq 0 $YMAX)
do
for x in $(seq 0 $XMAX)
do
ID=$(( $y * $YBASE + $x ))
printf "\t$NODETYPE"
print_bin $y $YBITS
print_bin $x $XBITS
printf " : $NODETYPE\n"
printf "generic map ( id => $ID, count => $NNODES, nocdim => x\"00010001%04x%04x\" )\n" $2 $1
printf " port map( "
printf "\n\t\t\t\t$CLOCK,\n\t\t\t\t$RESET,\n"
printf "\t\t\t\tLEDR($ID),\n"
printf "\t\t\t\tN_"
print_bin $(( ($y+$YMAX) % ($YMAX+1) )) $YBITS
print_bin $x $XBITS
printf "SOUTH_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "NORTH_OUT,\n"
printf "\t\t\t\tN_"
print_bin $(( ($y+1) % ($YMAX+1) )) $YBITS
print_bin $x $XBITS
printf "NORTH_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "SOUTH_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $(( ($x+1) % ($XMAX+1) )) $XBITS
printf "WEST_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "EAST_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $(( ($x+$XMAX) % ($XMAX+1) )) $XBITS
printf "EAST_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "WEST_OUT,\n"
printf "\t\t\t\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "CORE_ADDRESS);\n"
done
done
printf "\n\n\n\n"
printf "\n\n\n\n"
for y in $(seq 0 $YMAX)
do
for x in $(seq 0 $XMAX)
do
printf "\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "CORE_ADDRESS.X <= \""
print_bin $x $XBITS
printf "\";\n\tN_"
print_bin $y $YBITS
print_bin $x $XBITS
printf "CORE_ADDRESS.Y <= \""
print_bin $y $YBITS
printf "\";\n"
done
done
printf "end; \n\n"
|
nasm -f bin -o bootloader.bin bootloader.asm
dd status=noxfer conv=notrunc if=bootloader.bin of=bootloader.flp
mkdir iso
mkisofs -o bootloader.iso -b bootloader.flp iso/
|
#!/usr/bin/env bash
cd tests
if [ "${TRAVIS:-false}" = "false" ]; then
pip install virtualenv | grep -v 'Requirement already satisfied'
virtualenv docker-selenium-tests
source docker-selenium-tests/bin/activate
fi
python -m pip install selenium===3.14.1 \
docker===3.5.0 \
| grep -v 'Requirement already satisfied'
python test.py $1 $2
ret_code=$?
if [ "${TRAVIS:-false}" = "false" ]; then
deactivate
fi
exit $ret_code
|
cd blog
./update.sh
cd ..
git add --all
git commit -m "$(date +%s)"
git push
|
#!/bin/bash
# Fail hard on either errors or unset variables
set -e -u
# Syntax help if no parameters were given
if (( $# < 1 )); then
echo "Syntax: echo <password> | $0 ssh-copy-id -i <id-file> user@remote"
exit 1
fi
# Provide response if $0 was called from ssh via SSH_ASKPASS
if [ -n "$MySshPass" ] && [ -n "$SSH_ASKPASS" ]; then
echo "$MySshPass"
exit 0
fi
# Read the password from the pipe (stdin) if it wasn't set externally
if [ -z "$MySshPass" ]; then
read -s MySshPass
export $MySshPass
fi
export SSH_ASKPASS="$0"
[ -n "${DISPLAY:-}" ] || export DISPLAY=dummy:0
# Call the ssh script in a new session
setsid "$@"
# Never let a password linger in the environment
unset MySshPass
unset SSH_ASKPASS
|
#!/bin/bash
curl https://api.kucoin.com/api/v1/symbols | jq -r '.data[].symbol' | sed 's/-//' | grep USDT | sed 's/^/kucoin:/'
|
#!/usr/bin/env bash
set -e
kubectl apply -f k8s/demo-app.yaml
|
dirs=($(ls -d /home/andres/dotFiles/backup/wallpapers/Pc/*/))
if ((${#dirs[@]} > $1))
then
nitrogen --random --set-zoom-fill ${dirs[$1]}
else
nitrogen --random --set-zoom-fill /home/andres/dotFiles/backup/wallpapers/Pc/
fi |
sudo cp runSqoop.sh /home/hdfs/runSqoop.sh
sudo cp hiveCreate.sh /home/hdfs/hiveCreate.sh
sudo cp yieldIngestWorkflow.xml /home/hdfs/yieldIngestWorkflow.xml
sudo cp yieldIngestProperties.properties /home/hdfs/yieldIngestProperties.properties
sudo cp yieldIngestWorkflow-Long-0.xml /home/hdfs/yieldIngestWorkflow-Long-0.xml
sudo cp yieldIngestWorkflow-Short-0.xml /home/hdfs/yieldIngestWorkflow-Short-0.xml
sudo cp yieldIngestWorkflow-Short-1.xml /home/hdfs/yieldIngestWorkflow-Short-1.xml
sudo cp yieldIngestWorkflow-Short-2.xml /home/hdfs/yieldIngestWorkflow-Short-2.xml
|
dffml accuracy \
-model spacyner \
-sources s=op \
-source-opimp model.spacy.dffml_model_spacy.ner.utils:parser \
-source-args train.json False \
-model-model_name_or_path en_core_web_sm \
-model-location temp \
-model-n_iter 5 \
-scorer sner \
-log debug |
psql -U postgres -d tododb << "EOSQL"
create table todo (ID integer PRIMARY KEY, Title varchar(10) NOT NULL, Content varchar(100) , Status varchar(1));
insert into todo (ID, Title, Content, Status) values (1, 'todo1', 'sample-todo', '0');
insert into todo (ID, Title, Content, Status) values (2, 'todo2', 'sample-todo', '0');
insert into todo (ID, Title, Content, Status) values (3, 'todo3', 'sample-todo', '0');
EOSQL
|
#!/bin/bash
if [ $(id -u) -ne 0 ]; then
echo "ERROR: permission denied"
exit 1
fi
usermod -L -e 1 $1
rocks sync users
|
#!/bin/bash
set -ex
targetdir="../velrok.github.io"
lein gen
cp -r public/* $targetdir
cd $targetdir
git add .
git commit -m 'publish'
git push
echo DONE
|
#!/usr/bin/env bash
set -ex
# shellcheck disable=SC1090
# shellcheck disable=SC1091
source "$(dirname "$0")"/../automation_utils.sh
RESULT_FILE="$1"
# Runs offline stake operations tests against a running cluster launched from the automation framework
bootstrapper_ip_address="$(get_bootstrap_validator_ip_address)"
entrypoint=http://"${bootstrapper_ip_address}":8899
PATH="$REPO_ROOT"/panoptes-release/bin:$PATH "$REPO_ROOT"/system-test/stake-operations-testcases/offline_stake_operations.sh "$entrypoint"
echo "Offline Stake Operations Test Succeeded" >>"$RESULT_FILE"
|
#!/bin/bash
#stopping the Gluu Server
#zf170822.1710
sudo service gluu-server-3.0.2 stop
|
#!/usr/bin/env bash
cd ServerRoot
node GameServer.js &
cd ../GameClient
./GameClient_start.sh |
#!/bin/bash
# (C) Sergey Tyurin 2021-02-22 15:00:00
# Disclaimer
##################################################################################################################
# You running this script/function means you will not blame the author(s)
# if this breaks your stuff. This script/function is provided AS IS without warranty of any kind.
# Author(s) disclaim all implied warranties including, without limitation,
# any implied warranties of merchantability or of fitness for a particular purpose.
# The entire risk arising out of the use or performance of the sample scripts and documentation remains with you.
# In no event shall author(s) be held liable for any damages whatsoever
# (including, without limitation, damages for loss of business profits, business interruption,
# loss of business information, or other pecuniary loss) arising out of the use of or inability
# to use the script or documentation. Neither this script/function,
# nor any part of it other than those parts that are explicitly copied from others,
# may be republished without author(s) express written permission.
# Author(s) retain the right to alter this disclaimer at any time.
##################################################################################################################
SCRPT_USER=$USER
USER_HOME=$HOME
[[ -z "$SCRPT_USER" ]] && SCRPT_USER=$LOGNAME
[[ -n $(echo "$USER_HOME"|grep 'root') ]] && SCRPT_USER="root"
DELAY_TIME=0 # Delay time from the start of elections
TIME_SHIFT=300 # Time between sequential scripts
echo
echo "############################## Set crontab for next elections ##################################"
echo "INFO: $(basename "$0") BEGIN $(date +%s) / $(date +'%F %T %Z')"
# ===================================================
SCRIPT_DIR=`cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P`
source "${SCRIPT_DIR}/env.sh"
source "${SCRIPT_DIR}/functions.shinc"
#=================================================
echo "INFO from env: Network: $NETWORK_TYPE; Node: $NODE_TYPE; Elector: $ELECTOR_TYPE; Staking mode: $STAKE_MODE"
echo
echo -e "$(Determine_Current_Network)"
echo
[[ ! -d $ELECTIONS_WORK_DIR ]] && mkdir -p $ELECTIONS_WORK_DIR
[[ ! -d $ELECTIONS_HISTORY_DIR ]] && mkdir -p $ELECTIONS_HISTORY_DIR
# ===================================================
function GET_M_H() {
OS_SYSTEM=`uname -s`
ival="${1}"
if [[ "$OS_SYSTEM" == "Linux" ]];then
echo "$(date +'%M %H' -d @$ival)"
else
echo "$(date -r $ival +'%M %H')"
fi
}
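# Illustrative: GET_M_H <epoch> prints "MM HH" for that timestamp, i.e. the
# minute and hour fields used in the crontab entries built below.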
#######################################################################################################
#===================================================
# Get current election cycle info
elector_addr=$(Get_Elector_Address)
echo "INFO: Elector Address: $elector_addr"
election_id=$(Get_Current_Elections_ID)
echo "INFO: Current Election ID: $election_id"
case "$NODE_TYPE" in
RUST)
ELECT_TIME_PAR=$($CALL_TC getconfig 15 2>&1 |sed -e '1,4d'|sed "s/Config p15: //")
LIST_PREV_VALS=$($CALL_TC getconfig 32 2>&1 |sed -e '1,4d'|sed "s/Config p32: //")
LIST_CURR_VALS=$($CALL_TC getconfig 34 2>&1 |sed -e '1,4d'|sed "s/Config p34: //")
LIST_NEXT_VALS=$($CALL_TC getconfig 36 2>&1 |sed -e '1,4d'|sed "s/Config p36: //")
declare -i CURR_VAL_UNTIL=`echo "${LIST_CURR_VALS}" | jq '.utime_until'| head -n 1` # utime_until
if [[ "$election_id" == "0" ]];then
CURR_VAL_UNTIL=`echo "${LIST_PREV_VALS}" | jq '.utime_until'| head -n 1` # utime_until
if [[ "$(echo "${LIST_NEXT_VALS}"|head -n 1)" != "null" ]];then
CURR_VAL_UNTIL=`echo "${LIST_NEXT_VALS}" | jq '.utime_since'| head -n 1` # utime_since
fi
fi
declare -i VAL_DUR=`echo "${ELECT_TIME_PAR}" | jq '.validators_elected_for'| head -n 1` # validators_elected_for
declare -i STRT_BEFORE=`echo "${ELECT_TIME_PAR}" | jq '.elections_start_before'| head -n 1` # elections_start_before
declare -i EEND_BEFORE=`echo "${ELECT_TIME_PAR}" | jq '.elections_end_before'| head -n 1` # elections_end_before
;;
CPP)
ELECT_TIME_PAR=`$CALL_LC -rc "getconfig 15" -t "3" -rc "quit" 2>/dev/null`
LIST_PREV_VALS=`$CALL_LC -rc "getconfig 32" -t "3" -rc "quit" 2>/dev/null`
LIST_CURR_VALS=`$CALL_LC -rc "getconfig 34" -t "3" -rc "quit" 2>/dev/null`
LIST_NEXT_VALS=`$CALL_LC -rc "getconfig 36" -t "3" -rc "quit" 2>/dev/null`
declare -i CURR_VAL_UNTIL=`echo "${LIST_CURR_VALS}" | grep -i "cur_validators" | awk -F ":" '{print $4}'|awk '{print $1}'` # utime_until
NEXT_VAL_EXIST=`echo "${LIST_NEXT_VALS}"| grep -i "ConfigParam(36)" | grep -i 'null'` # Config p36: null
if [[ "$election_id" == "0" ]];then
CURR_VAL_UNTIL=`echo "${LIST_PREV_VALS}" | grep -i "prev_validators" | awk -F ":" '{print $4}'|awk '{print $1}'` # utime_until
if [[ -z "$NEXT_VAL_EXIST" ]];then
CURR_VAL_UNTIL=`echo "${LIST_NEXT_VALS}" | grep -i "next_validators" | awk -F ":" '{print $3}'|awk '{print $1}'` # utime_until
fi
fi
declare -i VAL_DUR=`echo "${ELECT_TIME_PAR}" | grep -i "ConfigParam(15)" | awk -F ":" '{print $2}' |awk '{print $1}'` # validators_elected_for
declare -i STRT_BEFORE=`echo "${ELECT_TIME_PAR}" | grep -i "ConfigParam(15)" | awk -F ":" '{print $3}' |awk '{print $1}'` # elections_start_before
declare -i EEND_BEFORE=`echo "${ELECT_TIME_PAR}" | grep -i "ConfigParam(15)" | awk -F ":" '{print $4}' |awk '{print $1}'` # elections_end_before
;;
*)
echo "###-ERROR(line $LINENO): Unknown node type! Set NODE_TYPE= to 'RUST' or CPP' in env.sh"
exit 1
;;
esac
#===================================================
#
PREV_ELECTION_TIME=$((CURR_VAL_UNTIL - STRT_BEFORE + TIME_SHIFT + DELAY_TIME))
PREV_ELECTION_SECOND_TIME=$(($PREV_ELECTION_TIME + $TIME_SHIFT))
PREV_ADNL_TIME=$(($PREV_ELECTION_SECOND_TIME + $TIME_SHIFT))
PREV_BAL_TIME=$(($PREV_ADNL_TIME + $TIME_SHIFT))
PREV_CHG_TIME=$(($PREV_BAL_TIME + $TIME_SHIFT))
PRV_ELECT_1=$(GET_M_H "$PREV_ELECTION_TIME")
PRV_ELECT_2=$(GET_M_H "$PREV_ELECTION_SECOND_TIME")
PRV_ELECT_3=$(GET_M_H "$PREV_ADNL_TIME")
PRV_ELECT_4=$(GET_M_H "$PREV_BAL_TIME")
PRV_ELECT_5=$(GET_M_H "$PREV_CHG_TIME")
#===================================================
#
NEXT_ELECTION_TIME=$((CURR_VAL_UNTIL + VAL_DUR - STRT_BEFORE + $TIME_SHIFT + DELAY_TIME))
NEXT_ELECTION_SECOND_TIME=$(($NEXT_ELECTION_TIME + $TIME_SHIFT))
NEXT_ADNL_TIME=$(($NEXT_ELECTION_SECOND_TIME + $TIME_SHIFT))
NEXT_BAL_TIME=$(($NEXT_ADNL_TIME + $TIME_SHIFT))
NEXT_CHG_TIME=$(($NEXT_BAL_TIME + $TIME_SHIFT))
NXT_ELECT_1=$(GET_M_H "$NEXT_ELECTION_TIME")
NXT_ELECT_2=$(GET_M_H "$NEXT_ELECTION_SECOND_TIME")
NXT_ELECT_3=$(GET_M_H "$NEXT_ADNL_TIME")
NXT_ELECT_4=$(GET_M_H "$NEXT_BAL_TIME")
NXT_ELECT_5=$(GET_M_H "$NEXT_CHG_TIME")
GET_PART_LIST_TIME=$((election_id - EEND_BEFORE))
GPL_TIME_MH=$(GET_M_H "$GET_PART_LIST_TIME")
#===================================================
CURRENT_CHG_TIME=`crontab -l |tail -n 1 | awk '{print $1 " " $2}'`
GET_F_T(){
OS_SYSTEM=`uname`
ival="${1}"
if [[ "$OS_SYSTEM" == "Linux" ]];then
echo "$(date +'%Y-%m-%d %H:%M:%S' -d @$ival)"
else
echo "$(date -r $ival +'%Y-%m-%d %H:%M:%S')"
fi
}
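# Illustrative: GET_F_T <epoch> prints the timestamp as "YYYY-MM-DD HH:MM:SS"
# on both Linux (date -d) and BSD (date -r).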
Curr_Elect_Time=$((CURR_VAL_UNTIL - STRT_BEFORE))
Next_Elect_Time=$((CURR_VAL_UNTIL + VAL_DUR - STRT_BEFORE))
echo
echo "Current elections time start: $Curr_Elect_Time / $(GET_F_T "$Curr_Elect_Time")"
echo "Next elections time start: $Next_Elect_Time / $(GET_F_T "$Next_Elect_Time")"
echo "-------------------------------------------------------------------"
# if [[ ! -z $NEXT_VAL__EXIST ]] && [[ "$election_id" == "0" ]];then
# NXT_ELECT_1=$PRV_ELECT_1
# NXT_ELECT_2=$PRV_ELECT_2
# NXT_ELECT_3=$PRV_ELECT_3
# NXT_ELECT_4=$PRV_ELECT_4
# fi
# sudo crontab -u $SCRPT_USER -r
OS_SYSTEM=`uname -s`
FB_CT_HEADER=""
if [[ "$OS_SYSTEM" == "FreeBSD" ]];then
CRONT_JOBS=$(cat <<-_ENDCRN_
SHELL=/bin/bash
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:$USER_HOME/bin
HOME=$USER_HOME
$NXT_ELECT_1 * * * cd ${SCRIPT_DIR} && ./prepare_elections.sh >> ${TON_LOG_DIR}/validator.log
$NXT_ELECT_2 * * * cd ${SCRIPT_DIR} && ./take_part_in_elections.sh >> ${TON_LOG_DIR}/validator.log
$NXT_ELECT_3 * * * cd ${SCRIPT_DIR} && ./next_elect_set_time.sh >> ${TON_LOG_DIR}/validator.log && ./part_check.sh >> ${TON_LOG_DIR}/validator.log
# $GPL_TIME_MH * * * cd ${SCRIPT_DIR} && ./get_participant_list.sh > ${ELECTIONS_HISTORY_DIR}/${election_id}_parts.lst && chmod 444 ${ELECTIONS_HISTORY_DIR}/${election_id}_parts.lst
_ENDCRN_
)
else
CRONT_JOBS=$(cat <<-_ENDCRN_
$NXT_ELECT_1 * * * script --return --quiet --append --command "cd ${SCRIPT_DIR} && ./prepare_elections.sh >> ${TON_LOG_DIR}/validator.log"
$NXT_ELECT_2 * * * script --return --quiet --append --command "cd ${SCRIPT_DIR} && ./take_part_in_elections.sh >> ${TON_LOG_DIR}/validator.log"
$NXT_ELECT_3 * * * script --return --quiet --append --command "cd ${SCRIPT_DIR} && ./next_elect_set_time.sh >> ${TON_LOG_DIR}/validator.log && ./part_check.sh >> ${TON_LOG_DIR}/validator.log"
# $GPL_TIME_MH * * * script --return --quiet --append --command "cd ${SCRIPT_DIR} && ./get_participant_list.sh > ${ELECTIONS_HISTORY_DIR}/${election_id}_parts.lst && chmod 444 ${ELECTIONS_HISTORY_DIR}/${election_id}_parts.lst"
_ENDCRN_
)
fi
[[ "$1" == "show" ]] && echo "$CRONT_JOBS"&& exit 0
echo "$CRONT_JOBS" | sudo crontab -u $SCRPT_USER -
sudo crontab -l -u $SCRPT_USER | tail -n 8
echo "+++INFO: $(basename "$0") FINISHED $(date +%s) / $(date)"
echo "================================================================================================"
exit 0
|
#!/bin/bash
# -----------------------------------------------------------------------------
# Trivadis - Part of Accenture, Platform Factory - Transactional Data Platform
# Saegereistrasse 29, 8152 Glattbrugg, Switzerland
# -----------------------------------------------------------------------------
# Name.......: config_krb_cmu.sh
# Author.....: Stefan Oehrli (oes) [email protected]
# Editor.....: Stefan Oehrli
# Date.......: 2021.09.28
# Revision...:
# Purpose....: Script to configure Kerberos and Centrally Managed Users
# Notes......: --
# Reference..: --
# License....: Apache License Version 2.0, January 2004 as shown
# at http://www.apache.org/licenses/
# -----------------------------------------------------------------------------
# - Customization -------------------------------------------------------------
ORA_WALLET_PASSWORD=${ORA_WALLET_PASSWORD:-""}
ORA_KRB5_KEYTAB_FILE=${ORA_KRB5_KEYTAB_FILE:-"$(hostname).standard.six-group.net.keytab"}
ORA_CMU_USER=${ORA_CMU_USER:-"A"}
ORA_CMU_USER_DN=${ORA_CMU_USER_DN:-"b"}
ORA_CMU_PASSWORD=${ORA_CMU_PASSWORD:-"c"}
ORA_CMU_ROOT_CERT=${ORA_CMU_ROOT_CERT:-"$TNS_ADMIN/CARootCert.pam"}
# - End of Customization ------------------------------------------------------
# - Default Values ------------------------------------------------------------
# source generic environment variables and functions
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SCRIPT_BASE=$(dirname ${SCRIPT_BIN_DIR})
DATE_STAMP=$(date '+%Y%m%d')
# define logfile and logging
export LOG_BASE=${LOG_BASE:-"${SCRIPT_BIN_DIR}"} # Use script directory as default logbase
TIMESTAMP=$(date "+%Y.%m.%d_%H%M%S")
readonly LOGFILE="$LOG_BASE/$(basename $SCRIPT_NAME .sh)_$TIMESTAMP.log"
# - EOF Default Values --------------------------------------------------------
# - Functions ---------------------------------------------------------------
function command_exists () {
# Purpose....: check if a command exists.
# ---------------------------------------------------------------------------
command -v $1 >/dev/null 2>&1;
}
function gen_password {
# Purpose....: generate a password string
# -----------------------------------------------------------------------
Length=${1:-16}
# make sure, that the password length is not shorter than 4 characters
if [ ${Length} -lt 4 ]; then
Length=4
fi
# generate password
if [ $(command -v pwgen) ]; then
pwgen -s -1 ${Length}
else
while true; do
# use urandom to generate a random string
s=$(cat /dev/urandom | tr -dc "A-Za-z0-9" | fold -w ${Length} | head -n 1)
# check if the password meet the requirements
if [[ ${#s} -ge ${Length} && "$s" == *[A-Z]* && "$s" == *[a-z]* && "$s" == *[0-9]* ]]; then
echo "$s"
break
fi
done
fi
}
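# Illustrative usage of the helpers above (not part of the configuration flow
# at this point; the variable name is just an example):
#   command_exists pwgen && echo "pwgen is available"
#   WALLET_PWD=$(gen_password 16)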
# - EOF Functions -----------------------------------------------------------
# - Initialization ------------------------------------------------------------
# Define a bunch of bash option see
# https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
set -o nounset                              # exit when the 1st unset variable is referenced
set -o errexit                              # stop script after 1st failed command
set -o pipefail                             # exit if any command in a pipeline fails
# initialize logfile
touch $LOGFILE 2>/dev/null
exec &> >(tee -a "$LOGFILE") # Open standard out at `$LOG_FILE` for write.
exec 2>&1
echo "INFO: Start ${SCRIPT_NAME} on host $(hostname) at $(date)"
# load config
if [ -f "$SCRIPT_BIN_DIR/$SCRIPT_NAME.conf" ]; then
echo "INFO: source config file"
. $SCRIPT_BIN_DIR/$SCRIPT_NAME.conf
fi
echo "INFO: Current settings ---------------------------------------------------"
echo "SCRIPT_NAME : $SCRIPT_NAME"
echo "SCRIPT_BIN_DIR : $SCRIPT_BIN_DIR"
echo "SCRIPT_BASE : $SCRIPT_BASE"
echo "ORA_WALLET_PASSWORD : $ORA_WALLET_PASSWORD"
echo "ORA_KRB5_KEYTAB_FILE : $ORA_KRB5_KEYTAB_FILE"
echo "ORA_CMU_USER : $ORA_CMU_USER"
echo "ORA_CMU_USER_DN : $ORA_CMU_USER_DN"
echo "ORA_CMU_PASSWORD : <ORA_CMU_PASSWORD>"
echo "ORA_CMU_ROOT_CERT : $ORA_CMU_ROOT_CERT"
# if exist make a copy of the existing krb5.conf file
if [ -f "$TNS_ADMIN/krb5.keytab" ]; then
echo "INFO: save existing krb5.keytab file as krb5.keytab.${DATE_STAMP}"
cp $TNS_ADMIN/krb5.keytab $TNS_ADMIN/krb5.keytab.${DATE_STAMP}
ln -fs $TNS_ADMIN/$(hostname).standard.six-group.net.keytab $TNS_ADMIN/krb5.keytab
fi
# if exist make a copy of the existing krb5.conf file
if [ -f "$TNS_ADMIN/krb5.conf" ]; then
echo "INFO: save existing krb5.conf file as krb5.conf.${DATE_STAMP}"
cp $TNS_ADMIN/krb5.conf $TNS_ADMIN/krb5.conf.${DATE_STAMP}
fi
# create a new krb5.conf
echo "INFO: create new krb5.conf file"
cat << EOF >$TNS_ADMIN/krb5.conf
# ---------------------------------------------------------------------
# Trivadis AG, Platform Factory - Transactional Data Platform
# Saegereistrasse 29, 8152 Glattbrugg, Switzerland
# ---------------------------------------------------------------------
# Name.......: krb5.conf
# Author.....: Stefan Oehrli (oes) [email protected]
# Editor.....: Stefan Oehrli
# Date.......: 2021.09.28
# Revision...: --
# Purpose....: Oracle Kerberos Configuration File
# Notes......: --
# Reference..: Oracle Database Security 19c
# ---------------------------------------------------------------------
# Modified...:
# YYYY.MM.DD Visa Change
# ----------- ----- ---------------------------------------------------
# 2021.09.28 soe Initial version
# ---------------------------------------------------------------------
[libdefaults]
forwardable = true
default_realm = BASE.DOM
[realms]
BASE.DOM = {
kdc = base.dom
}
[domain_realm]
.standard.six-group.net = BASE.DOM
standard.six-group.net = BASE.DOM
.BASE.DOM = BASE.DOM
BASE.DOM = BASE.DOM
.base.dom = BASE.DOM
base.dom = BASE.DOM
.six-group.com = BASE.DOM
six-group.com = BASE.DOM
# ---------------------------------------------------------------------
EOF
# Update sqlnet.ora
if [ -f "$TNS_ADMIN/sqlnet.ora" ]; then
echo "INFO: save existing sqlnet.ora file as sqlnet.ora.${DATE_STAMP}"
cp $TNS_ADMIN/sqlnet.ora $TNS_ADMIN/sqlnet.ora.${DATE_STAMP}
echo "INFO: remove KRB5 config in sqlnet.ora file"
sed -i '/AUTHENTICATION/d' $TNS_ADMIN/sqlnet.ora
sed -i '/KERBEROS5/d' $TNS_ADMIN/sqlnet.ora
echo "INFO: update sqlnet.ora file"
cat << EOF >>$TNS_ADMIN/sqlnet.ora
# ---------------------------------------------------------------------
# Kerberos settings
# ---------------------------------------------------------------------
SQLNET.AUTHENTICATION_SERVICES=(BEQ,KERBEROS5PRE,KERBEROS5)
SQLNET.AUTHENTICATION_KERBEROS5_SERVICE = oracle
SQLNET.FALLBACK_AUTHENTICATION = TRUE
SQLNET.KERBEROS5_KEYTAB = $TNS_ADMIN/krb5.keytab
SQLNET.KERBEROS5_CONF = $TNS_ADMIN/krb5.conf
SQLNET.KERBEROS5_CONF_MIT=TRUE
# - EOF ---------------------------------------------------------------
EOF
else
echo "ERR : Could not find an sqlnet.ora ($TNS_ADMIN/sqlnet.ora)"
echo "ERR : Please create manually an sqlnet.ora"
exit 1
fi
# create CMU config folder
echo "INFO: create CMU configuration folder $TNS_ADMIN/cmu"
mkdir -p $TNS_ADMIN/cmu
# if exist make a copy of the existing dsi.ora file
if [ -f "$TNS_ADMIN/cmu/dsi.ora" ]; then
echo "INFO: save existing dsi.ora file as dsi.ora.${DATE_STAMP}"
cp $TNS_ADMIN/cmu/dsi.ora $TNS_ADMIN/cmu/dsi.ora.${DATE_STAMP}
fi
# create a new dsi.ora file
echo "INFO: create new dsi.ora file"
cat << EOF >$TNS_ADMIN/cmu/dsi.ora
# ---------------------------------------------------------------------
# Trivadis AG, Platform Factory - Transactional Data Platform
# Saegereistrasse 29, 8152 Glattbrugg, Switzerland
# ---------------------------------------------------------------------
# Name.......: dsi.ora
# Author.....: Stefan Oehrli (oes) [email protected]
# Editor.....: Stefan Oehrli
# Date.......: 2021.09.28
# Revision...: --
# Purpose....: Oracle Centrally Managed Users Configuration File
# Notes......: --
# Reference..: Oracle Database Security 19c
# ---------------------------------------------------------------------
# Modified...:
# YYYY.MM.DD Visa Change
# ----------- ----- ---------------------------------------------------
# 2021.09.28 soe Initial version
# ---------------------------------------------------------------------
DSI_DIRECTORY_SERVERS = (base.dom::636)
DSI_DEFAULT_ADMIN_CONTEXT = "DC=base,DC=dom"
DSI_DIRECTORY_SERVER_TYPE = AD
# ---------------------------------------------------------------------
EOF
# create Oracle CMU Wallet password
if [ -z ${ORA_WALLET_PASSWORD} ]; then
# Auto generate a wallet password
echo "INFO: auto generate new Oracle CMU Wallet password..."
ORA_WALLET_PASSWORD=$(gen_password 16)
echo $ORA_WALLET_PASSWORD>$TNS_ADMIN/cmu/wallet_pwd_${DATE_STAMP}.txt
fi
# create Oracle Wallet
orapki wallet create -wallet $TNS_ADMIN/cmu -pwd $ORA_WALLET_PASSWORD -auto_login
if [ -n "$ORA_CMU_USER" ]; then
echo "INFO: add username $ORA_CMU_USER to the Oracle CMU Wallet"
echo $ORA_WALLET_PASSWORD|mkstore -wrl $TNS_ADMIN/cmu -createEntry ORACLE.SECURITY.USERNAME $ORA_CMU_USER
else
echo "WARN: can not add username to the Oracle CMU Wallet"
fi
if [ -n "$ORA_CMU_USER_DN" ]; then
echo "INFO: add username DN $ORA_CMU_USER_DN to the Oracle CMU Wallet"
echo $ORA_WALLET_PASSWORD|mkstore -wrl $TNS_ADMIN/cmu -createEntry ORACLE.SECURITY.DN $ORA_CMU_USER_DN
else
echo "WARN: can not add username DN to the Oracle CMU Wallet"
fi
if [ -n "$ORA_CMU_PASSWORD" ]; then
echo "INFO: add password to the Oracle CMU Wallet"
echo $ORA_WALLET_PASSWORD|mkstore -wrl $TNS_ADMIN/cmu -createEntry ORACLE.SECURITY.PASSWORD $ORA_CMU_PASSWORD
else
echo "WARN: can not add password to the Oracle CMU Wallet"
fi
if [ -f "$ORA_CMU_ROOT_CERT" ]; then
echo "INFO: add root certificate $TNS_ADMIN/CARootCert.pam to the Oracle CMU Wallet"
orapki wallet add -wallet $TNS_ADMIN/cmu -pwd $ORA_WALLET_PASSWORD -trusted_cert -cert $TNS_ADMIN/CARootCert.pam
else
echo "WARN: can not root certificate to the Oracle CMU Wallet"
fi
echo "INFO: Wallet Information"
echo $ORA_WALLET_PASSWORD|mkstore -wrl $TNS_ADMIN/cmu
orapki wallet display -wallet $TNS_ADMIN/cmu -pwd $ORA_WALLET_PASSWORD
# print information
echo "INFO: CMU and Kerberos OS configuration finished."
echo " it is recommended to restart the listener and databases"
echo " to make sure new SQLNet configuration is used."
echo ""
echo "INFO: Finish ${SCRIPT_NAME} on host $(hostname) at $(date)"
# --- EOF ---------------------------------------------------------------------
|
export FLASK_DEBUG=1
export FLASK_ENV="development"
export FLASK_APP="flasky.py"
flask run --host="0.0.0.0" --port="5002" |
#!/bin/bash
for item in $1*
do
if [[ -f $item ]]
then echo "[F] $item"
elif [[ -d $item ]]
then echo "[D] $item"
fi
done |
#!/usr/bin/env bash
gem build cocoapods-rome2.gemspec
sudo gem uninstall cocoapods-rome2 -a -x
sudo gem install cocoapods-rome2-2.0.1.gem -n /usr/local/bin
rm cocoapods-rome2-2.0.1.gem |
#!/bin/bash
if test "$(grep "geodb_enabled: true" mika.yaml)"; then
echo "Geo database: Enabled"
if ! test -f "geo_data/IP2LOCATION-LITE-ASN.CSV"; then
echo "Updating geo database (Fresh)"
./mika updategeo
fi
if test "$(find "geo_data/IP2LOCATION-LITE-ASN.CSV" -mmin +10080)"; then
echo "Updating geo database (Old)"
./mika updategeo
fi
else
echo "Geo database: Disabled"
fi
echo "Starting mika..."
exec "$@" |
#!/bin/bash
current_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
template="$current_dir/kms-dependency-role.template"
stack_name='kms-dependency-support'
$current_dir/../create-role.sh $stack_name $template
|
# -*- mode: sh; sh-indentation: 4; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# Copyright (c) 2020
# According to the Zsh Plugin Standard:
# http://zdharma.org/Zsh-100-Commits-Club/Zsh-Plugin-Standard.html
0=${${ZERO:-${0:#$ZSH_ARGZERO}}:-${(%):-%N}}
0=${${(M)0:#/*}:-$PWD/$0}
# Then ${0:h} to get plugin's directory
if [[ ${zsh_loaded_plugins[-1]} != */ignition && -z ${fpath[(r)${0:h}]} ]] {
fpath+=( "${0:h}" )
}
# Standard hash for plugins, to not pollute the namespace
typeset -gA Plugins
Plugins[IGNITION_DIR]="${0:h}"
# autoload -Uz example-script
source "${0:h}/aliases.zsh"
# Use alternate vim marks [[[ and ]]] as the original ones can
# confuse nested substitutions, e.g.: ${${${VAR}}}
# vim:ft=zsh:tw=80:sw=4:sts=4:et:foldmarker=[[[,]]]
|
#!/bin/bash
#
# Automatically export Godot to itch.io
# by lapspider45
# License: CC0
#
# This is a script for automatically exporting your godot project and pushing it to itch.io
# Edit it according to your needs
# Requires:
# * butler (https://itchio.itch.io/butler)
# * the correct export templates for your Godot version
# * your godot project is set up with export presets matching these names:
# * auto-web
# * auto-windows
# * auto-linux
#
# EDIT THESE VARS BEFORE USING:
project_name='gwj-28' # the name for your executable files
butler="$HOME/.config/itch/apps/butler/butler" # path to your butler binary (~ does not expand inside quotes)
project='..' # where your project is located
exportdir='../../export/' # where to put the exports
itch_page='lapspider45/gwj28' # identifier for your itch page, see butler docs
# This does the exporting
cd "$exportdir"
godot --path "$project" --export "auto-web" "$exportdir/$project_name.html"
mv "$project_name.html" index.html
mkdir html5
mv favicon.png $project_name.js $project_name.pck $project_name.png $project_name.wasm index.html html5/
sleep 2
godot --path "$project" --export "auto-windows" "$exportdir/$project_name.exe"
mkdir windows
mv $project_name.exe $project_name.pck windows/
sleep 2
godot --path "$project" --export "auto-linux" "$exportdir/$project_name.x86_64"
mkdir linux
mv $project_name.x86_64 $project_name.pck linux/
sleep 2
# Now, use butler to push all the builds
# during rating period, comment out as this is too dangerous
$butler push html5/ $itch_page:html5-postjam
$butler push windows/ $itch_page:windows-postjam
$butler push linux/ $itch_page:linux-postjam
|
#!/bin/bash
# Path to directory with github code
SOURCE_PATH="/cluster/home/onewla01/hughes/time_series_prediction/src"
# Paths to raw dataset
TS_DATA_PATH="/cluster/tufts/hugheslab/datasets/mimic-iii-v1.4/v20181213/tidy/mimic3benchmarks_inhospital_mortality/20190406/vitals_data_per_tstamp.csv"
TS_METADATA_PATH="/cluster/tufts/hugheslab/datasets/mimic-iii-v1.4/v20181213/tidy/mimic3benchmarks_inhospital_mortality/20190406/metadata_per_seq.csv"
TS_DATA_DICT_PATH="/cluster/home/onewla01/hughes/time_series_prediction/docs/mimic-iii-v1.4/20190406/mimic_dict.json"
# Path to directory in which modified dataset files will be stored
TEMP_DATA_PATH="/cluster/tufts/hugheslab/onewla01/mimic"
# Path to directory in which html classifier performance report should be saved
REPORT_DIR="$SOURCE_PATH/rnn/html"
# Check directory and file exists
if [ ! -d "$SOURCE_PATH" ]; then
echo "Could not find directory SOURCE_PATH: $SOURCE_PATH"
exit 1
fi
if [ ! -f "$TS_METADATA_PATH" ]; then
echo "Could not find file TS_METADATA_PATH: $TS_METADATA_PATH"
exit 1
fi
# Format data unless user adds command line arg "classifier"
if [ "$1" != "classifier" ]; then
# Check files and directories exist
if [ ! -f "$TS_DATA_PATH" ]; then
echo "Could not find file TS_DATA_PATH: $TS_DATA_PATH"
exit 1
fi
if [ ! -f "$TS_DATA_DICT_PATH" ]; then
echo "Could not find file TS_DATA_DICT_PATH: $TS_DATA_DICT_PATH"
exit 1
fi
if [ ! -d "$TEMP_DATA_PATH" ]; then
echo "Could not find directory TEMP_DATA_PATH: $TEMP_DATA_PATH"
exit 1
fi
# Format data
echo "Align to grid"
python $SOURCE_PATH/align_to_grid.py \
--input_ts_csv_path $TS_DATA_PATH \
--data_dict $TS_DATA_DICT_PATH \
--step_size 1 \
--output $TEMP_DATA_PATH/ts_temp.csv
echo "Fill missing values"
python $SOURCE_PATH/fill_missing_values.py \
--data $TEMP_DATA_PATH/ts_temp.csv \
--data_dict $TS_DATA_DICT_PATH \
--multiple_strategies True \
--strategy carry_forward \
--second_strategy pop_mean \
--output $TEMP_DATA_PATH/ts_temp.csv \
--third_strategy nulls
echo "Normalize Features"
python $SOURCE_PATH/normalize_features.py \
--input $TEMP_DATA_PATH/ts_temp.csv \
--data_dict $TS_DATA_DICT_PATH \
--output $TEMP_DATA_PATH/ts_temp.csv
echo "Split dataset"
python $SOURCE_PATH/split_dataset.py \
--input $TEMP_DATA_PATH/ts_temp.csv \
--data_dict $TS_DATA_DICT_PATH \
--test_size 0.1 \
--output_dir $TEMP_DATA_PATH/ts_test_train
fi
# Check files and directories exist
if [ ! -f "$TEMP_DATA_PATH/ts_test_train/train.csv" ]; then
echo "Could not find directory train.csv in $TEMP_DATA_PATH/ts_test_train/"
exit 1
fi
if [ ! -f "$TEMP_DATA_PATH/ts_test_train/test.csv" ]; then
echo "Could not find directory test.csv in $TEMP_DATA_PATH/ts_test_train/"
exit 1
fi
if [ ! -d "$REPORT_DIR" ]; then
echo "Could not find directory REPORT_DIR: $REPORT_DIR"
exit 1
fi
# Run classifier
echo "Run classifier"
python $SOURCE_PATH/rnn/main_mimic.py \
--train_vitals_csv $TEMP_DATA_PATH/ts_test_train/train.csv \
--test_vitals_csv $TEMP_DATA_PATH/ts_test_train/test.csv \
--metadata_csv $TS_METADATA_PATH \
--data_dict $TS_DATA_DICT_PATH \
--report_dir $SOURCE_PATH/rnn/html
|
#!/bin/zsh
#
# https://github.com/getmicah/spotify-now
# track info
album () {
res=$(echo "$META" | grep -m 1 "xesam:album" -b1 | tail -n1)
res="${res%\"*}"
res="${res#*\"}"
echo "$res"
}
artist () {
res=$(echo "$META" | grep -m 1 "xesam:artist" -b2 | tail -n1)
res="${res%\"*}"
res="${res#*\"}"
# if advertisement is playing currently
if [[ "$res" == "" ]]; then
echo "Ad"
else
echo "$res"
fi
}
disc () {
res=$(echo "$META" | grep -m 1 "xesam:discNumber" -b1 | tail -n1)
res="${res#*3}"
res="${res#*2}"
echo "$res"
}
title () {
res=$(echo "$META" | grep -m 1 "xesam:title" -b1 | tail -n1)
res="${res%\"*}"
res="${res#*\"}"
echo "$res"
}
track () {
res=$(echo "$META" | grep -m 1 "xesam:trackNumber" -b1 | tail -n1)
res="${res#*3}"
res="${res#*2}"
echo "$res"
}
# error message
errorMsg () {
echo "Error: invalid argument"
echo "Help: 'spotify-now -h'"
echo "Info: https://github.com/getmicah/spotify-now"
exit 0
}
# help message
helpMsg () {
echo -e "\nUsage: spotify-now -i \"<info>\" -e \"<error>\" -p \"<paused>\""
echo -e "\n\"<info>\" can contain the following keywords:"
echo -e "\t%album, %artist, %disc, %title, %track"
echo -e "\n\"<error>\" your custom Spotify closed message."
echo -e "\n\"<paused>\" your custom Spotify paused message."
echo -e "\nhttps://github.com/getmicah/spotify-now\n"
exit 0
}
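# Illustrative invocation (format strings are examples only):
#   spotify-now -i "%artist - %title" -p "Paused" -e "Spotify is not running"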
# check args
if [[ "$#" == 0 ]]; then
helpMsg
elif [[ "$#" == 1 ]]; then
if [[ "${1}" == "-h" || "${1}" == "--help" ]]; then
helpMsg
else
errorMsg
fi
elif [[ "$#" > 6 ]]; then
errorMsg
fi
if [[ "$#" -ge 2 ]]; then
if [[ "${1}" != "-i" && "${1}" != "-p" && "${1}" != "-e" ]]; then
errorMsg
fi
fi
if [[ "$#" -ge 4 ]]; then
if [[ "${3}" != "-i" && "${3}" != "-p" && "${3}" != "-e" ]]; then
errorMsg
fi
fi
if [[ "$#" -ge 6 ]]; then
if [[ "${5}" != "-i" && "${5}" != "-p" && "${5}" != "-e" ]]; then
errorMsg
fi
fi
# identify parameters
INFO=""
PAUSED=""
ERROR=""
ESCAPE=false
if [[ "${1}" == "-i" ]]; then
INFO="${2}"
elif [[ "${1}" == "-p" ]]; then
PAUSED="${2}"
elif [[ "${1}" == "-e" ]]; then
ERROR="${2}"
fi
if [[ "${3}" == "-i" ]]; then
INFO="${4}"
elif [[ "${3}" == "-p" ]]; then
PAUSED="${4}"
elif [[ "${3}" == "-e" ]]; then
ERROR="${4}"
fi
if [[ "${5}" == "-i" ]]; then
INFO="${6}"
elif [[ "${5}" == "-p" ]]; then
PAUSED="${6}"
elif [[ "${5}" == "-e" ]]; then
ERROR="${6}"
fi
if [[ "${3}" == "--escape" || "${5}" == "--escape" || "${7}" == "--escape" ]]; then
ESCAPE=true
fi
# check if spotify is running
status=`pidof spotify | wc -l`
if [[ "$status" != 1 && "$ERROR" != "" ]]; then
echo "$ERROR"
else
playback=`dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.freedesktop.DBus.Properties.Get string:'org.mpris.MediaPlayer2.Player' string:'PlaybackStatus' | grep -o Paused`
if [[ "$playback" == "Paused" && "$PAUSED" != "" ]]; then
echo "$PAUSED"
elif [[ "$INFO" != "" ]]; then
# get mpris2 dbus status of spotify player
META=`dbus-send --print-reply --session --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.freedesktop.DBus.Properties.Get string:'org.mpris.MediaPlayer2.Player' string:'Metadata'`
INFO="${INFO//"%album"/$(album)}"
INFO="${INFO//"%artist"/$(artist)}"
INFO="${INFO//"%disc"/$(disc)}"
INFO="${INFO//"%title"/$(title)}"
INFO="${INFO//"%track"/$(track)}"
if [ "$ESCAPE" = true ]; then
INFO="${INFO//&/&}"
INFO="${INFO//</<}"
INFO="${INFO//>/>}"
INFO="${INFO//\"/"}"
INFO="${INFO//\\/\\\\}"
fi
echo $INFO
fi
fi
|
cat WEBCRAWLER/log | egrep "Images" | wc -l
cat WEBCRAWLER/log | egrep "http" | egrep "jpg" | wc -l
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-rare/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-rare/512+512+512-rare-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_rare_words_first_third_full --eval_function last_element_eval |
#!/bin/bash
# LinuxGSM command_details.sh function
# Author: Daniel Gibbs
# Contributor: UltimateByte
# Website: https://linuxgsm.com
# Description: Displays server information.
local commandname="DETAILS"
local commandaction="Details"
local function_selfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"
# Run checks and gathers details to display.
check.sh
info_config.sh
info_parms.sh
info_distro.sh
info_messages.sh
query_gamedig.sh
fn_info_message_distro
fn_info_message_performance
fn_info_message_disk
fn_info_message_gameserver
fn_info_message_script
fn_info_message_backup
# Some game servers do not have parms.
if [ "${shortname}" != "ts3" ]&&[ "${shortname}" != "jc2" ]&&[ "${shortname}" != "dst" ]&&[ "${shortname}" != "pz" ]&&[ "${engine}" != "renderware" ]; then
fn_parms
fn_info_message_commandlineparms
fi
fn_info_message_ports
fn_info_message_select_engine
fn_info_message_statusbottom
core_exit.sh
|
cd /storage/home/users/pjt6/phy/orthofinder
python /storage/home/users/pjt6/misc_python/BLAST_output_parsing/Blast_RBH_two_fasta_file_evalue.py --threads 2 -o ../GCA_000509525.1_Phyt_para_IAC_01_95_V1_cds_from_genomi.fa_GCA_001712645.2_PkChile7v2.0_cds_from_genomi.fa GCA_000509525.1_Phyt_para_IAC_01_95_V1_cds_from_genomi.fa GCA_001712645.2_PkChile7v2.0_cds_from_genomi.fa |
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-N-VB-ADJ/13-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-N-VB-ADJ/13-1024+0+512-pad-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function truncate_and_pad_first_two_thirds_sixth --eval_function penultimate_sixth_eval |
#!/bin/bash
set -e
function commit_list {
local tag=${1:?}
local repo_domain=${2:?}
local repo_name=${3:?}
git fetch --tags
local previous_tag=`curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$repo_domain/$repo_name/tags | jq --raw-output '.[1].name'`
local release_notes=`git log $previous_tag..$tag --oneline`
local parsed_release_notes=$(echo "$release_notes" | sed -n -e 'H;${x;s/\n/\\n- /g;s/^\\n//;s/"/\\"/g;p;}')
echo $parsed_release_notes
}
function get_release_notes {
local tag=${1:?}
local repo_domain=${2:?}
local repo_name=${3:?}
commits=`commit_list $tag $repo_domain $repo_name`
notes=$(echo "\
This release includes the following commits and features:\\n\
$commits\\n\\n\
To install the latest version of this image use the manifest included in the Kubeless main repository.\\n\
\\n\
")
echo "${notes}"
}
function get_release_body {
local tag=${1:?}
local repo_domain=${2:?}
local repo_name=${3:?}
local release_notes=$(get_release_notes $tag $repo_domain $repo_name)
echo '{
"tag_name": "'$tag'",
"target_commitish": "master",
"name": "'$tag'",
"body": "'$release_notes'",
"draft": true,
"prerelease": false
}'
}
function update_release_tag {
local tag=${1:?}
local repo_domain=${2:?}
local repo_name=${3:?}
local release_id=$(curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$repo_domain/$repo_name/releases | jq --raw-output '.[0].id')
local body=$(get_release_body $tag $repo_domain $repo_name)
local release=`curl -H "Authorization: token $ACCESS_TOKEN" -s --request PATCH --data "$body" https://api.github.com/repos/$repo_domain/$repo_name/releases/$release_id`
echo $release
}
function release_tag {
local tag=$1
local repo_domain=${2:?}
local repo_name=${3:?}
local body=$(get_release_body $tag $repo_domain $repo_name)
local release=`curl -H "Authorization: token $ACCESS_TOKEN" -s --request POST --data "$body" https://api.github.com/repos/$repo_domain/$repo_name/releases`
echo $release
}
function upload_asset {
local repo_domain=${1:?}
local repo_name=${2:?}
local release_id=${3:?}
local asset=${4:?}
local filename=$(basename $asset)
if [[ "$filename" == *".zip" ]]; then
local content_type="application/zip"
elif [[ "$filename" == *".yaml" ]]; then
local content_type="text/yaml"
fi
curl -H "Authorization: token $ACCESS_TOKEN" \
-H "Content-Type: $content_type" \
--data-binary @"$asset" \
"https://uploads.github.com/repos/$repo_domain/$repo_name/releases/$release_id/assets?name=$filename"
}
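# Hypothetical usage of the helpers above (token, org/repo and tag are placeholders):
#   export ACCESS_TOKEN=<github-token>
#   release_tag v1.0.0 kubeless kubeless
#   upload_asset kubeless kubeless <release-id> kubeless-v1.0.0.yaml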
|
#!/bin/sh
test_description='check handling of .. in submodule names
Exercise the name-checking function on a variety of names, and then give a
real-world setup that confirms we catch this in practice.
'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-pack.sh
test_expect_success 'check names' '
cat >expect <<-\EOF &&
valid
valid/with/paths
EOF
git submodule--helper check-name >actual <<-\EOF &&
valid
valid/with/paths
../foo
/../foo
..\foo
\..\foo
foo/..
foo/../
foo\..
foo\..\
foo/../bar
EOF
test_cmp expect actual
'
test_expect_success 'create innocent subrepo' '
git init innocent &&
git -C innocent commit --allow-empty -m foo
'
test_expect_success 'submodule add refuses invalid names' '
test_must_fail \
git submodule add --name ../../modules/evil "$PWD/innocent" evil
'
test_expect_success 'add evil submodule' '
git submodule add "$PWD/innocent" evil &&
mkdir modules &&
cp -r .git/modules/evil modules &&
write_script modules/evil/hooks/post-checkout <<-\EOF &&
echo >&2 "RUNNING POST CHECKOUT"
EOF
git config -f .gitmodules submodule.evil.update checkout &&
git config -f .gitmodules --rename-section \
submodule.evil submodule.../../modules/evil &&
git add modules &&
git commit -am evil
'
# This step seems like it shouldn't be necessary, since the payload is
# contained entirely in the evil submodule. But due to the vagaries of the
# submodule code, checking out the evil module will fail unless ".git/modules"
# exists. Adding another submodule (with a name that sorts before "evil") is an
# easy way to make sure this is the case in the victim clone.
test_expect_success 'add other submodule' '
git submodule add "$PWD/innocent" another-module &&
git add another-module &&
git commit -am another
'
test_expect_success 'clone evil superproject' '
git clone --recurse-submodules . victim >output 2>&1 &&
! grep "RUNNING POST CHECKOUT" output
'
test_expect_success 'fsck detects evil superproject' '
test_must_fail git fsck
'
test_expect_success 'transfer.fsckObjects detects evil superproject (unpack)' '
rm -rf dst.git &&
git init --bare dst.git &&
git -C dst.git config transfer.fsckObjects true &&
test_must_fail git push dst.git HEAD
'
test_expect_success 'transfer.fsckObjects detects evil superproject (index)' '
rm -rf dst.git &&
git init --bare dst.git &&
git -C dst.git config transfer.fsckObjects true &&
git -C dst.git config transfer.unpackLimit 1 &&
test_must_fail git push dst.git HEAD
'
# Normally our packs contain commits followed by trees followed by blobs. This
# reverses the order, which requires backtracking to find the context of a
# blob. We'll start with a fresh gitmodules-only tree to make it simpler.
test_expect_success 'create oddly ordered pack' '
git checkout --orphan odd &&
git rm -rf --cached . &&
git add .gitmodules &&
git commit -m odd &&
{
pack_header 3 &&
pack_obj $(git rev-parse HEAD:.gitmodules) &&
pack_obj $(git rev-parse HEAD^{tree}) &&
pack_obj $(git rev-parse HEAD)
} >odd.pack &&
pack_trailer odd.pack
'
test_expect_success 'transfer.fsckObjects handles odd pack (unpack)' '
rm -rf dst.git &&
git init --bare dst.git &&
test_must_fail git -C dst.git unpack-objects --strict <odd.pack
'
test_expect_success 'transfer.fsckObjects handles odd pack (index)' '
rm -rf dst.git &&
git init --bare dst.git &&
test_must_fail git -C dst.git index-pack --strict --stdin <odd.pack
'
test_expect_success 'index-pack --strict works for non-repo pack' '
rm -rf dst.git &&
git init --bare dst.git &&
cp odd.pack dst.git &&
test_must_fail git -C dst.git index-pack --strict odd.pack 2>output &&
# Make sure we fail due to bad gitmodules content, not because we
# could not read the blob in the first place.
grep gitmodulesName output
'
test_expect_success 'fsck detects symlinked .gitmodules file' '
git init symlink &&
(
cd symlink &&
# Make the tree directly to avoid index restrictions.
#
# Because symlinks store the target as a blob, choose
# a pathname that could be parsed as a .gitmodules file
# to trick naive non-symlink-aware checking.
tricky="[foo]bar=true" &&
content=$(git hash-object -w ../.gitmodules) &&
target=$(printf "$tricky" | git hash-object -w --stdin) &&
{
printf "100644 blob $content\t$tricky\n" &&
printf "120000 blob $target\t.gitmodules\n"
} | git mktree &&
# Check not only that we fail, but that it is due to the
# symlink detector; this grep string comes from the config
# variable name and will not be translated.
test_must_fail git fsck 2>output &&
test_i18ngrep gitmodulesSymlink output
)
'
test_expect_success 'fsck detects non-blob .gitmodules' '
git init non-blob &&
(
cd non-blob &&
# As above, make the funny tree directly to avoid index
# restrictions.
mkdir subdir &&
cp ../.gitmodules subdir/file &&
git add subdir/file &&
git commit -m ok &&
git ls-tree HEAD | sed s/subdir/.gitmodules/ | git mktree &&
test_must_fail git fsck 2>output &&
test_i18ngrep gitmodulesBlob output
)
'
test_expect_success 'fsck detects corrupt .gitmodules' '
git init corrupt &&
(
cd corrupt &&
echo "[broken" >.gitmodules &&
git add .gitmodules &&
git commit -m "broken gitmodules" &&
git fsck 2>output &&
test_i18ngrep gitmodulesParse output &&
test_i18ngrep ! "bad config" output
)
'
test_expect_success MINGW 'prevent git~1 squatting on Windows' '
git init squatting &&
(
cd squatting &&
mkdir a &&
touch a/..git &&
git add a/..git &&
test_tick &&
git commit -m initial &&
modules="$(test_write_lines \
"[submodule \"b.\"]" "url = ." "path = c" \
"[submodule \"b\"]" "url = ." "path = d\\\\a" |
git hash-object -w --stdin)" &&
rev="$(git rev-parse --verify HEAD)" &&
hash="$(echo x | git hash-object -w --stdin)" &&
test_must_fail git update-index --add \
--cacheinfo 160000,$rev,d\\a 2>err &&
test_i18ngrep "Invalid path" err &&
git -c core.protectNTFS=false update-index --add \
--cacheinfo 100644,$modules,.gitmodules \
--cacheinfo 160000,$rev,c \
--cacheinfo 160000,$rev,d\\a \
--cacheinfo 100644,$hash,d./a/x \
--cacheinfo 100644,$hash,d./a/..git &&
test_tick &&
git -c core.protectNTFS=false commit -m "module"
) &&
test_must_fail git -c core.protectNTFS=false \
clone --recurse-submodules squatting squatting-clone 2>err &&
test_i18ngrep -e "directory not empty" -e "not an empty directory" err &&
! grep gitdir squatting-clone/d/a/git~2
'
test_expect_success 'git dirs of sibling submodules must not be nested' '
git init nested &&
test_commit -C nested nested &&
(
cd nested &&
cat >.gitmodules <<-EOF &&
[submodule "hippo"]
url = .
path = thing1
[submodule "hippo/hooks"]
url = .
path = thing2
EOF
git clone . thing1 &&
git clone . thing2 &&
git add .gitmodules thing1 thing2 &&
test_tick &&
git commit -m nested
) &&
test_must_fail git clone --recurse-submodules nested clone 2>err &&
test_i18ngrep "is inside git dir" err
'
test_done
|
#!/bin/bash
### Utility to test models by running command line actions.
### Actions are typically model training / translation
### or option setters.
###
### Actions are executed in the order they are provided, therefore setters must
### come first
###
### Example:
### - Run all tests:
### ./test_models.sh all
### or
### ./test_models.sh
###
### - Run all tests using GPU (i.e. -gpuid 0):
### ./test_models.sh set_gpu all
### (note that set_gpu comes first!!!)
### you can set all GPU (i.e. to match CUDA_VISIBLE_DEVICES)!
### ./test_models.sh set_all_gpu all
###
### - Train each models, and run translation (for each!):
### ./test_models.sh translate_each all
### (note that translate_each comes first!!!)
###
### - Train and translate a specific model (e.g. lstm):
### ./test_models.sh lstm translate
### note that translate only considers the last model, therefore:
### ./test_models.sh lstm cnn translate
### would actually use CNN model for translation
###
### - Run in debug mode (stops on first error)
### ./test_models.sh set_debug all
###
PYTHON_BIN=python
MODEL_DIR="/tmp"
MODEL_NAME="onmt_tmp_model"
MODEL_PATH="$MODEL_DIR/$MODEL_NAME"
MODEL_FILES_PREFIX="${MODEL_NAME}_acc_"
TEST_DIR="./onmt/tests"
TEST_MODEL_NAME="test_model.pt"
TEST_MODEL_PATH="$TEST_DIR/$TEST_MODEL_NAME"
DATA_DIR="./data"
DATA_PATH="$DATA_DIR/data"
# Do not edit directly, use calls 'set_gpu' and 'translate_each'
GPUID=-1
TRANSLATE_EACH=0
### Some setters
###############################################
set_gpu(){
GPUID=0
}
set_all_gpu(){
GPUID="$(echo $CUDA_VISIBLE_DEVICES)"
}
print_gpuid(){
echo "$GPUID"
}
set_debug(){
set -e
}
translate_each(){
TRANSLATE_EACH=1
}
### Some utils functions
###############################################
mv_best_checkpoint(){
best_model="$(ls -lsrt $MODEL_DIR | grep "${MODEL_FILES_PREFIX}*" | tail -n 1 | awk '{print $NF}')"
mv "$MODEL_DIR/$best_model" "$TEST_MODEL_PATH"
}
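# For illustration: with checkpoints such as onmt_tmp_model_acc_40.1_ppl_20.3_e9.pt
# and onmt_tmp_model_acc_45.8_ppl_15.4_e10.pt in /tmp, the pipeline above keeps the
# most recently written file and renames it to ./onmt/tests/test_model.pt
# (the checkpoint names here are only examples).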
rm_tmp_checkpoints(){
rm -f "$MODEL_DIR/${MODEL_FILES_PREFIX}"*
}
### RNNLM
###############################################
lstm(){
rm -f "$DATA_DIR"/*.pt
$PYTHON_BIN preprocess.py -train_src "$DATA_DIR"/src-train.txt \
-train_tgt "$DATA_DIR"/tgt-train.txt \
-valid_src "$DATA_DIR"/src-val.txt \
-valid_tgt "$DATA_DIR"/tgt-val.txt \
-save_data "$DATA_PATH" \
-src_vocab_size 1000 \
-tgt_vocab_size 1000
# Expected: 2.28M Param - Epoch10: PPL 15.37 ACC 45.87
$PYTHON_BIN train.py -data "$DATA_PATH" \
-save_model "$MODEL_PATH" \
-gpuid "$GPUID" \
-rnn_size 512 \
-word_vec_size 512 \
-layers 1 \
-epochs 10 \
-optim adam \
-learning_rate 0.001 \
-rnn_type LSTM
mv_best_checkpoint
maybe_translate
rm_tmp_checkpoints
}
### SRU
###############################################
sru(){
rm -f "$DATA_DIR"/*.pt
$PYTHON_BIN preprocess.py -train_src "$DATA_DIR"/src-train.txt \
-train_tgt "$DATA_DIR"/tgt-train.txt \
-valid_src "$DATA_DIR"/src-val.txt \
-valid_tgt "$DATA_DIR"/tgt-val.txt \
-save_data "$DATA_PATH" \
-src_vocab_size 1000 \
-tgt_vocab_size 1000 \
-rnn_type "SRU" \
-input_feed 0
$PYTHON_BIN train.py -data "$DATA_PATH" \
-save_model "$MODEL_PATH" \
-gpuid "$GPUID" \
-rnn_size 512 \
-word_vec_size 512 \
-layers 1 \
-epochs 10 \
-optim adam \
-learning_rate 0.001 \
-rnn_type SRU
mv_best_checkpoint
maybe_translate
rm_tmp_checkpoints
}
### CNN
###############################################
cnn(){
rm -f "$DATA_DIR"/*.pt
$PYTHON_BIN preprocess.py -train_src "$DATA_DIR"/src-train.txt\
-train_tgt "$DATA_DIR"/tgt-train.txt \
-valid_src "$DATA_DIR"/src-val.txt \
-valid_tgt "$DATA_DIR"/tgt-val.txt \
-save_data "$DATA_PATH" \
-src_vocab_size 1000 \
-tgt_vocab_size 1000
# size256 - 1.76M Param - Epoch10: PPL 24.34 ACC 40.08
# 2x256 - 2.61M Param - Epoch10: PPL 22.91 ACC 39.14
$PYTHON_BIN train.py -data "$DATA_PATH" \
-save_model "$MODEL_PATH" \
-gpuid "$GPUID" \
-rnn_size 256 \
-word_vec_size 256 \
-layers 2 \
-epochs 10 \
-optim adam \
-learning_rate 0.001 \
-encoder_type cnn \
-decoder_type cnn
mv_best_checkpoint
maybe_translate
rm_tmp_checkpoints
}
### MORPH DATA
###############################################
morph(){
################# MORPH DATA
rm -f "$DATA_DIR"/morph/*.pt
$PYTHON_BIN preprocess.py -train_src "$DATA_DIR"/morph/src.train \
-train_tgt "$DATA_DIR"/morph/tgt.train \
-valid_src "$DATA_DIR"/morph/src.valid \
-valid_tgt "$DATA_DIR"/morph/tgt.valid \
-save_data "$DATA_DIR"/morph/data
$PYTHON_BIN train.py -data "$DATA_DIR"/morph/data \
-save_model "$MODEL_PATH" \
-gpuid "$GPUID" \
-rnn_size 400 \
-word_vec_size 100 \
-layers 1 \
-epochs 8 \
-optim adam \
-learning_rate 0.001
mv_best_checkpoint
maybe_translate
rm_tmp_checkpoints
}
### TRANSFORMER
###############################################
transformer(){
rm -f "$DATA_DIR"/*.pt
$PYTHON_BIN preprocess.py -train_src "$DATA_DIR"/src-train.txt \
-train_tgt "$DATA_DIR"/tgt-train.txt \
-valid_src "$DATA_DIR"/src-val.txt \
-valid_tgt "$DATA_DIR"/tgt-val.txt \
-save_data "$DATA_PATH" \
-src_vocab_size 1000 \
-tgt_vocab_size 1000 \
-share_vocab
# Expected: 3.41M Param - Epoch10: PPL 15.50 ACC 45.67
$PYTHON_BIN train.py -data "$DATA_PATH" \
-save_model "$MODEL_PATH" \
-share_embedding \
-batch_type tokens \
-batch_size 1024 \
-accum_count 4 \
-layers 1 \
-rnn_size 256 \
-word_vec_size 256 \
-encoder_type transformer \
-decoder_type transformer \
-epochs 10 \
-gpuid "$GPUID" \
-max_generator_batches 4 \
-dropout 0.1 \
-normalization tokens \
-max_grad_norm 0 \
-optim sparseadam \
-decay_method noam \
-learning_rate 2 \
-position_encoding \
-param_init 0 \
-warmup_steps 100 \
-param_init_glorot \
-adam_beta2 0.998
mv_best_checkpoint
maybe_translate
rm_tmp_checkpoints
#$PYTHON_BIN train.py -data "$DATA_PATH" -save_model "$MODEL_PATH" -batch_type tokens -batch_size 128 -accum_count 4 \
# -layers 4 -rnn_size 128 -word_vec_size 128 -encoder_type transformer -decoder_type transformer \
# -epochs 10 -gpuid "$GPUID" -max_generator_batches 4 -dropout 0.1 -normalization tokens \
# -max_grad_norm 0 -optim sparseadam -decay_method noam -learning_rate 2 \
# -position_encoding -param_init 0 -warmup_steps 8000 -param_init_glorot -adam_beta2 0.998
}
### TRANSLATION
###############################################
translate(){
$PYTHON_BIN translate.py -gpu "$GPUID" \
-model "$TEST_MODEL_PATH" \
-output "$TEST_DIR"/output_hyp.txt \
-beam 5 \
-batch_size 32 \
-src "$DATA_DIR"/src-val.txt
}
maybe_translate(){
if [ $TRANSLATE_EACH -eq 1 ]
then
translate
fi
}
all(){
lstm
sru
cnn
morph
transformer
translate
}
actions="$@"
# set the default action
if [ -z "$1" ]; then
actions="all"
fi
# Process actions (in order)
for action in $actions; do
echo "Running: $action"
eval "$action"
done
echo "Done."
|
#!/bin/bash
TRAIN_IMAGE_DIR="./object_detection/datasets/val2017/images/"
VAL_IMAGE_DIR="${TRAIN_IMAGE_DIR}"
TEST_IMAGE_DIR="${TRAIN_IMAGE_DIR}"
TRAIN_ANNOTATIONS_FILE="./object_detection/datasets/val2017/annotations/instances_val2017.json"
VAL_ANNOTATIONS_FILE="${TRAIN_ANNOTATIONS_FILE}"
TESTDEV_ANNOTATIONS_FILE="${TRAIN_ANNOTATIONS_FILE}"
OUTPUT_DIR="./object_detection/datasets/val2017/tfrecords/"
python ./object_detection/dataset_tools/create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
|
#!/usr/bin/env bash
# NumID - Number to and from ID Generator/Crypter.
# Version: 1.0
# Written by Metaspook
# Copyright (c) 2020 Metaspook.
# Requires version 4+ of Bash.
if [ -z "$BASH_VERSION" ] || [ "${BASH_VERSINFO[0]}" -lt 4 ]; then
echo -e "ERROR: Requires Bash version 4+\n";exit 1;fi
### VARIABLES/ARRAY ###
declare -A App=(
[Name]="NumID"
[FName]=$(case ${0##*/} in numid|numid.sh) echo "${0##*/}";; *) echo "numid";; esac)
[Ver]="1.0"
[CRDate]=$([[ $(date +'%Y' 2>/dev/null) -gt 2020 ]] && echo $(date +'%Y') || echo 2020)
)
declare -A RGX=(
[repeat]="(.).*\1"
[alnum10]="^[A-Za-z0-9]{10}+$"
[alnum]="^[A-Za-z0-9]+$"
[alpha]="^[A-Za-z]+$"
[digit]="^[0-9]+$"
)
CharSet=("ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789")
### FUNCTIONS ###
function ptrn_gen(){
local AlCnt AlNum SChar UChar; case $1 in alnum) AlNum=$1; shift;; esac
for ((i=10;i>${#UChar};i)); do
SChar="${1:${RANDOM}%${#1}:1}"
[[ "$AlNum" && "$SChar" =~ ${RGX[alpha]} && $((++AlCnt)) -lt 2 ]] && SChar="" || AlCnt=0
[[ ! "$SChar" =~ ^[$UChar]+$ ]] && UChar="${UChar}${SChar}"
done; echo $UChar
}
function interadd(){
if ((${#1}==1)); then echo $1
elif ((${#1}>1)); then
local OutNum=0; for ((i=0;i<${#1};++i)); do ((OutNum+=${1:((i-1)):1}))
done; ((${#OutNum}!=1)) && $FUNCNAME $OutNum || echo $OutNum
fi
}
function is_odd(){ (( 10#$1%2!=0)); }
function revstr(){
echo $(for ((i=1;i<=${#1};i++)); do printf '%s' "${1:((${#1}-i)):1}"; done)
}
function num2str(){
# Number to String using character set.**
# Usage: num2str <charset> <number>
echo $(for ((i=1;i<=${#2};i++)); do local N="${2:((i-1)):1}"; printf '%s' "${1:N:1}"; done)
}
function str2num(){
# String to Number using character set.**
# Usage: str2num <charset> <string>
echo $(for ((i=1;i<=${#2};i++)); do local P="${1#*${2:((i-1)):1}}"; printf '%s' "$((${#1}-${#P}-1))"; done)
}
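# Worked example with a hypothetical charset (first argument is the charset):
#   num2str "ABCDEFGHIJ" 102  ->  BAC   (digit 1 -> B, 0 -> A, 2 -> C)
#   str2num "ABCDEFGHIJ" BAC  ->  102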
function rotnum(){
# Number rotator. [0-9]**
# Usage: rotnum <rotation 0 to 9> <number>
local InDgt=$2 Rot=$1 DgtCnt OutDgt
while [[ ${#InDgt} -ge $((++DgtCnt)) ]]; do
local SDgt=${InDgt:((DgtCnt-1)):1}
if [ $Rot -ge 5 -a $SDgt -ge 5 ]; then
OutDgt=${OutDgt}$((SDgt-((10-Rot))))
else
local OutDgtT=$((SDgt+Rot))
[ $((SDgt+Rot)) -gt 9 ] && OutDgtT=${OutDgtT: -1:1}
OutDgt=${OutDgt}$OutDgtT
fi
done; echo $OutDgt
}
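# Worked example: rotnum 3 705 -> 038 (7+3=10 keeps only its last digit 0; 0+3=3; 5+3=8).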
function id_encrypt(){
# Number -> ID.**
local Rot CharSet InDgt
case $# in
3) CharSet=$2; InDgt=$3; Rot=$1;;
2) CharSet=$1; InDgt=$2; Rot=$((10-$(interadd ${#InDgt})));;
*) return 1
esac
InDgt=$(rotnum $Rot $InDgt)
is_odd $(interadd $InDgt) && InDgt=$(revstr $InDgt)
num2str $CharSet $InDgt
}
function id_decrypt(){
# ID -> Number.**
local Rot CharSet InDgt InStr
case $# in
3) CharSet=$2; InStr=$3; Rot=$(($1==0?0:10-$1));;
2) CharSet=$1; InStr=$2; Rot=$(interadd ${#InStr});;
*) return 1
esac
InDgt=$(str2num $CharSet $InStr)
is_odd $(interadd $InDgt) && InDgt=$(revstr $InDgt)
rotnum $Rot $InDgt
}
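# Round-trip sketch with a hypothetical 10-character pattern:
#   id_encrypt "K3J9X2M7QZ" 12345  ->  KZQ7M   (auto rotation derived from the length)
#   id_decrypt "K3J9X2M7QZ" KZQ7M  ->  12345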
function chk(){
# Error handler.**
for X in $@; do local P K=${X/=*} V=${X#*=}; case $K in
-opt) echo "ERROR: Invalid option! Use '-h' or '--help' for available options."; exit 1;;
-rot) if [[ ${#V} -ne 1 || ! "$V" =~ ${RGX[digit]} ]]; then
echo "ERROR: Rotation must in range of 0 to 9."
exit 1; fi;;
-ptrn) P=$V; if [[ ! "$V" =~ ${RGX[alnum10]} || "$V" =~ ${RGX[repeat]} ]]; then
echo "ERROR: Pattern must consist of 10 unique alpha-numeric characters."
exit 1; fi;;
-num) if [[ ! "$V" =~ ${RGX[digit]} ]]; then
echo "ERROR: Number must contain numeric characters."
exit 1; fi;;
-id) if [[ ! "$V" =~ ^[$P]+$ ]]; then
echo "ERROR: Invalid ID or Pattern!"
exit 1; fi;;
esac; done
}
function Main_Usage(){
echo "
NumID (Number to and from ID Generator/Crypter)
Version: ${App[Ver]} | MIT License (Open Source)
Copyright © ${App[CRDate]} Metaspook.
NumID's algorithm uses a pattern consisting of 10 unique
alpha-numeric characters and an auto/manual rotation as the
key to encrypt Number to ID and decrypt ID to Number.
Usage: ${App[FName]} <options..> <pattern> <number|id>
or: ${App[FName]} <options>
<options> <details>
-e Encrypt Number to ID using pattern.
-R[0-9] Specify manual rotation to encrypt.
-d Decrypt ID to Number using pattern.
-R[0-9] Specify manual rotation encrypted with.
-A|-a Generate a unique alphabetic pattern,
use '-a' for lowercase.
-N Generate a unique numeric pattern.
-aN|-AN|-NA|-Na Generate a unique alpha-numeric pattern.
-h, --help Display this help and exit.
"
}
#----< CALL CENTER >----#
#
# Check options.
case $# in
0) Main_Usage;;
1) case $1 in
-h|--help) Main_Usage;;
-*a*) CharSet[0]="${CharSet[0],,}";;&
-a|-A) ptrn_gen "${CharSet[0]}";;
-N) ptrn_gen "${CharSet[1]}";;
-AN|-NA|-aN|-Na) ptrn_gen alnum "${CharSet[0]}${CharSet[1]}";;
*) chk -opt;;
esac;;
3|4) case $1 in
-e) NumId=num IdCrypt=id_encrypt;;
-d) NumId=id IdCrypt=id_decrypt;;
*) chk -opt;;
esac;;&
3) chk -ptrn="$2" -${NumId}="$3"; ${IdCrypt} $2 $3;;
4) case $2 in
-R[0-9]) chk -rot="${2#-R}" -ptrn="$3" -${NumId}="$4"; ${IdCrypt} ${2#-R} $3 $4;;
*) chk -opt;;
esac;;
*) chk -opt;;
esac
exit 0
|
#!/bin/bash
set -euxo pipefail
COMBO="./ios-frameworks"
DEVICE="$COMBO/Debug-iphoneos"
SIM="$COMBO/Debug-iphonesimulator"
lipo -create $DEVICE/libChannelLib.a $SIM/libChannelLib.a -output $COMBO/libChannelLib.a
lipo -create $DEVICE/libCommonLib.a $SIM/libCommonLib.a -output $COMBO/libCommonLib.a
lipo -create $DEVICE/libeDistantObject.a $SIM/libeDistantObject.a -output $COMBO/libeDistantObject.a
lipo -create $DEVICE/libTestLib.a $SIM/libTestLib.a -output $COMBO/libTestLib.a
lipo -create $DEVICE/libUILib.a $SIM/libUILib.a -output $COMBO/libUILib.a
cp -RL $DEVICE/AppFramework.framework $COMBO/AppFramework.framework
DEVICE_FRAMEWORK="$DEVICE/AppFramework.framework/AppFramework"
SIM_FRAMEWORK="$SIM/AppFramework.framework/AppFramework"
UNI_FRAMEWORK="$COMBO/AppFramework.framework/AppFramework"
lipo -create \
"$DEVICE_FRAMEWORK" \
"$SIM_FRAMEWORK" \
-output "$UNI_FRAMEWORK"
dsymutil "$UNI_FRAMEWORK" \
--out "$COMBO/AppFramework.framework.dSYM"
|
#!/bin/bash
# ========== Experiment Seq. Idx. 826 / 41.4.2 / N. 51/5/0 - _S=41.4.2 D1_N=51 a=-1 b=1 c=1 d=-1 e=-1 f=1 D3_N=5 g=1 h=-1 i=1 D4_N=0 j=0 ==========
set -u
# Prints header
echo -e '\n\n========== Experiment Seq. Idx. 826 / 41.4.2 / N. 51/5/0 - _S=41.4.2 D1_N=51 a=-1 b=1 c=1 d=-1 e=-1 f=1 D3_N=5 g=1 h=-1 i=1 D4_N=0 j=0 ==========\n\n'
if [[ "No" == "Yes" ]]; then
echo 'FATAL: This treatment included an SVM layer.'>&2
echo ' Something very wrong happened!'>&2
exit 161
fi
# Prepares all environment variables
JBHI_DIR="$HOME/jbhi-special-issue"
DATASET_DIR="$JBHI_DIR/data/fulltrain-seg.598.tfr"
MODEL_DIR="$JBHI_DIR/models/deep.51"
RESULTS_DIR="$JBHI_DIR/results"
RESULTS_PREFIX="$RESULTS_DIR/deep.51.layer.5.test.0.index.2186.nosvm"
RESULTS_PATH="$RESULTS_PREFIX.results.txt"
# ...variables expected by jbhi-checks.include.sh and jbhi-footer.include.sh
SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODEL_DIR/finish.txt"
START_PATH="$RESULTS_PREFIX.start.txt"
FINISH_PATH="$RESULTS_PREFIX.finish.txt"
LOCK_PATH="$RESULTS_PREFIX.running.lock"
LAST_OUTPUT="$RESULTS_PATH"
# EXPERIMENT_STATUS=1
# STARTED_BEFORE=No
mkdir -p "$RESULTS_DIR"
#
# Assumes that the following environment variables were initialized
# SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
# LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODELS_DIR/finish.txt:"
# START_PATH="$OUTPUT_DIR/start.txt"
# FINISH_PATH="$OUTPUT_DIR/finish.txt"
# LOCK_PATH="$OUTPUT_DIR/running.lock"
# LAST_OUTPUT="$MODEL_DIR/[[[:D1_MAX_NUMBER_OF_STEPS:]]].meta"
EXPERIMENT_STATUS=1
STARTED_BEFORE=No
# Checks if code is stable, otherwise alerts scheduler
pushd "$SOURCES_GIT_DIR" >/dev/null
GIT_STATUS=$(git status --porcelain)
GIT_COMMIT=$(git log | head -n 1)
popd >/dev/null
if [ "$GIT_STATUS" != "" ]; then
echo 'FATAL: there are uncommitted changes in your git sources file' >&2
echo ' for reproducibility, experiments only run on committed changes' >&2
echo >&2
echo ' Git status returned:'>&2
echo "$GIT_STATUS" >&2
exit 162
fi
# The experiment is already finished - exits with special code so scheduler won't retry
if [[ "$FINISH_PATH" != "-" ]]; then
if [[ -e "$FINISH_PATH" ]]; then
echo 'INFO: this experiment has already finished' >&2
exit 163
fi
fi
# The experiment is not ready to run due to dependencies - alerts scheduler
if [[ "$LIST_OF_INPUTS" != "" ]]; then
IFS=':' tokens_of_input=( $LIST_OF_INPUTS )
input_missing=No
for input_to_check in ${tokens_of_input[*]}; do
if [[ ! -e "$input_to_check" ]]; then
echo "ERROR: input $input_to_check missing for this experiment" >&2
input_missing=Yes
fi
done
if [[ "$input_missing" != No ]]; then
exit 164
fi
fi
# Sets trap to return error code if script is interrupted before successful finish
LOCK_SUCCESS=No
FINISH_STATUS=161
function finish_trap {
if [[ "$LOCK_SUCCESS" == "Yes" ]]; then
rmdir "$LOCK_PATH" &> /dev/null
fi
if [[ "$FINISH_STATUS" == "165" ]]; then
echo 'WARNING: experiment discontinued because another process holds its lock' >&2
else
if [[ "$FINISH_STATUS" == "160" ]]; then
echo 'INFO: experiment finished successfully' >&2
else
[[ "$FINISH_PATH" != "-" ]] && rm -f "$FINISH_PATH"
echo 'ERROR: an error occurred while executing the experiment' >&2
fi
fi
exit "$FINISH_STATUS"
}
trap finish_trap EXIT
# While running, locks experiment so other parallel threads won't attempt to run it too
if mkdir "$LOCK_PATH" --mode=u=rwx,g=rx,o=rx &>/dev/null; then
LOCK_SUCCESS=Yes
else
echo 'WARNING: this experiment is already being executed elsewhere' >&2
FINISH_STATUS="165"
exit
fi
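# Exit codes used to signal the scheduler (as set throughout this script):
#   160 success, 161 generic failure, 162 uncommitted sources,
#   163 already finished, 164 missing inputs, 165 lock held by another process.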
# If the experiment was started before, do any cleanup necessary
if [[ "$START_PATH" != "-" ]]; then
if [[ -e "$START_PATH" ]]; then
echo 'WARNING: this experiment is being restarted' >&2
STARTED_BEFORE=Yes
fi
#...marks start
date -u >> "$START_PATH"
echo GIT "$GIT_COMMIT" >> "$START_PATH"
fi
# If the experiment was started before, do any cleanup necessary
if [[ "$STARTED_BEFORE" == "Yes" ]]; then
echo -n
fi
#...gets closest checkpoint file
MODEL_CHECKPOINT=$(ls "$MODEL_DIR/"model.ckpt-*.index | \
sed 's/.*ckpt-\([0-9]*\)\..*/\1/' | \
sort -n | \
awk -v c=1 -v t=30000 \
'NR==1{d=$c-t;d=d<0?-d:d;v=$c;next}{m=$c-t;m=m<0?-m:m}m<d{d=m;v=$c}END{print v}')
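# For illustration: if the model directory held model.ckpt-10000.index,
# model.ckpt-28000.index and model.ckpt-35000.index, the pipeline above would
# select 28000, the step closest to the 30000 target (checkpoint names are hypothetical).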
MODEL_PATH="$MODEL_DIR/model.ckpt-$MODEL_CHECKPOINT"
echo "$MODEL_PATH" >> "$START_PATH"
#...performs prediction
echo Testing on "$MODEL_PATH"
python \
"$SOURCES_GIT_DIR/predict_image_classifier.py" \
--model_name="resnet_v2_101_seg" \
--checkpoint_path="$MODEL_PATH" \
--dataset_name=skin_lesions \
--task_name=label \
--dataset_split_name=test \
--preprocessing_name=dermatologic \
--aggressive_augmentation="False" \
--add_rotations="False" \
--minimum_area_to_crop="0.05" \
--normalize_per_image="0" \
--batch_size=1 \
--id_field_name=id \
--pool_scores=avg \
--eval_replicas="50" \
--output_file="$RESULTS_PATH" \
--dataset_dir="$DATASET_DIR"
# Tip: leave last the arguments that make the command fail if they're absent,
# so if there's a typo or forgotten \ the entire thing fails
EXPERIMENT_STATUS="$?"
#
#...marks successful finish
if [[ "$EXPERIMENT_STATUS" == "0" ]]; then
if [[ "$LAST_OUTPUT" == "" || -e "$LAST_OUTPUT" ]]; then
if [[ "$FINISH_PATH" != "-" ]]; then
date -u >> "$FINISH_PATH"
echo GIT "$GIT_COMMIT" >> "$FINISH_PATH"
fi
FINISH_STATUS="160"
fi
fi
|
#! /bin/bash
sudo apt-get -y install libuv1-dev libssl-dev libz-dev
git clone https://github.com/uWebSockets/uWebSockets
cd uWebSockets
git checkout e94b6e1
mkdir build
cd build
cmake ..
make
sudo make install
cd ..
cd ..
sudo ln -s /usr/lib64/libuWS.so /usr/lib/libuWS.so
sudo rm -r uWebSockets
|
#!/bin/sh
###############################################################################
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
mvn clean
mvn compile
mvn package
|
#!/usr/bin/env bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
declare -A ORIGINAL_COPYRIGHT_YEAR=(
[centos-7]=2018
[centos-8]=2019
[debian-buster]=2019
[debian-stretch]=2018
[fedora]=2018
[opensuse-leap]=2019
[opensuse-tumbleweed]=2018
[ubuntu-xenial]=2018
[ubuntu-bionic]=2018
)
declare -a FROZEN_FILES=()
BUILD_AND_TEST_PROJECT_FRAGMENT=$(replace_fragments \
"INSTALL_CRC32C_FROM_SOURCE" \
"INSTALL_CPP_CMAKEFILES_FROM_SOURCE" \
"INSTALL_GOOGLETEST_FROM_SOURCE" \
"INSTALL_GOOGLE_CLOUD_CPP_COMMON_FROM_SOURCE" <<'_EOF_'
# #### crc32c
# The project depends on the Crc32c library, we need to compile this from
# source:
# ```bash
@INSTALL_CRC32C_FROM_SOURCE@
# ```
# #### googleapis
# We need a recent version of the Google Cloud Platform proto C++ libraries:
# ```bash
@INSTALL_CPP_CMAKEFILES_FROM_SOURCE@
# ```
# #### googletest
# We need a recent version of GoogleTest to compile the unit and integration
# tests.
# ```bash
@INSTALL_GOOGLETEST_FROM_SOURCE@
# ```
# #### google-cloud-cpp-common
# The project also depends on google-cloud-cpp-common, the libraries shared by
# all the Google Cloud C++ client libraries:
# ```bash
@INSTALL_GOOGLE_CLOUD_CPP_COMMON_FROM_SOURCE@
# ```
FROM devtools AS install
ARG NCPU=4
# #### Compile and install the main project
# We can now compile, test, and install `@GOOGLE_CLOUD_CPP_REPOSITORY@`.
# ```bash
WORKDIR /home/build/project
COPY . /home/build/project
RUN cmake -H. -Bcmake-out
RUN cmake --build cmake-out -- -j "${NCPU:-4}"
WORKDIR /home/build/project/cmake-out
RUN ctest -LE integration-tests --output-on-failure
RUN cmake --build . --target install
# ```
## [END INSTALL.md]
ENV PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig:/usr/local/lib/pkgconfig
# Verify that the installed files are actually usable
WORKDIR /home/build/test-install-plain-make
COPY ci/test-install /home/build/test-install-plain-make
RUN make
WORKDIR /home/build/test-install-cmake-bigtable
COPY ci/test-install/bigtable /home/build/test-install-cmake-bigtable
RUN env -u PKG_CONFIG_PATH cmake -H. -B/i/bigtable
RUN cmake --build /i/bigtable -- -j ${NCPU:-4}
WORKDIR /home/build/test-install-cmake-storage
COPY ci/test-install/storage /home/build/test-install-cmake-storage
RUN env -u PKG_CONFIG_PATH cmake -H. -B/i/storage
RUN cmake --build /i/storage -- -j ${NCPU:-4}
_EOF_
)
|
#!/bin/sh
# Multi-build script for testing compilation of all maintained
# configs of GDB.
# Copyright 2002, 2003 Free Software Foundation, Inc.
# Contributed by Richard Earnshaw ([email protected])
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
usage()
{
cat <<EOF
Usage: gdb_mbuild.sh [ <options> ... ] <srcdir> <builddir>
Options:
-j <makejobs> Run <makejobs> in parallel. Passed to make.
On a single cpu machine, 2 is recommended.
-k Keep going. Do not stop after the first build fails.
--keep Keep builds. Do not remove each build when finished.
-e <regexp> Regular expression for selecting the targets to build.
-f Force rebuild. Even rebuild previously built directories.
-v Be more (and more, and more) verbose.
Arguments:
<srcdir> Source code directory.
<builddir> Build directory.
Environment variables examined (with default if not defined):
MAKE (make)"
EOF
exit 1;
cat <<NOTYET
-b <maxbuilds> Run <maxbuild> builds in parallel.
On a single cpu machine, 1 is recommended.
NOTYET
}
### COMMAND LINE OPTIONS
makejobs=
maxbuilds=1
keepgoing=
force=false
targexp=""
verbose=0
keep=false
while test $# -gt 0
do
case "$1" in
-j )
# Number of parallel make jobs.
shift
test $# -ge 1 || usage
makejobs="-j $1"
;;
-b | -c )
# Number of builds to fire off in parallel.
shift
test $# -ge 1 || usage
maxbuilds=$1
;;
-k )
# Should we soldier on after the first build fails?
keepgoing=-k
;;
--keep )
keep=true
;;
-e )
# A regular expression for selecting targets
shift
test $# -ge 1 || usage
targexp="${targexp} -e ${1}"
;;
-f )
# Force a rebuild
force=true ;
;;
-v )
# Be more, and more, and more, verbose
verbose=`expr ${verbose} + 1`
;;
-* ) usage ;;
*) break ;;
esac
shift
done
### COMMAND LINE PARAMETERS
if test $# -ne 2
then
usage
fi
# Convert these to absolute directory paths.
# Where the sources live
srcdir=`cd $1 && /bin/pwd` || exit 1
# Where the builds occur
builddir=`cd $2 && /bin/pwd` || exit 1
### ENVIRONMENT PARAMETERS
# Version of make to use
make=${MAKE:-make}
MAKE=${make}
export MAKE
# Where to look for the list of targets to test
maintainers=${srcdir}/gdb/MAINTAINERS
if [ ! -r ${maintainers} ]
then
echo Maintainers file ${maintainers} not found
exit 1
fi
# Get the list of targets and the build options
alltarg=`cat ${maintainers} | tr -s '[\t]' '[ ]' | sed -n '
/^[ ]*[-a-z0-9\.]*[ ]*[(]*--target=.*/ !d
s/^.*--target=//
s/).*$//
h
:loop
g
/^[^ ]*,/ !b end
s/,[^ ]*//
p
g
s/^[^,]*,//
h
b loop
:end
p
' | if test "${targexp}" = ""
then
grep -v -e broken -e OBSOLETE
else
grep ${targexp}
fi`
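# Purely illustrative (not a real MAINTAINERS excerpt): a line such as
#   arm --target=arm-elf,arm-coff ,-Werror
# would be expanded by the sed script above into two entries,
#   arm-elf ,-Werror
#   arm-coff ,-Werror
# i.e. one build per comma-separated target, each keeping the trailing options.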
# Usage: fail <message> <test-that-should-succeed>. Should the build
# fail? If the test is true, and we don't want to keep going, print
# the message and shoot everything in sight and abort the build.
fail ()
{
msg="$1" ; shift
if test "$@"
then
echo "${target}: ${msg}"
if test "${keepgoing}" != ""
then
#exit 1
continue
else
kill $$
exit 1
fi
fi
}
# Usage: log <level> <logfile>. Write standard input to <logfile> and
# stdout (if verbose >= level).
log ()
{
if test ${verbose} -ge $1
then
tee $2
else
cat > $2
fi
}
# Warn the user of what is coming, print the list of targets
echo "$alltarg"
echo ""
# For each target, configure, build and test it.
echo "$alltarg" | while read target gdbopts simopts
do
trap "exit 1" 1 2 15
dir=${builddir}/${target}
# Should a scratch rebuild be forced, or perhaps the entire
# build be skipped?
if ${force}
then
echo forcing ${target} ...
rm -rf ${dir}
elif test -f ${dir}
then
echo "${target}"
continue
else
echo ${target} ...
fi
# Did the previous configure attempt fail? If it did
# restart from scratch.
if test -d ${dir} -a ! -r ${dir}/Makefile
then
echo ... removing partially configured ${target}
rm -rf ${dir}
if test -d ${dir}
then
echo "${target}: unable to remove directory ${dir}"
exit 1
fi
fi
# From now on, we're in this target's build directory
mkdir -p ${dir}
cd ${dir} || exit 1
# Configure, if not already. Should this go back to being
# separate and done in parallel?
if test ! -r Makefile
then
# Default SIMOPTS to GDBOPTS.
test -z "${simopts}" && simopts="${gdbopts}"
# The config options
__target="--target=${target}"
__enable_gdb_build_warnings=`test -z "${gdbopts}" \
|| echo "--enable-gdb-build-warnings=${gdbopts}"`
__enable_sim_build_warnings=`test -z "${simopts}" \
|| echo "--enable-sim-build-warnings=${simopts}"`
__configure="${srcdir}/configure \
${__target} \
${__enable_gdb_build_warnings} \
${__enable_sim_build_warnings}"
echo ... ${__configure}
trap "echo Removing partially configured ${dir} directory ...; rm -rf ${dir}; exit 1" 1 2 15
${__configure} 2>&1 | log 2 Config.log
trap "exit 1" 1 2 15
fi
fail "configure failed" ! -r Makefile
# Build, if not built.
if test ! -x gdb/gdb -a ! -x gdb/gdb.exe
then
# Iff the build fails remove the final build target so that
# the follow-on code knows things failed. Stops the follow-on
# code thinking that a failed rebuild succeeded (executable
# left around from previous build).
echo ... ${make} ${keepgoing} ${makejobs} ${target}
( ${make} ${keepgoing} ${makejobs} all-gdb || rm -f gdb/gdb gdb/gdb.exe
) 2>&1 | log 1 Build.log
fi
fail "compile failed" ! -x gdb/gdb -a ! -x gdb/gdb.exe
# Check that the built GDB can at least print its architecture.
echo ... run ${target}
rm -f core gdb.core ${dir}/gdb/x
cat <<EOF > x
maint print architecture
quit
EOF
./gdb/gdb -batch -nx -x x 2>&1 | log 1 Gdb.log
fail "gdb dumped core" -r core -o -r gdb.core
fail "gdb printed no output" ! -s Gdb.log
grep -e internal-error Gdb.log && fail "gdb panic" 1
echo ... cleanup ${target}
# Create a sed script that cleans up the output from GDB.
rm -f mbuild.sed
touch mbuild.sed || exit 1
# Rules to replace <0xNNNN> with the corresponding function's
# name.
sed -n -e '/<0x0*>/d' -e 's/^.*<0x\([0-9a-f]*\)>.*$/0x\1/p' Gdb.log \
| sort -u \
| while read addr
do
func="`addr2line -f -e ./gdb/gdb -s ${addr} | sed -n -e 1p`"
test ${verbose} -gt 0 && echo "${addr} ${func}" 1>&2
echo "s/<${addr}>/<${func}>/g"
done >> mbuild.sed
# Rules to strip the leading paths off of file names.
echo 's/"\/.*\/gdb\//"gdb\//g' >> mbuild.sed
# Run the script
sed -f mbuild.sed Gdb.log > Mbuild.log
# Replace the build directory with a file as semaphore that stops
# a rebuild. (should the logs be saved?)
cd ${builddir}
if ${keep}
then
:
else
rm -f ${target}.tmp
mv ${target}/Mbuild.log ${target}.tmp
rm -rf ${target}
mv ${target}.tmp ${target}
fi
# Success!
echo ... ${target} built
done
exit 0
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3361-1
#
# Security announcement date: 2015-09-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:34 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - qemu:1:2.1+dfsg-12+deb8u4
#
# Last versions recommended by the security team:
# - qemu:1:2.1+dfsg-12+deb8u6
#
# CVE List:
# - CVE-2015-5278
# - CVE-2015-5279
# - CVE-2015-6815
# - CVE-2015-6855
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade qemu=1:2.1+dfsg-12+deb8u6 -y
|
#!/bin/bash
mkdir -p /tmp/tsd
export HBASE_VERSION=0.94.16
export COMPRESSION=NONE
export HBASE_HOME=/opt/hbase-$HBASE_VERSION
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
echo "Clearing HBase (temporary) fix for ZEN-10492"
if [ -d /opt/zenoss/var/hbase ] ; then
rm -rf /opt/zenoss/var/hbase
fi
echo "Starting HBase..."
/opt/hbase-$HBASE_VERSION/bin/start-hbase.sh
while [[ `/opt/opentsdb/src/create_table.sh` != *"ERROR: Table already exists: tsdb"* ]]; do
echo `date` ": Waiting for HBase to be ready..."
sleep 2
done
echo "Starting opentsdb..."
exec /opt/opentsdb/build/tsdb tsd --port=4242 --staticroot=/opt/opentsdb/build/staticroot --cachedir=/tmp/tsd --auto-metric
|
autoload colors && colors
# cheers, @ehrenmurdick
# http://github.com/ehrenmurdick/config/blob/master/zsh/prompt.zsh
if (( $+commands[git] ))
then
git="$commands[git]"
else
git="/usr/bin/git"
fi
git_branch() {
echo $($git symbolic-ref HEAD 2>/dev/null | awk -F/ {'print $NF'})
}
git_dirty() {
if ! $git status -s &> /dev/null
then
echo ""
else
if [[ $($git status --porcelain) == "" ]]
then
echo "on %{$fg_bold[green]%}$(git_prompt_info)%{$reset_color%}"
else
echo "on %{$fg_bold[red]%}$(git_prompt_info)%{$reset_color%}"
fi
fi
}
git_prompt_info () {
ref=$($git symbolic-ref HEAD 2>/dev/null) || return
# echo "(%{\e[0;33m%}${ref#refs/heads/}%{\e[0m%})"
echo "on ${ref#refs/heads/}"
}
# This assumes that you always have an origin named `origin`, and that you only
# care about one specific origin. If this is not the case, you might want to use
# `$git cherry -v @{upstream}` instead.
need_push () {
if [ $($git rev-parse --is-inside-work-tree 2>/dev/null) ]
then
number=$($git cherry -v origin/$(git symbolic-ref --short HEAD) 2>/dev/null | wc -l | bc)
if [[ $number == 0 ]]
then
echo " "
else
echo " with %{$fg_bold[magenta]%}$number unpushed%{$reset_color%}"
fi
fi
}
directory_name() {
echo "%{$fg_bold[cyan]%}%1/%\/%{$reset_color%}"
}
battery_status() {
if test ! "$(uname)" = "Darwin"
then
exit 0
fi
if [[ $(sysctl -n hw.model) == *"Book"* ]]
then
$ZSH/bin/battery-status
fi
}
export PROMPT=$'\n$(battery_status)in $(directory_name) $(git_dirty)$(need_push)\n› '
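# Roughly, with a clean repo and made-up names, the prompt renders as:
#   in dotfiles on master
#   ›
# and switches the branch colour / adds "with N unpushed" as the state changes.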
set_prompt () {
export RPROMPT="%{$fg_bold[cyan]%}%{$reset_color%}"
}
precmd() {
title "zsh" "%m" "%55<...<%~"
set_prompt
}
|
rsync -uP ~/.leetcode/code/*rs ~/solutions_for_leetcode/题库
|
#!/usr/bin/bash
exp=$1
#### Experiment 0 - S3Fs Caching vs No Caching vs Prefetching
# echo "Executing experiment 0"
#### Experiment 1 - Block Size
if [[ $exp == *"1"* ]]
then
echo "Executing experiment 1"
python nib_read.py --block_size 8388608 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 16777216 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 33554432 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 67108864 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 134217728 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 268435456 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 536870912 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 1073741824 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --block_size 2147483648 --types mem --types prefetch --types s3fs --lazy --reps 10
mv ../results/us-west-2-xlarge/readnib* ../results/us-west-2-xlarge/exp-1
fi
if [[ $exp == *"2"* ]]
then
#### Experiment 2 - Number of files
echo "Executing experiment 2"
python nib_read.py --n_files 1 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --n_files 5 --types mem --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --n_files 10 --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --n_files 15 --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --n_files 20 --types prefetch --types s3fs --lazy --reps 10
python nib_read.py --n_files 25 --types prefetch --types s3fs --lazy --reps 10
mv ../results/us-west-2-xlarge/readnib* ../results/us-west-2-xlarge/exp-2
fi
if [[ $exp == *"3"* ]]
then
#### Experiment 3 - Nibabel vs Python read comparison
echo "Executing experiment 3"
python cmp_read.py
fi
if [[ $exp == *"4"* ]]
then
#### Experiment 4 - Parallel
echo "Executing experiment 4"
fs=( "s3fs" "prefetch" )
for i in 1 5 10 15 20 25
do
for j in {0..10}
do
fs=( $(shuf -e "${fs[@]}") )
for f in "${fs[@]}"
do
if [ "$f" = "s3fs" ]
then
python read_s3fs.py 0 $i $j 4 &
python read_s3fs.py $i $(( i*2 )) $j 4 &
python read_s3fs.py $(( i*2 )) $(( i*3 )) $j 4 &
python read_s3fs.py $(( i*3 )) $(( i*4 )) $j 4 &
else
python read_prefetched.py 0 $i $j 4 &
python read_prefetched.py $i $(( i*2 )) $j 4 &
python read_prefetched.py $(( i*2 )) $(( i*3 )) $j 4 &
python read_prefetched.py $(( i*3 )) $(( i*4 )) $j 4 &
fi
wait < <(jobs -p)
done
done
done
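# For example, with i=5 each filesystem type is read by four parallel workers
# covering file-index ranges 0-5, 5-10, 10-15 and 15-20 (20 files in total).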
mv ../results/us-west-2-xlarge/read_prefetch* ../results/us-west-2-xlarge/exp-4
mv ../results/us-west-2-xlarge/read_s3fs* ../results/us-west-2-xlarge/exp-4
fi
|
#!/bin/bash
set -o errexit -o pipefail
if [[ ${target_platform} =~ linux.* ]] || [[ ${target_platform} == win-32 ]] || [[ ${target_platform} == win-64 ]] || [[ ${target_platform} == osx-64 ]]; then
export DISABLE_AUTOBREW=1
${R} CMD INSTALL --build .
else
mkdir -p "${PREFIX}"/lib/R/library/ezplot
mv ./* "${PREFIX}"/lib/R/library/ezplot
if [[ ${target_platform} == osx-64 ]]; then
pushd "${PREFIX}"
for libdir in lib/R/lib lib/R/modules lib/R/library lib/R/bin/exec sysroot/usr/lib; do
pushd "${libdir}" || exit 1
while IFS= read -r -d '' SHARED_LIB
do
echo "fixing SHARED_LIB ${SHARED_LIB}"
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5.0-MRO/Resources/lib/libR.dylib "${PREFIX}"/lib/R/lib/libR.dylib "${SHARED_LIB}" || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libR.dylib "${PREFIX}"/lib/R/lib/libR.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/local/clang4/lib/libomp.dylib "${PREFIX}"/lib/libomp.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/local/gfortran/lib/libgfortran.3.dylib "${PREFIX}"/lib/libgfortran.3.dylib "${SHARED_LIB}" || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libquadmath.0.dylib "${PREFIX}"/lib/libquadmath.0.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/local/gfortran/lib/libquadmath.0.dylib "${PREFIX}"/lib/libquadmath.0.dylib "${SHARED_LIB}" || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libgfortran.3.dylib "${PREFIX}"/lib/libgfortran.3.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libgcc_s.1.dylib "${PREFIX}"/lib/libgcc_s.1.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libiconv.2.dylib "${PREFIX}"/sysroot/usr/lib/libiconv.2.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libncurses.5.4.dylib "${PREFIX}"/sysroot/usr/lib/libncurses.5.4.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libicucore.A.dylib "${PREFIX}"/sysroot/usr/lib/libicucore.A.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libexpat.1.dylib "${PREFIX}"/lib/libexpat.1.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libcurl.4.dylib "${PREFIX}"/lib/libcurl.4.dylib "${SHARED_LIB}" || true
install_name_tool -change /usr/lib/libc++.1.dylib "${PREFIX}"/lib/libc++.1.dylib "${SHARED_LIB}" || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libc++.1.dylib "${PREFIX}"/lib/libc++.1.dylib "${SHARED_LIB}" || true
done < <(find . \( -type f -iname "*.dylib" -or -iname "*.so" -or -iname "R" \) -print0)
popd
done
popd
fi
fi
|