#!/bin/bash
# wait for cloud-init to finish booting, otherwise apt-get could fail
until [[ -f /var/lib/cloud/instance/boot-finished ]]; do
sleep 1
done
# install some packages
sudo apt-get -y update
#sudo apt-get -y dist-upgrade
sudo apt-get -y install git
sudo apt-get -y install python3
sudo apt-get -y install python3-pip
pip3 install findspark
pip3 install numpy
pip3 install tensorflow==2.3.0
pip3 install spark-tensorflow-distributor
pip3 install pyspark
pip3 install pandas
pip3 install pyarrow
sudo apt-get -y install openjdk-8-jdk
export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64"
pip3 install --user pydoop
# master and slave IPs (you can add more if needed)
echo '
172.31.0.101 s01
172.31.0.102 s02
172.31.0.103 s03' | sudo tee --append /etc/hosts > /dev/null
sudo chmod 700 /home/ubuntu/.ssh
sudo chmod 600 /home/ubuntu/.ssh/id_rsa
echo '
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PATH:$JAVA_HOME/bin
export PYSPARK_PYTHON=python3' | sudo tee --append /home/ubuntu/.bashrc > /dev/null
# install hadoop 2.7.7
cd /opt/
sudo wget https://archive.apache.org/dist/hadoop/common/hadoop-2.7.7/hadoop-2.7.7.tar.gz > /dev/null
sudo tar zxvf hadoop-2.7.7.tar.gz > /dev/null
# hadoop configuration files
echo '
export HADOOP_HOME=/opt/hadoop-2.7.7
export PATH=$PATH:$HADOOP_HOME/bin
export HADOOP_CONF_DIR=/opt/hadoop-2.7.7/etc/hadoop' | sudo tee --append /home/ubuntu/.bashrc > /dev/null
echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://s01:9000</value>
</property>
</configuration>' | sudo tee /opt/hadoop-2.7.7/etc/hadoop/core-site.xml > /dev/null
echo '<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>s01</value>
</property>
</configuration>' | sudo tee /opt/hadoop-2.7.7/etc/hadoop/yarn-site.xml > /dev/null
sudo cp /opt/hadoop-2.7.7/etc/hadoop/mapred-site.xml.template /opt/hadoop-2.7.7/etc/hadoop/mapred-site.xml
echo '<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.jobtracker.address</name>
<value>s01:54311</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>' | sudo tee /opt/hadoop-2.7.7/etc/hadoop/mapred-site.xml > /dev/null
echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///opt/hadoop-2.7.7/hadoop_data/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///opt/hadoop-2.7.7/hadoop_data/hdfs/datanode</value>
</property>
</configuration>' | sudo tee /opt/hadoop-2.7.7/etc/hadoop/hdfs-site.xml > /dev/null
echo '
s01' | sudo tee --append /opt/hadoop-2.7.7/etc/hadoop/masters > /dev/null
echo '
s02
s03' | sudo tee /opt/hadoop-2.7.7/etc/hadoop/slaves > /dev/null
sudo sed -i -e 's/export\ JAVA_HOME=\${JAVA_HOME}/export\ JAVA_HOME=\/usr\/lib\/jvm\/java-8-openjdk-amd64/g' /opt/hadoop-2.7.7/etc/hadoop/hadoop-env.sh
sudo mkdir -p /opt/hadoop-2.7.7/hadoop_data/hdfs/namenode
sudo mkdir -p /opt/hadoop-2.7.7/hadoop_data/hdfs/datanode
sudo chown -R ubuntu /opt/hadoop-2.7.7
# spark installation
cd /opt/
sudo wget https://archive.apache.org/dist/spark/spark-3.0.1/spark-3.0.1-bin-hadoop2.7.tgz > /dev/null
sudo tar -xvzf spark-3.0.1-bin-hadoop2.7.tgz > /dev/null
echo '
export SPARK_HOME=/opt/spark-3.0.1-bin-hadoop2.7
export PATH=$PATH:$SPARK_HOME/bin' | sudo tee --append /home/ubuntu/.bashrc > /dev/null
sudo chown -R ubuntu /opt/spark-3.0.1-bin-hadoop2.7
cd spark-3.0.1-bin-hadoop2.7
cp conf/spark-env.sh.template conf/spark-env.sh
# spark configuration files
echo '
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export SPARK_MASTER_HOST=s01
export HADOOP_CONF_DIR=/opt/hadoop-2.7.7/etc/hadoop
export HADOOP_HOME=/opt/hadoop-2.7.7' | sudo tee --append conf/spark-env.sh > /dev/null
echo '
s02
s03' | sudo tee --append conf/slaves > /dev/null
cp conf/spark-defaults.conf.template conf/spark-defaults.conf
echo -e '$HADOOP_HOME/sbin/start-dfs.sh && $HADOOP_HOME/sbin/start-yarn.sh && $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver' > /home/ubuntu/hadoop-start-master.sh
echo '$SPARK_HOME/sbin/start-master.sh' > /home/ubuntu/spark-start-master.sh
echo '$SPARK_HOME/sbin/start-slave.sh spark://s01:7077' > /home/ubuntu/spark-start-slave.sh
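# Usage sketch (run on the master node s01 once all nodes are up; the
# namenode format is the standard HDFS first-run step, the helper scripts
# are the ones generated above):
# /opt/hadoop-2.7.7/bin/hdfs namenode -format   # first run only
# bash /home/ubuntu/hadoop-start-master.sh
# bash /home/ubuntu/spark-start-master.sh
# and on each worker: bash /home/ubuntu/spark-start-slave.sh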
|
#!/bin/sh
# properties = {"type": "single", "rule": "gatk_call", "local": false, "input": ["/labs/jandr/walter/tb/data/refs/MTB_ancestor_reference.fasta", "process/bams/SHTBRm2_S3_bwa_MTB_ancestor_reference.merged.realn.bam"], "output": ["process/vars/SHTBRm2_S3_bwa_MTB_ancestor_reference_gatk.vcf.gz"], "wildcards": {"samp": "SHTBRm2_S3", "mapper": "bwa", "ref": "MTB_ancestor_reference"}, "params": {"ploidy": "1"}, "log": ["process/vars/SHTBRm2_S3_bwa_MTB_ancestor_reference_gatk.log"], "threads": 1, "resources": {}, "jobid": 596, "cluster": {"time": 7200, "mem": "2G", "account": "jandr"}}
cd /oak/stanford/scg/lab_jandr/walter/tb/capture && \
/home/kwalter/.conda/envs/snakemake/bin/python3.6 \
-m snakemake process/vars/SHTBRm2_S3_bwa_MTB_ancestor_reference_gatk.vcf.gz --snakefile /oak/stanford/scg/lab_jandr/walter/tb/capture/Snakefile \
--force -j --keep-target-files --keep-remote \
--wait-for-files /oak/stanford/scg/lab_jandr/walter/tb/capture/.snakemake/tmp.9oxjlqnb /labs/jandr/walter/tb/data/refs/MTB_ancestor_reference.fasta process/bams/SHTBRm2_S3_bwa_MTB_ancestor_reference.merged.realn.bam --latency-wait 5 \
--attempt 1 --force-use-threads \
--wrapper-prefix https://github.com/snakemake/snakemake-wrappers/raw/ \
--allowed-rules gatk_call --nocolor --notemp --no-hooks --nolock \
--mode 2 && touch "/oak/stanford/scg/lab_jandr/walter/tb/capture/.snakemake/tmp.9oxjlqnb/596.jobfinished" || (touch "/oak/stanford/scg/lab_jandr/walter/tb/capture/.snakemake/tmp.9oxjlqnb/596.jobfailed"; exit 1)
|
#!/bin/sh
# shadowsocks script for HND/AXHND router with kernel 4.1.27/4.1.51 merlin firmware
source /koolshare/scripts/base.sh
LOGFILE_F=/tmp/upload/ssf_status.txt
LOGFILE_C=/tmp/upload/ssc_status.txt
LOGTIME=$(TZ=UTC-8 date -R "+%Y-%m-%d %H:%M:%S")
LOGTIME1=$(TZ=UTC-8 date -R "+%m-%d %H:%M:%S")
CURRENT=$(dbus get ssconf_basic_node)
eval $(dbus export ss_failover_enable)
get_china_status(){
local ret=`httping www.baidu.com -s -Z -c1 -f -t 3 2>/dev/null|sed -n '2p'|sed 's/seq=0//g'|sed 's/([0-9]\+\sbytes),\s//g'`
[ "$ss_failover_enable" == "1" ] && echo $LOGTIME1 $ret >> $LOGFILE_C
local S1=`echo $ret|grep -Eo "200 OK"`
if [ -n "$S1" ]; then
local S2=`echo $ret|sed 's/time=//g'|awk '{printf "%.0f ms\n",$(NF -3)}'`
log2='Domestic link 【'$LOGTIME'】 ✓ '$S2''
else
log2='Domestic link 【'$LOGTIME'】 <font color='#FF0000'>X</font>'
fi
}
get_foreign_status(){
local ret=`httping www.google.com.tw -s -Z -c1 -f -t 3 2>/dev/null|sed -n '2p'|sed 's/seq=0//g'|sed 's/([0-9]\+\sbytes),\s//g'`
[ "$ss_failover_enable" == "1" ] && echo $LOGTIME1 $ret "[`dbus get ssconf_basic_name_$CURRENT`]" $1 >> $LOGFILE_F
local S1=`echo $ret|grep -Eo "200 OK"`
if [ -n "$S1" ]; then
local S2=`echo $ret|sed 's/time=//g'|awk '{printf "%.0f ms\n",$(NF -3)}'`
log1='Foreign link 【'$LOGTIME'】 ✓ '$S2''
else
log1='Foreign link 【'$LOGTIME'】 <font color='#FF0000'>X</font>'
fi
}
PIDC="`ps|grep httping|grep baidu|grep -v grep|awk '{print $1}'`"
PIDF="`ps|grep httping|grep google.com.tw|grep -v grep|awk '{print $1}'`"
[ -n "$PIDC" ] && echo $LOGTIME1 httping China timeout >> $LOGFILE_C && kill -9 $PIDC
[ -n "$PIDF" ] && echo $LOGTIME1 httping foreign timeout "[`dbus get ssconf_basic_name_$CURRENT`]" >> $LOGFILE_F && kill -9 $PIDF
[ -n "`ps|grep ssconfig.sh|grep -v grep`" ] && exit
[ -n "`ps|grep ss_v2ray.sh|grep -v grep`" ] && exit
[ "`dbus get ss_basic_enable`" != "1" ] && exit
get_china_status $1
get_foreign_status $1
if [ "$ss_failover_enable" == "1" ];then
echo "$log1@@$log2" > /tmp/upload/ss_status.txt
else
http_response "$log1@@$log2"
fi
|
#!/usr/bin/env sh
. $(dirname $0)/variables.sh
template_file="$(dirname $0)/httpcheck.json"
if [ "$http_method" = "GET" ] || [ "$http_method" = "DELETE" ]; then
echo $(curl -X $http_method -w "@$template_file" -o /dev/null -s $url)
elif [ "$http_method" = "POST" ] || [ "$http_method" = "PUT" ]; then
if [ -z "$data" ]; then
echo $(curl -X $http_method -w "@$template_file" -o /dev/null -s $url)
else
echo $(curl -X $http_method -w "@$template_file" -H "Content-Type: $content_type" -o /dev/null -s --data "$data" $url)
fi
else
echo 0
fi
|
#!/bin/bash
#FIRST ROUND (python3 -m subt.main run config/mobos-ros.json --start-paused --side right --speed 0.5 --walldist 0.6 --init-offset="-2.5,0,0" --init-path="3, 0; 3, 3";python3 -m osgar.record config/test-lora.json) &
#SECOND ROUND (python3 -m subt.main run config/mobos-ros.json --side right --speed 0.5 --walldist 0.6 --init-offset="-2.5,0,0";python3 -m osgar.record config/test-lora.json) &
python3 -m subt.main run config/mobos-ros.json --side left --speed 0.5 --walldist 0.6 &
roslaunch robot auto_mob.launch
wait
|
#!/usr/bin/env bash
set -e
addprinc.sh opensearch
addprinc.sh HTTP/localhost
addprinc.sh peppa
addprinc.sh george dino
# Use this as a signal that setup is complete
python3 -m http.server 4444 &
sleep infinity
|
#!/bin/bash
# List of modules to update
# Be careful with forked repos to point at the right origin when opening PRs.
# DNX One public repositories list
# To get an updated list of DNX public repositories, use the GitHub CLI command: gh repo list DNXLabs --public
declare -a StringArray=(
"terraform-aws-ecs-app"
"terraform-aws-ecs-app-front"
"terraform-aws-ecs-app-nlb"
"terraform-aws-ecs-app-worker"
"terraform-aws-ecs-app-scheduler"
"terraform-aws-rds"
"terraform-aws-network"
"terraform-aws-gitlab-runner" # fork
"terraform-aws-jenkins" # fork
"terraform-aws-backup"
"terraform-aws-chatbot"
"terraform-aws-kinesis-stream-es"
"terraform-aws-ecs"
"terraform-aws-client-vpn"
"terraform-aws-account"
"terraform-aws-openvpn"
"terraform-aws-ecr"
"terraform-google-stackdriver-uptime"
"terraform-aws-waf"
"terraform-aws-backend"
"terraform-aws-static-app"
"terraform-aws-vpc-peering"
"terraform-aws-billing"
"terraform-aws-billing-role"
"terraform-aws-organization"
"terraform-aws-guardduty"
"terraform-aws-db-monitoring"
"terraform-aws-idp-gsuite"
"terraform-aws-sns"
"terraform-aws-hostedzone"
"terraform-aws-account-security"
"terraform-aws-lite-account-security"
"terraform-aws-route53healthcheck"
"terraform-aws-audit-root"
"terraform-aws-audit-member"
"terraform-aws-audit-buckets"
"terraform-aws-log-exporter"
"terraform-aws-rds-scheduler"
"terraform-aws-security-baseline"
"terraform-aws-account-identity"
"terraform-aws-eb-windows"
"terraform-aws-eks"
"terraform-aws-template"
"terraform-aws-eks-argocd"
"terraform-aws-media-convert"
"terraform-aws-github-runner"
"terraform-aws-eks-node-termination-handler"
"terraform-aws-eks-ack"
"terraform-aws-transfer-server"
"terraform-aws-eks-calico"
"terraform-aws-couchbase"
"terraform-aws-audit-regional"
"terraform-aws-eks-external-dns"
"terraform-aws-eventbridge-default"
"terraform-aws-security-alarms"
"terraform-aws-eks-cloudwatch-logs"
"terraform-aws-eks-vpc-cni"
"terraform-aws-securityhub"
"terraform-aws-eks-external-secrets"
"terraform-aws-eks-grafana-prometheus"
"terraform-aws-stateful"
"terraform-aws-acm-certificate"
"terraform-mongodbatlas-vpc-peering"
"terraform-aws-eks-istio-operator"
"terraform-aws-vpc-peering-inter-region"
"terraform-aws-eks-dashboard"
"terraform-aws-eks-kiali-operator"
"terraform-aws-redis"
"terraform-aws-eks-github-runner"
"terraform-aws-eks-metrics-server"
"terraform-azure-devops-self-hosted-agent-on-aws"
"terraform-aws-lambda-edge-function"
"terraform-aws-eks-velero"
"terraform-aws-eks-cert-manager"
"terraform-docs"
"terraform-aws-budget"
"terraform-aws-mwaa"
"terraform-aws-eks-efs-csi-driver"
"terraform-aws-sagemaker"
"terraform-aws-eks-cloudwatch-metrics"
"terraform-aws-eks-lb-controller"
"terraform-aws-maskopy"
"terraform-aws-inspector"
"terraform-aws-eks-cluster-autoscaler"
"terraform-aws-bitbucket-oidc"
)
mkdir repos
cd repos
BRANCH_NAME="" # e.g update-tf-versions
COMMIT_MESSAGE="" # e.g Bump TF required version to 0.13
PR_TITLE="" # e.g Set minimum terraform version to 1.13
PR_BODY="" # Explain why you are doing this PR
for val in "${StringArray[@]}"; do
git clone git@github.com:DNXLabs/$val.git
#gh repo clone DNXLabs/$val
#git clone https://github.com/DNXLabs/$val.git
cd $val
# Remove original file from upstream
rm versions.tf
# Add new files
cp ../../modifications/versions.tf .
git checkout -b feature/$BRANCH_NAME
git add .
git commit -m "$COMMIT_MESSAGE"
git push origin feature/"$BRANCH_NAME"
gh pr create -R DNXLabs/$val --title "$PR_TITLE" --body "$PR_BODY"
cd ..
read -p "Press enter to continue"
done
cd ..
rm -rf ./repos
|
#!/bin/sh
# This is a generated file; do not edit or check into version control.
export "FLUTTER_ROOT=C:\flutter"
export "FLUTTER_APPLICATION_PATH=E:\DSI\2020\mojiBooks\dart-epub\example\flutter_ex"
export "FLUTTER_TARGET=lib\main.dart"
export "FLUTTER_BUILD_DIR=build"
export "SYMROOT=${SOURCE_ROOT}/../build\ios"
export "FLUTTER_BUILD_NAME=1.0.0"
export "FLUTTER_BUILD_NUMBER=1"
export "DART_OBFUSCATION=false"
export "TRACK_WIDGET_CREATION=false"
export "TREE_SHAKE_ICONS=false"
export "PACKAGE_CONFIG=.packages"
|
NAME="server"
CERT="$NAME.crt"
DER="$NAME.der"
P7B="$NAME.p7b"
PFX="$NAME.pfx"
# TO DER
openssl x509 -in $CERT -out $DER -outform DER
# TO PFX
openssl pkcs12 -export -in $CERT -out $PFX -password pass:11223344
#-inkey server.key
#-certfile ca.crt
# TO P7B
openssl crl2pkcs7 -nocrl -certfile $CERT -out $P7B
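# Optional sanity checks (not part of the original flow): confirm each output parses back
openssl x509 -in $DER -inform DER -noout -subject
openssl pkcs12 -in $PFX -passin pass:11223344 -noout -info
openssl pkcs7 -in $P7B -print_certs -noout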
|
#
# $0 : Filename of current script.
# $n : An argument of the script. (n=1~9)
# $# : Number of arguments to the script
# $* : All arguments of the script. When double quoted, all arguments expand to a single word.
# $@ : All arguments of the script. When double quoted, each argument expands to a separate word.
# $? : The exit status of last command executed.
# $$ : PID of the current shell.
# $! : PID of the last background command.
#
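#
# Minimal demonstration of the parameters above (hypothetical file name;
# save as demo.sh and run: bash demo.sh one two):
#
# echo "script name: $0, first arg: $1, arg count: $#, all args: $@"
# true; echo "last status: $?"
# echo "shell PID: $$"
# sleep 1 & echo "background PID: $!"
# wait
#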
|
#! /bin/bash
CMAKE_VERSION=3.17.2
CMAKE_URL="https://github.com/Kitware/CMake/releases/download/v$CMAKE_VERSION/cmake-$CMAKE_VERSION.tar.gz"
mkdir "cmake-$CMAKE_VERSION"
travis_retry wget --no-check-certificate -O - $CMAKE_URL | tar --strip-components=1 -xz -C cmake-$CMAKE_VERSION
cd cmake-$CMAKE_VERSION
./bootstrap
make && sudo make install && cd ..
rm -rf cmake-$CMAKE_VERSION.tar.gz
export PATH="$PWD/cmake-$CMAKE_VERSION/bin:$PATH"
|
#!/bin/bash
clear
echo -e "==============================\n===== Blockchain install =====\n=============================="
# git clone https://github.com/hyperledger/fabric-samples.git /hlf-lab
export GITHUBUSERID="hlf"
mkdir -p $HOME/go/src/github.com/$GITHUBUSERID
cd $HOME/go/src/github.com/$GITHUBUSERID
curl -sSL https://bit.ly/2ysbOFE | bash -s
echo -e "-------------------------\n----- Is it all OK? -----\n-------------------------"
|
{{/* =% sh %= */}}
{{ $clearState := flag "clear-state" "bool" "Clear stored states" | prompt "Clear state?" "bool" false }}
{{ $profile := flag "aws-cred-profile" "string" "Profile name" | prompt "Profile for your .aws/credentials?" "string" "default" }}
{{ $region := flag "region" "string" "aws region" | prompt "Region?" "string" "eu-central-1"}}
echo "Clear stale pids"
rm -rf $HOME/.infrakit/plugins/* # remove sockets, pid files, etc.
{{ if $clearState }}
echo "Clear local state from previous runs"
rm -rf $HOME/.infrakit/configs/* # for file based manager
# Since we are using file based leader detection, write the default name (manager1) to the leader file.
echo manager1 > $HOME/.infrakit/leader
{{ end }}
echo "Starting infrakit with aws plugin..."
{{/* Pick a credential from the local ~/.aws folder. You should have this if you use awscli. */}}
{{ $creds := index (source (cat "file://" (env "HOME") "/.aws/credentials" | nospace) | iniDecode ) $profile }}
FOUND="{{ not (empty $creds) }}"
if [ $FOUND = "false" ]; then
echo "no credentials found. bye"
exit 1
fi
{{ $namespace := env `USER` }}
{{ echo "Found your credential for profile" $profile }}
{{ echo "All resources created will be namespaced to " $namespace }}
AWS_ACCESS_KEY_ID={{ $creds.aws_access_key_id }} \
AWS_SECRET_ACCESS_KEY={{ $creds.aws_secret_access_key }} \
INFRAKIT_AWS_REGION={{ $region }} \
INFRAKIT_AWS_NAMESPACE_TAGS="infrakit_namespace={{ $namespace }}" \
INFRAKIT_MANAGER_CONTROLLERS=resource,inventory,pool \
infrakit plugin start manager:mystack vars group resource inventory pool aws \
--log 5 --log-stack --log-debug-V 1000 \
--log-debug-match module=controller/resource \
--log-debug-match module=provider/aws \
--log-debug-match module=core/fsm
|
#!/bin/sh
#
# Create a file all_players.txt
cd players
ls -1 > ../all_players.txt
|
#!/bin/bash
# Make sure databases exist; accessing them creates them on first write
mongo --host mongo --eval "db.getSiblingDB('tomatoes_app_development'); db.getSiblingDB('tomatoes_app_test');"
rails s
|
# -*- mode: sh; mode: sh-bash -*-
# Constants (placed here because they are used from many different places)
_ble_term_nl=$'\n'
_ble_term_FS=$'\034'
_ble_term_SOH=$'\001'
_ble_term_DEL=$'\177'
_ble_term_IFS=$' \t\n'
_ble_term_CR=$'\r'
function blehook/declare {
local name=$1
builtin eval "_ble_hook_h_$name=()"
builtin eval "_ble_hook_c_$name=0"
}
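# Example (derived from the function above): "blehook/declare my_hook" defines
# the hook array _ble_hook_h_my_hook=() and the call counter _ble_hook_c_my_hook=0.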
# ble.pp
blehook/declare EXIT
blehook/declare INT
blehook/declare ERR
blehook/declare unload
blehook/declare ATTACH
blehook/declare DETACH
# util.sh
blehook/declare DA1R
blehook/declare DA2R
# color.sh
blehook/declare color_defface_load
blehook/declare color_setface_load
# history.sh
blehook/declare ADDHISTORY
blehook/declare history_reset_background
blehook/declare history_onleave
blehook/declare history_delete
blehook/declare history_insert
blehook/declare history_clear
blehook/declare history_message
# edit.sh
blehook/declare WINCH
blehook/declare CHPWD
blehook/declare PRECMD
blehook/declare PREEXEC
blehook/declare POSTEXEC
blehook/declare widget_bell
blehook/declare textarea_render_defer
blehook/declare info_reveal
# deprecated function
function ble-edit/prompt/print { ble/prompt/print "$@"; }
function ble-edit/prompt/process-prompt-string { ble/prompt/process-prompt-string "$@"; }
# keymap
blehook/declare keymap_load
blehook/declare keymap_vi_load
blehook/declare keymap_emacs_load
# core-syntax.sh
blehook/declare syntax_load
# core-complete.sh
blehook/declare complete_load
blehook/declare complete_insert
# for compatibility:
function blehook/.compatibility-ble-0.3 {
blehook keymap_load+='ble/util/invoke-hook _ble_keymap_default_load_hook'
blehook keymap_emacs_load+='ble/util/invoke-hook _ble_keymap_emacs_load_hook'
blehook keymap_vi_load+='ble/util/invoke-hook _ble_keymap_vi_load_hook'
blehook complete_load+='ble/util/invoke-hook _ble_complete_load_hook'
}
function blehook/.compatibility-ble-0.3/check {
if ble/is-array _ble_keymap_default_load_hook ||
ble/is-array _ble_keymap_vi_load_hook ||
ble/is-array _ble_keymap_emacs_load_hook ||
ble/is-array _ble_complete_load_hook
then
ble/bin/cat << EOF
# [Change in ble-0.4.0]
#
# Please update your blerc settings for ble-0.4+.
# In ble-0.4+, use the following form:
#
# blehook/eval-after-load keymap SHELL-COMMAND
# blehook/eval-after-load keymap_vi SHELL-COMMAND
# blehook/eval-after-load keymap_emacs SHELL-COMMAND
# blehook/eval-after-load complete SHELL-COMMAND
#
# instead of the following older form:
#
# ble/array#push _ble_keymap_default_load_hook SHELL-COMMAND
# ble/array#push _ble_keymap_vi_load_hook SHELL-COMMAND
# ble/array#push _ble_keymap_emacs_load_hook SHELL-COMMAND
# ble/array#push _ble_complete_load_hook SHELL-COMMAND
#
# Note: "blehook/eval-after-load" should be called
# after you defined SHELL-COMMAND.
#
EOF
fi
}
|
#!/usr/bin/env bash
if [[ -z $OPM_HOME ]]; then
print_error 'OPM_HOME variable is not defined.'
exit 1
fi
if [[ $VIA_INSTALLATION_SCRIPT -ne 1 ]]; then
print_error 'You have to do it through the installation script.'
exit 1
fi
AUTOMATIC_YES_FLAG=${AUTOMATIC_YES_FLAG:-0}
ALACRITTY_CONFIG=$HOME/.alacritty.yml
SRC_ALACRITTY_CONFIG=$OPM_HOME/etc/alacritty/alacritty.yml
## Backup the previous alacritty config file.
backup_file "$ALACRITTY_CONFIG"
## Remove the previous alacritty config file.
remove_file "$ALACRITTY_CONFIG"
## Install config file.
symbolic_link "$SRC_ALACRITTY_CONFIG" "$ALACRITTY_CONFIG"
print_information "Wrote config file."
|
sudo ./ssdv -e $1 > /dev/ttyAMA0
|
#!/bin/bash
#value_array=(256 1024 4096 16384 65536)
value_array=(4096)
test_all_size=81920000000 #80G
bench_db_path="/mnt/pmem/test"
bench_mem_path="/mnt/pmem/nvm"
bench_value="4096"
write_buffer_size="64" #unit:MB
nvm_buffer_size="4096" #unit:MB; memtable -> immutable ; allocate nvm_buffer_size*1.5*1.5 ? 4G*1.5*1.5=9G
#bench_benchmarks="fillseq,stats,readseq,readrandom,stats" #"fillrandom,fillseq,readseq,readrandom,stats"
#bench_benchmarks="fillrandom,stats,readseq,readrandom,stats"
#bench_benchmarks="fillrandom,stats,wait,stats,readseq,readrandom,readrandom,readrandom,stats"
#bench_benchmarks="fillrandom,stats,wait,clean_cache,stats,readseq,clean_cache,readrandom,stats"
#bench_benchmarks="fillrandom,stats,sleep20s,clean_cache,stats,readseq,clean_cache,stats,readrandom,stats"
#bench_benchmarks="fillrandom,stats,wait,clean_cache,stats,readrandom,stats"
#bench_benchmarks="fillrandom,stats,wait,clean_cache,stats,readseq,clean_cache,readrandom,stats"
#bench_benchmarks="fillseq,stats,wait,clean_cache,stats,readseq,stats"
bench_benchmarks="fillrandom,stats"
#bench_benchmarks="fillseq,stats"
bench_num="200000"
bench_readnum="1000000"
#report_fillrandom_latency="1"
report_fillrandom_latency="0"
tdate=$(date "+%Y_%m_%d_%H_%M_%S")
bench_file_path="$(dirname $PWD )/out-static/db_bench"
bench_file_dir="$(dirname $PWD )"
if [ ! -f "${bench_file_path}" ];then
bench_file_path="$PWD/out-static/db_bench"
bench_file_dir="$PWD"
fi
if [ ! -f "${bench_file_path}" ];then
echo "Error:${bench_file_path} or $(dirname $PWD )/out-static/db_bench not find!"
exit 1
fi
RUN_ONE_TEST() {
const_params="
--db_disk=$bench_db_path \
--value_size=$bench_value \
--benchmarks=$bench_benchmarks \
--num_levels=2 \
--num=$bench_num \
--reads=$bench_readnum \
--db_mem=$bench_mem_path \
--write_buffer_size=$write_buffer_size \
--nvm_buffer_size=$nvm_buffer_size \
--report_fillrandom_latency=$report_fillrandom_latency \
"
#cmd="sudo $bench_file_path $const_params"
cmd="nohup sudo $bench_file_path $const_params >>out.out 2>&1"
if [ "$1" == "numa" ];then
#cmd="sudo numactl -N 1 $bench_file_path $const_params"
cmd="nohup sudo numactl -N 1 $bench_file_path $const_params >>out.out 2>&1"
fi
echo $cmd >out.out
echo $cmd
eval $cmd
}
CLEAN_CACHE() {
if [ -n "$bench_db_path" ];then
sudo rm -f $bench_db_path/*
fi
if [ -n "$bench_mem_path" ];then
sudo rm -f $bench_mem_path/*
fi
sleep 2
sync
echo 3 | sudo tee -a /proc/sys/vm/drop_caches > /dev/null
sleep 2
}
REDO_MOUNT_SSD() {
sudo umount "/mnt/ssd/"
sudo mkfs.ext4 "/dev/sdb" << EOF
y
EOF
sudo mount "/dev/sdb" "/mnt/ssd"
}
COPY_OUT_FILE(){
mkdir $bench_file_dir/result_novelsm_$tdate > /dev/null 2>&1
res_dir=$bench_file_dir/result_novelsm_$tdate/value-$bench_value
mkdir $res_dir > /dev/null 2>&1
\cp -f $bench_file_dir/compaction.csv $res_dir/
\cp -f $bench_file_dir/OP_DATA $res_dir/
\cp -f $bench_file_dir/OP_TIME.csv $res_dir/
\cp -f $bench_file_dir/out.out $res_dir/
\cp -f $bench_file_dir/Latency.csv $res_dir/
}
RUN_ALL_TEST() {
for value in ${value_array[@]}; do
CLEAN_CACHE
#REDO_MOUNT_SSD
bench_value="$value"
bench_num="`expr $test_all_size / $bench_value`"
RUN_ONE_TEST
if [ $? -ne 0 ];then
exit 1
fi
COPY_OUT_FILE
sleep 5
done
}
RUN_ALL_TEST
|
#!/usr/bin/env bash
VERSIONNAME=`git describe --always --tags | sed -e 's/^v//'`
# Android versionCode from git tag vX.Y.Z-123-gSHA
IFS=. read major minor patch dev sha <<<"${VERSIONNAME//-/.}"
VERSIONCODE=$(($major*100000))
VERSIONCODE=$(($(($minor*10000)) + $VERSIONCODE))
VERSIONCODE=$(($(($patch*1000)) + $VERSIONCODE))
VERSIONCODE=$(($(($dev)) + $VERSIONCODE))
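# Worked example (hypothetical tag v4.1.2-34-gabc1234): VERSIONNAME=4.1.2-34-gabc1234,
# so major=4 minor=1 patch=2 dev=34, and VERSIONCODE = 4*100000 + 1*10000 + 2*1000 + 34 = 412034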
MANIFEST_FILE=android/AndroidManifest.xml
# manifest package
if [ "$2" = "master" ]; then
QGC_PKG_NAME="org.mavlink.qgroundcontroldaily"
sed -i -e 's/package *= *"[^"]*"/package="'$QGC_PKG_NAME'"/' $MANIFEST_FILE
echo "Android package name: $QGC_PKG_NAME"
fi
# android:versionCode
if [ -n "$VERSIONCODE" ]; then
sed -i -e "s/android:versionCode=\"[0-9][0-9]*\"/android:versionCode=\"$VERSIONCODE\"/" $MANIFEST_FILE
echo "Android version: ${VERSIONCODE}"
else
echo "Error versionCode empty"
exit 0 # don't cause the build to fail
fi
# android:versionName
if [ -n "$VERSIONNAME" ]; then
sed -i -e 's/versionName *= *"[^"]*"/versionName="'$VERSIONNAME'"/' $MANIFEST_FILE
echo "Android name: ${VERSIONNAME}"
else
echo "Error versionName empty"
exit 0 # don't cause the build to fail
fi
|
#!/bin/bash
#Copyright (C) 2021 Intel Corporation
#SPDX-License-Identifier: Apache-2.0
if [ -z "$1" ];
then
echo "- Missing mandatory arguments:"
echo " - Usage: ./start_p4ovs.sh <WORKDIR>"
exit 1
fi
#export http_proxy=<your proxy>
#export https_proxy=<your proxy>
WORKDIR=$1
SCRIPTS_DIR=/root/scripts
export PATH="/root/scripts/:${PATH}"
export PATH="$WORKDIR/P4-OVS/:${PATH}"
get_p4ovs_repo() {
chmod +x ${SCRIPTS_DIR}/get_p4ovs_repo.sh && \
bash ${SCRIPTS_DIR}/get_p4ovs_repo.sh "$WORKDIR"
}
build_p4sde() {
chmod +x ${SCRIPTS_DIR}/build_p4sde.sh && \
bash ${SCRIPTS_DIR}/build_p4sde.sh "$WORKDIR"
}
install_dependencies() {
cd "$WORKDIR"/P4-OVS && sed -i 's/sudo //g' install_dep_packages.sh && \
bash ./install_dep_packages.sh "$WORKDIR"
#...Removing Dependencies Source Code After Successful Installation...#
rm -rf "${WORKDIR}/P4OVS_DEPS_SRC_CODE" || exit 1
}
build_p4c () {
chmod +x ${SCRIPTS_DIR}/build_p4c.sh && \
bash ${SCRIPTS_DIR}/build_p4c.sh "$WORKDIR"
}
build_p4ovs () {
cd "$WORKDIR"/P4-OVS && bash ./build-p4ovs.sh "$WORKDIR"/p4-sde/install
}
if [ -z "${INSTALL_DEPENDENCIES}" ] || [ "${INSTALL_DEPENDENCIES}" == "y" ]
then
get_p4ovs_repo
build_p4sde
install_dependencies
build_p4c
fi
build_p4ovs
|
#!/bin/bash
docker push williamdrew/jenkins-master
|
#!/bin/bash
yum update -y
amazon-linux-extras install docker -y
service docker start
systemctl enable docker
usermod -a -G docker ec2-user
yum install git -y
mkfs -t ext4 /dev/xvdb
mkdir /usr/local/workspace
mount /dev/xvdb /usr/local/workspace
echo "/dev/xvdb /usr/local/workspace ext4 defaults,nofail 0 2" >> /etc/fstab
git clone https://github.com/richpsharp/nci-ndr-analysis.git /usr/local/workspace/nci-ndr-analysis
docker run --rm -p 8080:8080 -v `pwd`:/var/workspace natcap/nci-ndr-execution:1 nci_ndr_manager.py --app_port 8080 --external_ip 10.0.1.57 > docker_log.txt
|
#!/bin/bash
##################################
for d in ./*/ ; do
cd $d
echo Motif: $d
for line in ./*/ ; do
cd $line
./flysim.out -pro network.pro -conf network.conf -t 4 -s moderate -nmodel LIF
rm ConfsInfo.log
rm network.log
rm flysim.out
cd ..
done
cd ..
done
##################################
|
# generated from colcon_zsh/shell/template/prefix_chain.zsh.em
# This script extends the environment with the environment of other prefix
# paths which were sourced when this file was generated as well as all packages
# contained in this prefix path.
# function to source another script with conditional trace output
# first argument: the path of the script
_colcon_prefix_chain_zsh_source_script() {
if [ -f "$1" ]; then
if [ -n "$COLCON_TRACE" ]; then
echo ". \"$1\""
fi
. "$1"
else
echo "not found: \"$1\"" 1>&2
fi
}
# source chained prefixes
# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
COLCON_CURRENT_PREFIX="/opt/ros/foxy"
_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
COLCON_CURRENT_PREFIX="/home/saitama1/ros2_foxy/install"
_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
COLCON_CURRENT_PREFIX="/home/saitama1/adehome/AutowareAuto/install"
_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
# source this prefix
# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
COLCON_CURRENT_PREFIX="$(builtin cd -q "`dirname "${(%):-%N}"`" > /dev/null && pwd)"
_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
unset COLCON_CURRENT_PREFIX
unset _colcon_prefix_chain_zsh_source_script
|
#!/bin/bash
set -euo pipefail
if oc -n "openshift-operators-redhat" get deployment elasticsearch-operator -o name > /dev/null 2>&1 ; then
exit 0
fi
pushd ../elasticsearch-operator
LOCAL_IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY=127.0.0.1:5000/openshift/elasticsearch-operator-registry \
make elasticsearch-catalog-deploy
IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY=image-registry.openshift-image-registry.svc:5000/openshift/elasticsearch-operator-registry \
make -C ../elasticsearch-operator elasticsearch-operator-install
popd
|
#!/bin/bash
APP_ID=581330
wget https://steamcdn-a.akamaihd.net/client/installer/steamcmd_linux.tar.gz
tar xfvz steamcmd_linux.tar.gz
yum -y install expect
ls -la
./steamcmd.exp ${APP_ID}
|
#!/bin/bash
set -e
NAME=$1
REPO=$2
REPO_BRANCH=$3
REPO_UPSTREAM=$4
REPO_UPSTREAM_BRANCH=$5
REPO_DIR=$6
EXPECTED_COMMIT_SHA=$7
# Create the parent directory if it doesn't exist.
if [[ ! -d "${REPO_DIR}" ]]; then
mkdir -p ${REPO_DIR}
fi
# If repo already cloned, remove it.
if [[ -d "${REPO_DIR}/${NAME}" ]]; then
rm -rf ${REPO_DIR}/${NAME}
fi
# Clone git repo and merge with upstream.
echo "Cloning ${REPO} to ${REPO_DIR}..."
cd ${REPO_DIR}
git clone ${REPO}
echo "Adding upstream repo ${REPO_UPSTREAM}..."
cd ${REPO_DIR}/${NAME}
git remote add upstream ${REPO_UPSTREAM}
if [[ ${REPO_BRANCH} != "master" ]]; then
echo "Checking out origin ${REPO_BRANCH}..."
git checkout -b ${REPO_BRANCH} origin/${REPO_BRANCH}
fi
echo "Fetching upstream..."
git fetch upstream
echo "Merging upstream/${REPO_UPSTREAM_BRANCH}..."
git merge upstream/${REPO_UPSTREAM_BRANCH}
# Push change to origin.
set +e
echo "Pushing to origin ${REPO_BRANCH}..."
git push origin ${REPO_BRANCH}
if [[ $? != 0 ]]
then
>&2 echo "ERROR: Unable to push change to origin ${REPO_BRANCH}."
echo "Cleaning up..."
rm -rf ${REPO_DIR}/${NAME}
exit 1
fi
# Cleanup
echo "Cleaning up..."
rm -rf ${REPO_DIR}/${NAME}
|
#!/bin/bash
#SBATCH -J astar # job name
#SBATCH -o Pipeline.o%j # output and error file name (%j expands to jobID)
#SBATCH -n 1 # total number of mpi tasks requested
#SBATCH -N 1 # total number of nodes requested
#SBATCH -p largemem # queue (partition) -- normal, development, etc.
#SBATCH -t 03:00:00 # run time (hh:mm:ss) - 3 hours
#SBATCH --mail-type=begin # email me when the job starts
#SBATCH --mail-type=end # email me when the job finishes
#SBATCH -A TG-ASC130023
module swap intel gcc/4.9.1
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
SEQ="../../seqs/Balibase/Ref1/4_medium_med_id/1gdoA.fasta"
CMD="../../bin/msa_pastar"
THREADS="-t 32"
HASH="-y FZORDER"
HASH_SHIFT="-s 12"
OPT="$THREADS $HASH $HASH_SHIFT"
cd $DIR"/../../seqs/Balibase"
strace -ve wait4 /usr/bin/time -v $CMD $OPT $SEQ >> $SEQ.output 2>&1
|
#!/bin/bash
# This script enables kdump on HANA Large Instances (Type 1/2)
ExitIfFailed()
{
if [ "$1" != 0 ]; then
echo "$2 ! Exiting !!!!"
exit 1
fi
}
# operating system supported by this script
supported_os=(
"SLES"
"SLES_SAP"
)
# operating system versions supported by this script
supported_version=( "12-SP2"
"12-SP3"
"12-SP4"
"12-SP5"
"15-SP1"
"15-SP2"
)
# get OS name and OS version
# /etc/os-release file has this information
# in the form of key=value pairs so these can be
# imported into shell variables
eval $(cat /etc/os-release | sed -e s"@: @=@")
# check if the os and version is supported by this script
supported="false"
for i in "${supported_os[@]}"; do
if [[ "$NAME" == "$i" ]]; then
for j in "${supported_version[@]}"; do
if [[ "$VERSION" == "$j" ]]; then
supported="true"
break
fi
done
break
fi
done
if [[ "$supported" == "false" ]]; then
echo "This script does not support current OS $NAME, VERSION $VERSION. Please raise request to support this OS and Version"
exit 1
fi
# check if the kexec-tool is enabled
rpm -q kexec-tools
ExitIfFailed $? "kxec-tools required to enable kdump, please install"
# check if the GRUB_CMDLINE_LINUX_DEFAULT parameter exist in /etc/default/grub file
# missing command line options will be appended to GRUB_CMDLINE_LINUX_DEFAULT
egrep "^GRUB_CMDLINE_LINUX_DEFAULT" /etc/default/grub
if [[ "$?" == "1" ]]; then # in this case append the parameter to the file
echo "GRUB_CMDLINE_LINUX_DEFAULT=\"\"" >> /etc/default/grub
ExitIfFailed $? "Enable to add GRUB_CMDLINE_LINUX_DEFAULT parameter in /etc/default/grub"
fi
ReplaceLowHighInGrubFile()
{
# get low and high value reported by kdumptool calibrate
# kdumptool calibrate reports key value pair
# so these can be imported in shell environment
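# e.g. (hypothetical output) lines like "Low: 72" and "High: 512"
# become the shell variables Low=72 and High=512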
eval $(kdumptool calibrate | sed -e s"@: @=@")
ExitIfFailed $? "Failed to run kdumptool calibrate command"
# get system memory in TB
mem=$(free --tera | awk 'FNR == 2 {print $2}')
ExitIfFailed $? "Failed to get memory using free command"
# high memory to use for kdump is calculated according to system
# if the total memory of a system is greater than 1TB
# then the high value to use is (High From kdumptool * RAM in TB + LUNS / 2)
high_to_use=$High
if [ $mem -gt 1 ]; then
high_to_use=$(($High*$mem))
fi
# Add LUNS/2 to high_to_use
high_to_use=$(($high_to_use + $(($(lsblk | grep disk | wc -l)/2))))
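# e.g. (hypothetical values) High=512, mem=4 (TB) and 6 LUNs:
# high_to_use = 512*4 + 6/2 = 2051 (MB)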
# remove high and low value in /etc/default/grub
sed -i "s/crashkernel=[0-9]*[MG],high//gI" /etc/default/grub
sed -i "s/crashkernel=[0-9]*[MG],low//gI" /etc/default/grub
# load /etc/default/grub value in env variables to append crashkernel high, low value
source /etc/default/grub
# append crashkernel high,low value to GRUB_CMDLINE_LINUX_DEFAULT
GRUB_CMDLINE_LINUX_DEFAULT="\"$GRUB_CMDLINE_LINUX_DEFAULT crashkernel=${high_to_use}M,high crashkernel=${Low}M,low\""
# replace GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub with new value
# using separator # because / can already exist in GRUB_CMDLINE_LINUX_DEFAULT and then the sed command would not work
sed -i "s#^GRUB_CMDLINE_LINUX_DEFAULT=.*#GRUB_CMDLINE_LINUX_DEFAULT=$GRUB_CMDLINE_LINUX_DEFAULT#gI" /etc/default/grub
ExitIfFailed $? "Enable to change crashkernel parameters in /etc/default/grub"
}
# there can be 4 cases for the crashkernel parameter in /proc/cmdline
# Case 1: extended kernel parameter for crashkernel
# Case 2: crashkernel parameter specified using high and low values
# Case 3: crashkernel parameter specified using only a high value
# Case 4: crashkernel entry does not exist
# in Case 1 the parameter can be used as it is.
# in Cases 2, 3 and 4 replace these parameters
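# e.g. (hypothetical) a Case 2 entry looks like: crashkernel=512M,high crashkernel=72M,low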
grep "crashkernel=16G-4096G:512M,4096G-16384G:1G,16384G-32768G:2G,32768G-:3G@4G" /proc/cmdline
if [[ "$?" == "1" ]]; then # can be case 2,3,4
# case 2,3,4
ReplaceLowHighInGrubFile
fi
# commandline parameters which must be present in order to make sure
# that kdump works
commandline_params=(
"splash=verbose"
"mce=ignore_ce"
"nomodeset"
"numa_balancing=disable"
"transparent_hugepage=never"
"intel_idle.max_cstate=1"
"processor.max_cstate=1"
"quiet"
"showopts"
"rw"
)
# load /etc/default/grub value in env variables to append commandline params
source /etc/default/grub
for i in "${commandline_params[@]}"; do
grep $i /proc/cmdline
if [[ "$?" == "1" ]]; then # this option is not present in cmdline
GRUB_CMDLINE_LINUX_DEFAULT="$GRUB_CMDLINE_LINUX_DEFAULT $i"
fi
done
# replace old value of GRUB_CMDLINE_LINUX_DEFAULT with new value
GRUB_CMDLINE_LINUX_DEFAULT="\"$GRUB_CMDLINE_LINUX_DEFAULT\""
sed -i "s#^GRUB_CMDLINE_LINUX_DEFAULT=.*#GRUB_CMDLINE_LINUX_DEFAULT=$GRUB_CMDLINE_LINUX_DEFAULT#gI" /etc/default/grub
ExitIfFailed $? "Enable to change commandline parameters in /etc/default/grub"
# set KDUMP_SAVEDIR to file:///var/crash in /etc/sysconfig/kdump
sed -i "s#^KDUMP_SAVEDIR=\".*\"#KDUMP_SAVEDIR=\"file:\/\/\/var\/crash\"#gI" /etc/sysconfig/kdump
# set KDUMP_DUMPLEVEL to 31(recommended)
sed -i "s/^KDUMP_DUMPLEVEL=[0-9]*/KDUMP_DUMPLEVEL=31/gI" /etc/sysconfig/kdump
# set kernel.sysrq to 184(recommended)
echo 184 > /proc/sys/kernel/sysrq
ExitIfFailed $? "Failed to set kernel.sysrq value to 184"
# update the changes in /boot/grub2/grub.cfg so that after reboot these changes reflect in /proc/cmdline
grub2-mkconfig -o /boot/grub2/grub.cfg
ExitIfFailed $? "Unable to update /boot/grub2/grub.cfg"
# stop kdump service
systemctl stop kdump.service
ExitIfFailed $? "Failed to stop kdump service"
# create new kdump initrd
mkdumprd -f
ExitIfFailed $? "Unable to create kdump initrd"
# enable kdump service so that on system reboot the kdump service is automatically started
systemctl enable kdump.service
ExitIfFailed $? "Error in enabling kdump service"
echo "KDUMP is successfully enabled, please reboot the system to apply the change"
exit 0
|
#!/bin/sh
env="MVE"
scenario="yyz3-avoid" # predator-prey
algo="mappo"
exp="MVE-09-20-train-3a-mo-form-avoid2-R"
num_landmarks=1
num_agents=3
num_obstacles=2
seed=1
echo "env is ${env}, scenario is ${scenario}, algo is ${algo}, exp is ${exp}, max seed is ${seed}"
echo "seed is ${seed}:"
CUDA_VISIBLE_DEVICES=1 \
python render/render_mve.py \
--env_name ${env} \
--algorithm_name ${algo} \
--experiment_name ${exp} \
--scenario_name ${scenario} \
--user_name "yuzi" \
--seed ${seed} \
--n_training_threads 1 \
--n_rollout_threads 1 \
--use_render \
--episode_length 10 \
--render_episodes 5 \
--model_dir "/home/yanyz/yanyz/gitlab/onpolicy/onpolicy/scripts/results/MVE/yyz3-avoid/mappo/MVE-09-20-train-3a-mo-form-avoid2/run1/models" \
--use_recurrent_policy \
--usegui \
--num_agents ${num_agents} \
--num_landmarks ${num_landmarks} \
--num_obstacles ${num_obstacles} \
--ideal_side_len 2.0 \
#--save_gifs
#--wandb_name "tartrl"
#--use_wandb
|
#!/bin/sh
# If a command fails then the deploy stops
set -e
printf "\033[0;32mDeploying updates to GitHub...\033[0m\n"
# Go To Public folder
cd public
# Add changes to git.
git add .
# Commit changes.
msg="rebuilding site $(date)"
if [ -n "$*" ]; then
msg="$*"
fi
git commit -m "$msg"
# Push source and build repos.
git push origin master
|
wget https://data.cityofchicago.org/download/sgsc-bb4n/application/zip
unzip dsv15pWE6oiZTEMkQEowJ5f4bmF7zvlR_Qh53s1Ub7A\?filename\=WardPrecincts.zip
export OGR_WKT_PRECISION=5
ogr2ogr -f "GeoJSON" -lco COORDINATE_PRECISION=5 ward_precincts.geojson WardPrecincts.shp -t_srs EPSG:4269
|
rsync -vt \
$1:/home/ubuntu/polaq_eval/$2/*.json \
output/$3/
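# Usage sketch (argument meanings inferred from the rsync call above):
# $1 = remote host, $2 = remote experiment subdirectory, $3 = local output subdirectory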
|
#!/bin/bash
# VERSION=Template will substitute the placeholders and run the template version
LOCATION=/home/node/app
INFILE=$LOCATION/webpack.config.template.js
OUTFILE=$LOCATION/webpack.config.run.js
if [[ $VERSION == 'Template' ]]; then
echo "Running the template version:"
echo " SERVER_PORT: " + $SERVER_PORT;
echo " SERVER_HTTPS: " + $SERVER_HTTPS;
echo " SERVER_HOST: " + $SERVER_HOST;
echo " SERVER_PUBLIC: " + $SERVER_PUBLIC;
echo " PROXY_TARGET: " + $PROXY_TARGET;
CMD_SERVER_PORT="s/SERVER_PORT/${SERVER_PORT}/g"
CMD_SERVER_HTTPS="s/SERVER_HTTPS/${SERVER_HTTPS}/g"
CMD_SERVER_HOST="s/SERVER_HOST/${SERVER_HOST}/g"
CMD_SERVER_PUBLIC="s/SERVER_PUBLIC/${SERVER_PUBLIC}/g"
CMD_PROXY_TARGET="s/PROXY_TARGET/${PROXY_TARGET}/g"
sed $CMD_PROXY_TARGET $INFILE | sed $CMD_SERVER_PORT | sed $CMD_SERVER_HTTPS | sed $CMD_SERVER_HOST | sed $CMD_SERVER_PUBLIC > $OUTFILE
echo "<---------------------------------------->"
cat $OUTFILE
echo "<---------------------------------------->"
fi
npm run start$VERSION
|
#!/bin/bash
# Copyright 2016 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -z "$1" ]; then
echo "usage: $0 <domain>"
exit 1
fi
SERIAL=$(date +%s)
CA_KEY=/etc/certs/${1#*.}.key
CA_CHAIN=/etc/certs/${1#*.}.chain
CRT_KEY=/etc/certs/${1}.key.clear
CSR_PEM=/etc/certs/${1}.csr
CRT_PEM=/etc/certs/${1}.crt
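# e.g. for $1=www.example.com (hypothetical): ${1#*.} strips through the first dot,
# so CA_KEY=/etc/certs/example.com.key while CRT_KEY=/etc/certs/www.example.com.key.clear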
mkdir -p /etc/certs
# CA cert
openssl req -x509 -nodes -days 365 -newkey rsa:4096 -sha256 \
-keyout $CA_KEY -out $CA_CHAIN \
-subj "/C=FI/ST=Uusimaa/L=Helsinki/O=Nitor Creations Oy/OU=IT/CN=$1"
# CSR
openssl req -nodes -days 365 -newkey rsa:4096 -sha256 \
-keyout $CRT_KEY -out $CSR_PEM \
-subj "/C=FI/ST=Uusimaa/L=Helsinki/O=Nitor Creations Oy/OU=IT/CN=$1"
# Cert
openssl x509 -req -in $CSR_PEM -CA $CA_CHAIN -CAkey $CA_KEY \
-set_serial $SERIAL -out $CRT_PEM
|
#!/usr/bin/env bash
set -eu
function error {
echo "#----------------------------"
echo "# ERROR: $1"
echo "#----------------------------\n"
exit 1
}
if [ $# != 1 ]; then
error "Please specify the version number: npm run finish-release 10.0.1"
fi
npm run lint
npm run test
npm run dist
NEW_VERSION=$1
BRANCH=`git rev-parse --abbrev-ref HEAD`
function change_version {
npm version ${NEW_VERSION}
}
function check_branch {
if [ ${BRANCH} == 'master' ]; then
echo "Master branch"
else
error "Invalid branch name ${BRANCH}"
fi
}
function exists_tag {
if git rev-parse v${NEW_VERSION} >/dev/null 2>&1; then
echo "Found tag"
else
error "Tag not found"
fi
}
function uncommitted_changes {
if [[ `git status --porcelain` ]]; then
error "There are uncommitted changes in the working tree."
fi
}
function publish {
npm publish --access public
}
function gitPush {
git push && git push --tags
}
function generate_release_notes {
npx gren release --username=raulanatol --repo=react-inline-loaders
}
uncommitted_changes
check_branch
change_version
exists_tag
publish
gitPush
generate_release_notes
|
#!/bin/bash
set -eu
cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $cur/../_utils/test_prepare
WORK_DIR=$TEST_DIR/$TEST_NAME
function run() {
# 1. test sync fetch binlog met error and reset binlog streamer with remote binlog
# with a 5 rows insert txn: 1 * FormatDesc + 1 * PreviousGTID + 1 * GTID + 1 * BEGIN + 5 * (Table_map + Write_rows) + 1 * XID
# here we fail at the third write rows event, sync should retry and auto recover without any duplicate event
export GO_FAILPOINTS="github.com/pingcap/ticdc/dm/syncer/GetEventErrorInTxn=13*return(3)"
run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
check_contains 'Query OK, 2 rows affected'
run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
check_metric $MASTER_PORT 'start_leader_counter' 3 0 2
run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
dmctl_operate_source create $cur/conf/source1.yaml $SOURCE_ID1
run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
"start-task $cur/conf/dm-task.yaml --remove-meta"
check_metric $WORKER1_PORT "dm_worker_task_state{source_id=\"mysql-replica-01\",task=\"test\",worker=\"worker1\"}" 10 1 3
# wait safe-mode pass
check_log_contain_with_retry "disable safe-mode after task initialization finished" $WORK_DIR/worker1/log/dm-worker.log
run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
check_log_contain_with_retry "reset replication binlog puller" $WORK_DIR/worker1/log/dm-worker.log
check_log_contain_with_retry "discard event already consumed" $WORK_DIR/worker1/log/dm-worker.log
check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
# 2. test relay log retry relay with GTID
# with a 5 rows insert txn: 1 * FormatDesc + 1 * PreviousGTID + 1 * GTID + 1 * BEGIN + 5 * (Table_map + Write_rows) + 1 * XID
# here we fail at the third write rows event, sync should retry and auto recover without any duplicate event
export GO_FAILPOINTS="github.com/pingcap/ticdc/dm/relay/RelayGetEventFailed=15*return(3);github.com/pingcap/ticdc/dm/relay/retry/RelayAllowRetry=return"
run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml
check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT
cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml
dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2
run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2
check_contains 'Query OK, 2 rows affected'
run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
"start-task $cur/conf/dm-task-relay.yaml --remove-meta"
check_metric $WORKER2_PORT "dm_worker_task_state{source_id=\"mysql-replica-02\",task=\"test_relay\",worker=\"worker2\"}" 10 1 3
check_sync_diff $WORK_DIR $cur/conf/diff_relay_config.toml
run_sql_source2 "flush logs;"
run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
"start-relay -s $SOURCE_ID2 worker2" \
"\"result\": true" 1
run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
"query-status -s $SOURCE_ID2" \
"\"relayCatchUpMaster\": true" 1
run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2
check_log_contain_with_retry "retrying to read binlog" $WORK_DIR/worker2/log/dm-worker.log
check_log_contain_with_retry "discard duplicate event" $WORK_DIR/worker2/log/dm-worker.log
check_sync_diff $WORK_DIR $cur/conf/diff_relay_config.toml
# check relay log binlog file size is the same as master size
run_sql_source2 "show master status;"
binlog_file=$(grep "File" $TEST_DIR/sql_res.$TEST_NAME.txt | awk -F: '{print $2}' | xargs)
binlog_pos=$(grep "Position" $TEST_DIR/sql_res.$TEST_NAME.txt | awk -F: '{print $2}' | xargs)
server_uuid=$(tail -n 1 $WORK_DIR/worker2/relay-dir/server-uuid.index)
relay_log_size=$(ls -al $WORK_DIR/worker2/relay-dir/$server_uuid/$binlog_file | awk '{print $5}')
[ "$binlog_pos" -eq "$relay_log_size" ]
}
# also cleanup dm processes in case of last run failed
cleanup_process $*
cleanup_data dup_event1 dup_event_relay
run
cleanup_process $*
echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"
|
#!/bin/bash
#$ -V ## pass all environment variables to the job, VERY IMPORTANT
#$ -N filt1 ## job name
#$ -S /bin/bash
#$ -cwd ## Execute the job from the current working directory
#####$ -q lowmemory.q ## queue name
#$ -pe multi 16
# extract the variants in gnomAD with PLINK at MAF 5%
# filter for Q30, vqslod > 0, PASS, then work in families and filter for DP > 10 and HET/HOMO filtering
set -e
cd /storage/home/users/av45/exome/reanalysis
gatk="/usr/local/Modules/modulefiles/tools/gatk/3.7.0/"
genome="/storage/home/users/av45/exome/bundle_hg19/"
module load plink/2.0
#plink2 --vcf ../gnomad/gnomad.exomes.r2.1.1.sites.vcf.bgz --extract-if-info "AF_nfe_nwe <= 0.05" \
#--out filter0.05.North_western_euro --write-snplist --threads 24 --set-missing-var-ids "@:#"
#plink2 --vcf recalibrated.filtered.vcf --extract filter0.05.North_western_euro.snplist \
#--export vcf --out recalibrated_MAF0.05_nwetest --allow-extra-chr --set-missing-var-ids "@:#"
# Change VCF v4.3 to VCF v4.2
#sed -i '1s/VCFv4.3/VCFv4.2/' recalibrated_MAF0.05_nwetest.vcf
# NEED TO ACTIVATE VCFTOOLS ENV! # vcftools.yaml has the specs used
#vcftools --vcf recalibrated_MAF0.05_nwetest.vcf --minQ 30 --recode --recode-INFO-all --out test_vcftools_Q30
bcftools view -i 'VQSLOD >= 0.0' test_vcftools_Q30.recode.vcf -o test_vcftools_Q30_VQSLOD.vcf
vcftools --vcf test_vcftools_Q30_VQSLOD.vcf --remove-filtered-all --recode --recode-INFO-all --out test_vcftools_Q30_VQSLOD_PASS
echo -e "\nNow work in families!"
cd annovar
echo "Running ANNOVAR"
perl table_annovar.pl ../test_vcftools_Q30_VQSLOD_PASS -buildver hg19 -out ../test_vcftools_Q30_VQSLOD_PASS.myanno -remove \
-protocol refGene,dbnsfp30a,dbscsnv11,gnomad_exome,tfbsConsSites -operation gx,f,f,f,r -nastring . -vcfinput -polish
cd ../
#bcftools view -i "gnomAD_exome_NFE <= 0.05" test_vcftools_Q30_VQSLOD_PASS.recode.myanno.vcf.hg19_multianno.vcf -o Post_ANNOVAR_MAF_FILTERING_TEST.vcf # This will produce a different result. In the beginning we filtered for the north-west EU MAF - now for all non-Finnish EU!
|
# GaTech Aliases
# Add GaTech aliases
alias gt-docs="cd /Users/patrickward/Dropbox\ \(Personal\)/GT"
# alias start_bonnie_docker="docker run --interactive --tty -v `pwd`:'/root/gios' -v '$HOME/.bonnie':'/root/.bonnie/' -w '/root/gios' --entrypoint=/bin/bash gtomscs/os"
# export GIOS_GRADER_DIR="$HOME/Code/gt/ios-cs6200/TA-ios-cs8803-02/gt-cs6200-graders"
# alias gt-start-bonnie-docker="docker run --interactive --tty -v $GIOS_GRADER_DIR:'/root/gios' -v '$HOME/.bonnie':'/root/.bonnie/' -w '/root/gios' --entrypoint=/bin/bash gtomscs/os"
# alias gt-docker-bonnie="docker run --interactive --tty --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v $GIOS_GRADER_DIR:'/root/gios' -v '$HOME/.bonnie':'/root/.bonnie/' -w '/root/gios' --entrypoint=/bin/bash patrickward/omscs6200"
export GRPC_EXPLORE_DIR="$HOME/Code/gt/ios-cs6200/grpc/grpc-exploration"
alias gt-start-grpc-docker="docker run --interactive --tty -v $GRPC_EXPLORE_DIR:'/root/grpc' -w '/root/grpc' --entrypoint=/bin/bash fsgeek/omscs6200"
# Note the use of "--cap-add=SYS_PTRACE and --security-opt seccomp=unconfined"
# to get past the following error: "ptrace operation not permitted"
export GIOS_PR4_DIR="$HOME/Code/gt/gios-cs6200/projects/pr4"
# alias gt-docker-pr4="docker run --interactive --tty --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v $GIOS_PR4_DIR:'/root/pr4' -w '/root/pr4' --entrypoint=/bin/bash patrickward/omscs6200:latest"
alias gt-docker-pr4-2="docker run --interactive --tty --cap-add=SYS_ADMIN --device=/dev/fuse --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v $GIOS_PR4_DIR:'/root/pr4' -w '/root/pr4' --entrypoint=/bin/bash fsgeek/omscs6200"
export GIOS_GRADER_DIR="$HOME/Code/gt/gios-cs6200/projects/gt-cs6200-graders"
alias gt-docker-bonnie="docker run --interactive --tty --cap-add=SYS_ADMIN --device=/dev/fuse --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v $GIOS_GRADER_DIR:'/root/pr4' -w '/root/pr4' --entrypoint=/bin/bash patrickward/omscs6200:latest"
export GIOS_GRPC_BUILD_DIR="$HOME/Code/gt/gios-cs6200/grpc-build"
alias gt-grpc-build="docker run --interactive --tty --cap-add=SYS_ADMIN --device=/dev/fuse --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v $GIOS_GRPC_BUILD_DIR:'/root/grpc' -w '/root/grpc' --entrypoint=/bin/bash fsgeek/omscs6200:latest"
# CS6475-CP
# alias gt_cp_dir="cd $HOME/Code/gt/cp-cs6475"
# alias activateCompPhoto="source $HOME/anaconda/bin/activate CompPhoto"
# Alias to go to CS6475-CompPhoto directory and start anaconda
# Note: had to ensure the anaconda path was set properly as well to remove jupyter errors
# alias cplabs="export PATH="$HOME/anaconda/bin:$PATH" && cd $HOME/Code/gt/cp-cs6475/lab_exercises && activateCompPhoto && jupyter notebook"
|
#!/bin/bash
cp vendor/modionut/php-codesniffer-hooks/src/pre-commit .git/hooks/pre-commit
chmod +x .git/hooks/pre-commit
|
#!/bin/bash
# Script that runs in the testing build stage of Travis and is responsible for testing
# the project in different Travis jobs of the current build stage.
# The script should immediately exit if any command in the script fails.
set -e
echo ""
echo "Building sources and running tests. Running mode: ${MODE}"
echo ""
# Go to project dir
cd $(dirname $0)/../..
# Include sources.
source scripts/ci/sources/mode.sh
source scripts/ci/sources/tunnel.sh
# Get the commit diff and skip the build if only .md files have changed.
# Should not apply to master builds.
if [ "$TRAVIS_PULL_REQUEST" = "true" ]; then
fileDiff=$(git diff --name-only $TRAVIS_BRANCH...HEAD)
if [[ ${fileDiff} =~ ^(.*\.md\s*)*$ ]]; then
echo "Skipping tests because only markdown files changed."
exit 0
fi
fi
start_tunnel
wait_for_tunnel
if is_lint; then
$(npm bin)/gulp ci:lint
elif is_e2e; then
# Run e2e tests inside of Xvfb because we need to test the HTML Fullscreen API's that cannot
# be tested within Chrome headless.
xvfb-run -a --server-args='-screen 0, 1024x768x16' $(npm bin)/gulp ci:e2e
elif is_aot; then
$(npm bin)/gulp ci:aot
elif is_payload; then
$(npm bin)/gulp ci:payload
elif is_unit; then
$(npm bin)/gulp ci:test
elif is_prerender; then
$(npm bin)/gulp ci:prerender
fi
# Upload coverage results if those are present.
if [ -f dist/coverage/coverage-summary.json ]; then
$(npm bin)/gulp ci:coverage
fi
teardown_tunnel
|
#!/usr/bin/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: Visual Studio Code
# Pre-configures the MySQL clients, if the service is available
# ==============================================================================
declare host
declare password
declare port
declare username
if bashio::services.available "mysql"; then
host=$(bashio::services "mysql" "host")
password=$(bashio::services "mysql" "password")
port=$(bashio::services "mysql" "port")
username=$(bashio::services "mysql" "username")
mkdir -p /etc/mysql/conf.d
{
echo "[client]"
echo "host=${host}"
echo "password=\"${password}\""
echo "port=${port}"
echo "user=\"${username}\""
} > /etc/mysql/conf.d/service.cnf
fi
|
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_PORT=${KUBELET_PORT:-10250}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="gcr.io/google-containers/perl"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
thirdpartyresources="thirdpartyresources"
daemonsets="daemonsets"
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
[[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
[[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}
# Starts "kubect proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy
PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"
if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=
local attempts=0
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done
kube::log::status "kubectl proxy running on port ${PROXY_PORT}"
# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
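# Illustrative usage (sketch): start the proxy under a custom API prefix and
# probe it through the discovered port:
#   start-proxy /custom-prefix
#   curl "http://127.0.0.1:${PROXY_PORT}/custom-prefix/healthz"
#   stop-proxy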
function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
local status
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" == "${desired}" ]; then
return 0
fi
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
}
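# Illustrative usage: assert that the proxied pods endpoint answers 200 and an
# unknown path answers 404:
#   check-curl-proxy-code "/api/v1/namespaces/default/pods" 200
#   check-curl-proxy-code "/does-not-exist" 404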
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}
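# Illustrative usage (sketch): wrap a write that may race with controllers and
# hit "the object has been modified" conflicts:
#   kubectl-with-retry annotate pod valid-pod note=x "${kube_flags[@]}"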
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --template '{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
kube::etcd::start
# Find a standard sed instance for use with edit scripts
SED=sed
if which gsed &>/dev/null; then
SED=gsed
fi
if ! ($SED --version 2>&1 | grep -q GNU); then
echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
exit 1
fi
kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"
kube::log::status "Setup complete"
}
########################################################
# Kubectl version (--short, --client, --output) #
########################################################
run_kubectl_version_tests() {
kube::log::status "Testing kubectl version"
TEMP="${KUBE_TEMP}"
# create version files, one for the client, one for the server.
# these are the files we will use to ensure that the rest of the output is correct
kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
kube::log::status "Testing kubectl version: check client only output matches expected output"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
kube::log::status "Testing kubectl version: verify json output"
kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
kube::log::status "Testing kubectl version: compare json output using additional --short flag"
kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
kube::log::status "Testing kubectl version: compare json output with yaml output"
kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
}
# Runs all pod related tests.
run_pod_tests() {
kube::log::status "Testing kubectl(v1:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert pods 'valid-pod'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert pods 'valid-pod' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert pods 'valid-pod' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert pods
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
### Validate Export ###
kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --now
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --now
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --grace-period=0
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command succeeds without --force by waiting
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Deleting PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all removes all the pods
# Post-condition: no POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-kubectl-describe-pod namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-kubectl-describe-pod' is created.
kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'
### Create a generic configmap
# Pre-condition: no CONFIGMAP exists
kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'
### Create a pod disruption budget
# Command
kubectl create pdb test-pdb --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
# Command
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Describe command (resource only) should print detailed information about environment variables
kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Clean-up
kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
kubectl delete pdb/test-pdb pdb/test-pdb-2 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod
### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/redis/redis-proxy.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs are created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
# Post-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Label the valid-pod POD with empty label value
# Pre-condition: valid-pod does not have label "emptylabel"
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
# Command
kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''
### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
### Record label change
# Pre-condition: valid-pod does not have record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
# Command
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Do not record label change
# Command
kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation still contains command with --record=true
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Record label change with unspecified flag and previous change already recorded
# Command
kubectl label pods valid-pod new-record-change=true "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation contains new change
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod-with-precision POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'
## Patch preserves precision
# Command
kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
# Post-condition: pod-with-precision POD has patched annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
# Command
kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has label
kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
# Command
kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Cleanup
kubectl delete pod pod-with-precision "${kube_flags[@]}"
### Annotate POD YAML file locally without affecting the live pod.
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Command
kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
# Pre-condition: annotationkey is annotationvalue
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Command
output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: annotationkey is still annotationvalue in the live pod, but command output is the new value
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
kube::test::if_has_string "${output_message}" "localvalue"
# Cleanup
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
### Create valid-pod POD
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
echo -e "#!/bin/bash\n$SED -i \"s/mock/modified/g\" \$1" > ${TEMP}
chmod +x ${TEMP}
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
rm ${TEMP}
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
## kubectl create --edit won't create anything if user makes no changes
[ "$(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')" ]
## Create valid-pod POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch can modify a local object
kubectl patch --local -f pkg/api/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never"
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Post-condition: valid-pod has the record annotation
kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}"
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
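# $'...' is bash ANSI-C quoting: the \n escapes below become real newlines, so YAML_PATCH holds a small YAML document.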
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
# Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'
## If resourceVersion is specified in the patch, it is treated as a precondition: if it differs from the version stored on the server, the patch is rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because other party may change the resource.
for count in {0..3}; do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion is different from the one stored on the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
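# Skew the observed resourceVersion so it can no longer match the copy stored on the server.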
((resourceVersion+=100))
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace of a pod can change other fields, e.g., spec.containers[0].name
# Command
kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
## check replace --timeout requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
# Clean up
rm /tmp/tmp-valid-pod.json
## replace of a cluster scoped resource can succeed
# Pre-condition: a node exists
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test"
}
}
__EOF__
kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test",
"annotations": {"a":"b"},
"resourceVersion": "0"
}
}
__EOF__
# Post-condition: the node command succeeds
kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod
# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
# Clean up
rm /tmp/tmp-editor.sh
## kubectl edit should work on Windows
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
[ "$(EDITOR=cat kubectl edit ns | grep 'kind: List')" ]
### Label POD YAML file locally without affecting the live pod.
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: name is still valid-pod in the live pod, but command output is the new value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
kube::test::if_has_string "${output_message}" "localonlyvalue"
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite an existing label; it can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
# Command
kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update the configuration annotation only if apply has already been called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
[[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced) ]] # the two annotation dumps must differ
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
}
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --prune
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
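# In short (behavior exercised above): with --prune, objects that match the -l
# selector and carry the last-applied-configuration annotation, but are absent
# from the supplied files, are deleted as part of the apply.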
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
# cleanup
kubectl delete pod/a pod/b
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fall back to delete for non-reapable types
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
}
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
}
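# Runs tests for kubectl apply on deployments (propagation of user-defined null values).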
run_kubectl_apply_deployments_tests() {
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
}
# Runs tests for --save-config tests.
run_save_config_tests() {
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
}
run_kubectl_run_tests() {
## kubectl run should create deployments or jobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By"
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# and the old generator was used, i.e. old defaults are applied
output_message=$(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
# Command
kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
# and the new generator was used, i.e. new defaults are applied
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-apps "${kube_flags[@]}"
}
run_kubectl_get_tests() {
### Test retrieval of non-existing pods
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of non-existing POD with output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of pods when none exist with non-human readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
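# Minimal reproduction outside the harness (sketch, assumes an empty namespace):
# structured printers emit an empty list rather than the human-readable notice:
#   kubectl get pods -o json    # prints an empty "items" list instead of "No resources found"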
### Test retrieval of pods when none exist, with human-readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test retrieval of non-existing POD with json output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
# Post-condition: make sure we don't display an empty List
if kube::test::if_has_string "${output_message}" 'List'; then
echo 'Unexpected List output'
echo "${LINENO} $(basename $0)"
exit 1
fi
### Test kubectl get all
output_message=$(kubectl --v=6 --namespace default get all 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get 200 OK from all the url(s)
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1beta1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200 OK"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --allow-missing-template-keys defaults to true for jsonpath templates
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
## check --allow-missing-template-keys defaults to true for go templates
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'missing is not found'
## check --allow-missing-template-keys=false results in an error for a missing key with go
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
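# Quick reference for the two modes exercised above (illustrative, commented out):
#   kubectl get pod valid-pod -o jsonpath='{.missing}'                                       # default: empty output
#   kubectl get pod valid-pod -o jsonpath='{.missing}' --allow-missing-template-keys=false   # error: missing is not found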
### Test kubectl get watch
output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'STATUS' # headers
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'pods/valid-pod' # resource name
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
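# Scripting sketch: pairing -w with --request-timeout turns an endless watch
# into a bounded snapshot, e.g. a two-second window of pod changes:
#   kubectl get pods -w --request-timeout=2s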
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: PODs redis-master and redis-proxy exist
# Check that all items in the list are printed
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "redis-master redis-proxy"
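# The jsonpath above uses recursive descent ('..'), matching every
# metadata.name in the returned List; an equivalent explicit form (sketch):
#   kubectl get -f <file> -o jsonpath='{.items[*].metadata.name}'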
# cleanup
kubectl delete pods redis-master redis-proxy "${kube_flags[@]}"
}
run_kubectl_request_timeout_tests() {
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout on 'get pod' with --watch
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout value with invalid time unit
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
}
run_tpr_tests() {
create_and_use_new_namespace
kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "foo.company.com"
},
"versions": [
{
"name": "v1"
}
]
}
__EOF__
# Post-Condition: the foo.company.com ThirdPartyResource exists
kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'foo.company.com:'
kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "bar.company.com"
},
"versions": [
{
"name": "v1"
}
]
}
__EOF__
# Post-Condition: both ThirdPartyResources exist
kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'bar.company.com:foo.company.com:'
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1" "third party api"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1/foos" "third party api Foo"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1/bars" "third party api Bar"
# Test that we can list this new third party resource (foos)
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new third party resource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}"
# Test that we can list this new third party resource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test alternate forms
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test all printers, with lists and individual items
kube::log::status "Testing ThirdPartyResource printing"
kubectl "${kube_flags[@]}" get foos
kubectl "${kube_flags[@]}" get foos/test
kubectl "${kube_flags[@]}" get foos -o name
kubectl "${kube_flags[@]}" get foos/test -o name
kubectl "${kube_flags[@]}" get foos -o wide
kubectl "${kube_flags[@]}" get foos/test -o wide
kubectl "${kube_flags[@]}" get foos -o json
kubectl "${kube_flags[@]}" get foos/test -o json
kubectl "${kube_flags[@]}" get foos -o yaml
kubectl "${kube_flags[@]}" get foos/test -o yaml
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
# Test patching
kube::log::status "Testing ThirdPartyResource patching"
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
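# Merge-patch semantics relied on above (sketch, commented out): a null value
# deletes the key, any other value sets it:
#   kubectl patch foos/test --type=merge -p '{"patched":"v"}'    # sets .patched
#   kubectl patch foos/test --type=merge -p '{"patched":null}'   # removes .patched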
# Get local version
TPR_RESOURCE_FILE="${KUBE_TEMP}/tpr-foos-test.json"
kubectl "${kube_flags[@]}" get foos/test -o json > "${TPR_RESOURCE_FILE}"
# cannot apply strategic patch locally
TPR_PATCH_ERROR_FILE="${KUBE_TEMP}/tpr-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${TPR_PATCH_ERROR_FILE}"
if grep -q "try --type merge" "${TPR_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for ThirdPartyResource: $(cat ${TPR_PATCH_ERROR_FILE})"
else
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${TPR_PATCH_ERROR_FILE})"
exit 1
fi
# can apply merge patch locally
kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
# can apply merge patch remotely
kubectl "${kube_flags[@]}" patch --record -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
rm "${TPR_RESOURCE_FILE}"
rm "${TPR_PATCH_ERROR_FILE}"
# Test labeling
kube::log::status "Testing ThirdPartyResource labeling"
kubectl "${kube_flags[@]}" label foos --all listlabel=true
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
# Test annotating
kube::log::status "Testing ThirdPartyResource annotating"
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
# Test describing
kube::log::status "Testing ThirdPartyResource describing"
kubectl "${kube_flags[@]}" describe foos
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Delete the resource
kubectl "${kube_flags[@]}" delete foos test
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Bar
kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}"
# Test that we can list this new third party resource
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Delete the resource
kubectl "${kube_flags[@]}" delete bars test
# Make sure it's gone
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a single item via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
# Test that we have created a foo named test
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that applying the unchanged manifest (an empty patch) doesn't change fields
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
# Update a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-updated-subfield.yaml
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
# Delete a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-deleted-subfield.yaml
# Test that apply has deleted the field
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
# Test that the field does not exist
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-added-subfield.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/foo.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a list via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
# Test that we have created a foo and a bar from a list
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that re-applying a list doesn't change anything
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Update fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-updated-field.yaml
# Test that apply has updated the fields
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
# Delete fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-deleted-field.yaml
# Test that apply has deleted the fields
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
# Test that the fields do not exist
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-added-field.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/multi-tpr-list.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply --prune
# Test that no foo or bar exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on foo.yaml that has foo/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# Check that the right TPRs exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on bar.yaml that has bar/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# Check that the right TPRs exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
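# How --prune decided what to delete above (sketch): objects that match the -l
# selector and a whitelisted group/version/kind but are absent from the applied
# files get removed, which is why applying bar.yaml pruned foo/test:
#   kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/bar.yaml --prune-whitelist=company.com/v1/Bar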
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/bar.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# teardown
kubectl delete thirdpartyresources foo.company.com "${kube_flags[@]}"
kubectl delete thirdpartyresources bar.company.com "${kube_flags[@]}"
}
run_recursive_resources_tests() {
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" "$1"' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
# a list but since it contains invalid objects, it will never open.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
rm /tmp/tmp-editor.sh
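# The same trick works interactively (sketch): point EDITOR at any executable
# that rewrites its first argument, exactly as /tmp/tmp-editor.sh does above:
#   EDITOR=/tmp/tmp-editor.sh kubectl edit pod/busybox0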
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
PRESERVE_ERR_FILE=true
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
}
run_namespace_tests() {
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# This pre-condition no longer holds once earlier tests create and switch namespaces, so the assertion below stays disabled.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
if kube::test::if_supports_resource "${pods}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.
kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
### Create POD valid-pod in specific namespace
# Pre-condition: no POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: a resource cannot be retrieved by name across all namespaces
output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
fi
}
run_secrets_test() {
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='[email protected]' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a tls secret
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Create a secret using stringData
kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "secret-string-data"
},
"data": {
"k1":"djE=",
"k2":""
},
"stringData": {
"k2":"v2"
}
}
__EOF__
# Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
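# Data/stringData relationship verified above (sketch): stringData values are
# base64-encoded into .data on write and win on key collisions, hence k2=djI=:
#   echo -n 'v2' | base64   # -> djI=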
# Clean up
kubectl delete secret secret-string-data --namespace=test-secrets
### Create a secret using output flags
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no secret exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
[[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
## Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Clean up
kubectl delete namespace test-secrets
}
run_configmap_tests() {
kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
kubectl delete configmap test-configmap "${kube_flags[@]}"
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
### Create a generic configmap in a specific namespace
# Pre-condition: no configmaps namespace exists
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
}
run_service_tests() {
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_object_events_assert services 'redis-master'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert services 'redis-master' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert services 'redis-master' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert services
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
### set selector
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Set command to change the selector.
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
# Set command to reset the selector back to the original one.
kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
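# Note (sketch): 'kubectl set selector' replaces the whole selector map, which
# is why the reset above restates every key rather than just reverting 'role':
#   kubectl set selector svc redis-master app=redis,role=master,tier=backend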
### Dump current redis-master service
output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create redis-master-v1-test service
# Pre-condition: redis-master-service service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-v1-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition: service-v1-test service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: service-v1-test exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-v1-test" "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Custom columns can be specified
# Pre-condition: generate output using custom columns
output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
# Post-condition: should contain name column
kube::test::if_has_string "${output_message}" 'redis-master'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create an ExternalName service
# Pre-condition: Only the default kubernetes service exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
### Delete beep-boop service by id
# Pre-condition: beep-boop service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
# Command
kubectl delete service beep-boop "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
}
run_rc_tests() {
kube::log::status "Testing kubectl(v1:replicationcontrollers)"
### Create and stop controller, make sure it doesn't leak pods
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rc 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rc 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rc 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rc
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
# Command
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
# Command
kubectl expose deployment/nginx-deployment
# Post-condition: service exists and exposes deployment port (80)
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Negative test: exposing a resource that cannot be exposed should fail
# Pre-condition: none
# Command
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Case: an explicit --name that exceeds the 63-character limit
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Case: no --name flag; should succeed by truncating the name inherited from the pod
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostname-testing-sixty-three-characters-in-len\" exposed'
# Clean-up
kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
### Expose multiport object as a new service
# Command (no --port flag; the new service inherits its ports from the exposed object)
output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods (--min defaults to 1 when omitted), CPU utilization 70%, rc specified by file
kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified (target defaults to 80%), rc specified by name
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
## Set resource limits/request of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
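# (--local works without a server; --dry-run is expected to fail here since it
# still needs to read the live object, and the deployment has not been created yet)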
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's cpu limits
kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Setting a non-existent container should fail
! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set limits/requests of a deployment specified by a file
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show that dry-run works on a running deployment
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
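# (the mirror case: --dry-run can read the live deployment, while --local is
# expected to fail since it cannot look a named resource up without the server)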
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Clean up
kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
}
run_deployment_tests() {
# Test kubectl create deployment (using default - old generator)
kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd
# Post-Condition: deployment "test-nginx-extensions" is created, with a container named "nginx".
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and the old generator was used, i.e. the old defaults are applied
output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: deployment "test-nginx-apps" is created, with a container named "nginx".
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and the new generator was used, i.e. the new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template="{{range.items}}{{$id_field}}{{end}}")
kubectl delete rs "${output_message}" "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
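# (parse the ReplicaSet name out of the "NewReplicaSet:" line of the describe output)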
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
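# Create a second deployment from the same manifest, renamed to nginx2 on the fly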
$SED "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Setting a non-existent container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
}
run_rs_tests() {
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods (--min defaults to 1 when omitted), CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified (target defaults to 80%), replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
}
run_daemonset_tests() {
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
}
run_multi_resources_tests() {
kube::log::status "Testing kubectl(v1:multiple resources)"
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
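# Each base name above is tried with a .yaml suffix first, falling back to .json;
# the matching "-modify" variant of the chosen file is used for the replace test below.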
for file in $FILES; do
if [ -f "${file}${YAML}" ]
then
file="${file}${YAML}"
replace_file="${file%.yaml}-modify.yaml"
else
file="${file}${JSON}"
replace_file="${file%.json}-modify.json"
fi
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete the mock service and mock replication controller from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation;
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) exists
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
fi
fi
# Post-condition: mock rc (and mock2) exists
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" --watch 2> "${WATCH_ERROR_FILE}" || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch on multiple resources returned an unexpected error or no error: $(cat "${WATCH_ERROR_FILE}")" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
kubectl replace -f "${replace_file}" --force --cascade "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
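# kubectl edit opens $EDITOR on a temp file containing the objects; pointing
# EDITOR at a small stub script lets the test rewrite the status label
# non-interactively, with no TTY involved.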
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# Clean up the stub editor
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f "${file}" labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f "${file}" annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
##############################
# Multiple Resources via URL #
##############################
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: service(mock) and rc(mock) exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
# Clean up
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
i=0
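# Helper: create a fresh namespace (namespace1, namespace2, ...) and switch the
# current context to it, isolating each caller from earlier test state.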
create_and_use_new_namespace() {
i=$(($i+1))
kube::log::status "Creating namespace namespace${i}"
kubectl create namespace "namespace${i}"
kubectl config set-context "${CONTEXT}" --namespace="namespace${i}"
}
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
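# Unless ALLOW_SKEW is set, tell kubectl to refuse servers whose version
# differs from the client's.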
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
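# Go-template field paths shared by the get/describe assertions below.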
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
deployment_image_field="(index .spec.template.spec.containers 0).image"
deployment_second_image_field="(index .spec.template.spec.containers 1).image"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
template_generation_field=".spec.templateGeneration"
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if ! echo "${output_message}" | grep -q "default"; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
#########################
# Kubectl version #
#########################
run_kubectl_version_tests
# Passing no arguments to create is an error
! kubectl create
#######################
# kubectl config set #
#######################
kube::log::status "Testing kubectl(v1:config set)"
kubectl config set-cluster test-cluster --server="https://does-not-work"
# Get the api cert and add a comment to avoid flag parsing problems
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
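# --set-raw-bytes stores the given value verbatim (kubectl base64-encodes it
# into the kubeconfig); without the flag the value must already be base64-encoded.
# Writing the same cert both ways should therefore store identical data.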
r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
encoded=$(echo -n "$cert_data" | base64)
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
test "$e_written" == "$r_written"
#######################
# kubectl local proxy #
#######################
# Make sure the UI can be proxied
start-proxy
check-curl-proxy-code /ui 301
check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 301
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
#########################
# RESTMapper evaluation #
#########################
kube::log::status "Testing RESTMapper"
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
### Non-existent resource type should give a recognizable error
# Pre-condition: None
# Command
kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
else
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
exit 1
fi
rm "${RESTMAPPER_ERROR_FILE}"
# Post-condition: None
kubectl get "${kube_flags[@]}" --raw /version
if kube::test::if_supports_resource "${clusterroles}" ; then
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# test `kubectl create clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin -n default
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group -n default
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name -n default
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
fi
if kube::test::if_supports_resource "${roles}" ; then
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
fi
#########################
# Assert short name #
#########################
kube::log::status "Testing propagation of short names for resources"
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the empty string
if grep -q "unexpected nil value for field" "${ERROR_FILE}"; then
kube::log::status "\"kubectl create\" with empty string list returns error as expected: $(cat "${ERROR_FILE}")"
else
kube::log::status "\"kubectl create\" with empty string list returns unexpected error or non-error: $(cat "${ERROR_FILE}")"
exit 1
fi
rm "${ERROR_FILE}"
fi
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move apply tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
run_kubectl_apply_tests
run_kubectl_run_tests
run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
run_kubectl_apply_deployments_tests
fi
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move get tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
run_kubectl_get_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move request timeout tests to run on rs instead of pods so that they
# can be run for federation apiserver as well.
run_kubectl_request_timeout_tests
fi
#####################################
# Third Party Resources #
#####################################
if kube::test::if_supports_resource "${thirdpartyresources}" ; then
run_tpr_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment "test1" created'
kubectl delete deployments test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
run_configmap_tests
fi
fi
####################
# Client Config #
####################
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context "missing-context" does not exist'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'cluster "missing-cluster" does not exist'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
### Create a new namespace
# Pre-condition: the test-service-accounts namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-service-accounts
# Post-condition: namespace 'test-service-accounts' is created.
kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
### Create a service account in a specific namespace
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
# Clean-up
kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
# Clean up
kubectl delete namespace test-service-accounts
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
run_service_tests
fi
##################
# DaemonSets #
##################
if kube::test::if_supports_resource "${daemonsets}" ; then
run_daemonset_tests
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/nginx-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so the first pod (nginx-0) will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/nginx-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
kube::log::status "Testing kubectl(v1:lists)"
### Create a List with objects from multiple versions
# Command
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
### Delete the List with objects from multiple versions
# Command
kubectl delete service/list-service-test deployment/list-deployment-test
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
### Create and delete storage class
# Pre-condition: no storage classes currently exist
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
kube::log::status "Testing kubectl(v1:nodes)"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_object_events_assert nodes "127.0.0.1"
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert nodes "127.0.0.1" false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert nodes "127.0.0.1" true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert nodes
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert nodes false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert nodes true
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
# Post-condition: node is unschedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
# Post-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # Check the webhook token authentication endpoint. kubectl doesn't actually display the
  # returned object, so this isn't very useful, but it proves that the endpoint works.
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
    # Check the remote authorization endpoint. kubectl doesn't actually display the
    # returned object, so this isn't very useful, but it proves that the endpoint works.
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
kube::log::status "Testing kubectl(v1:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
kube::log::status "Testing resource aliasing"
kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
    # all 4 cassandras might not be in the response immediately...
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
kube::log::status "Testing kubectl(v1:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
fi
###########
# Swagger #
###########
# Verify schema
file="${KUBE_TEMP}/schema-v1.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of services" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
### sort-by should not panic if no pod exists
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl get pods --sort-by="{metadata.name}"
kubectl get pods --sort-by="{metadata.creationTimestamp}"
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
# Pre-condition: the "default" namespace exists
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Verify a specific namespace is ignored when all-namespaces is provided
# Command
kubectl get pods --all-namespaces --namespace=default
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
# approve
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# deny
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
fi
kube::test::clear_all
}
|
#!/bin/bash -e
# Copyright 2017 WSO2 Inc. (http://wso2.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Start WSO2 API Manager
# ----------------------------------------------------------------------------
default_heap_size="2G"
heap_size="$default_heap_size"
function usage() {
echo ""
echo "Usage: "
echo "$0 [-m <heap_size>] [-h]"
echo "-m: The heap memory size of API Manager. Default: $default_heap_size."
echo "-h: Display this help and exit."
echo ""
}
while getopts "m:h" opt; do
case "${opt}" in
m)
heap_size=${OPTARG}
;;
h)
usage
exit 0
;;
\?)
usage
exit 1
;;
esac
done
shift "$((OPTIND - 1))"
if [[ -z $heap_size ]]; then
echo "Please provide the heap size for the API Manager."
exit 1
fi
jvm_dir=""
for dir in /usr/lib/jvm/jdk*; do
[ -d "${dir}" ] && jvm_dir="${dir}" && break
done
export JAVA_HOME="${jvm_dir}"
carbon_bootstrap_class=org.wso2.carbon.bootstrap.Bootstrap
if pgrep -f "$carbon_bootstrap_class" >/dev/null; then
echo "Shutting down APIM"
wso2am/bin/api-manager.sh stop
echo "Waiting for API Manager to stop"
while true; do
if ! pgrep -f "$carbon_bootstrap_class" >/dev/null; then
echo "API Manager stopped"
break
else
sleep 10
fi
done
fi
log_files=(wso2am/repository/logs/*)
if [ ${#log_files[@]} -gt 1 ]; then
echo "Log files exists. Moving to /tmp"
mv "${log_files[@]}" /tmp/
fi
echo "Setting Heap to ${heap_size}"
export JVM_MEM_OPTS="-Xms${heap_size} -Xmx${heap_size}"
echo "Enabling GC Logs"
JAVA_COMMAND="$JAVA_HOME/bin/java"
JAVA_VERSION=$("$JAVA_COMMAND" -version 2>&1 | awk -F '"' '/version/ {print $2}')
if [[ $JAVA_VERSION =~ ^1\.8.* ]]; then
    export JAVA_OPTS="-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/ubuntu/wso2am/repository/logs/gc.log"
    #export JAVA_OPTS="-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/ubuntu/wso2am/repository/logs/gc.log -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:StartFlightRecording=delay=90s,duration=10m,name=Profiling,filename=/home/ubuntu/wso2am/repository/logs/recording.jfr,settings=profile "
else
    # for JDK 11
    export JAVA_OPTS="-Xlog:gc*,safepoint,gc+heap=trace:file=/home/ubuntu/wso2am/repository/logs/gc.log:uptime,utctime,level,tags "
    #export JAVA_OPTS="-Xlog:gc*,safepoint,gc+heap=trace:file=/home/ubuntu/wso2am/repository/logs/gc.log:uptime,utctime,level,tags -XX:StartFlightRecording=disk=true,delay=120s,duration=10m,name=Profiling,filename=/home/ubuntu/wso2am/repository/logs/recording.jfr,settings=profile,path-to-gc-roots=true "
fi
# To enable JFR recording instead, comment out the JAVA_OPTS export above and
# uncomment the JAVA_OPTS below. To retrieve the recording, uncomment the last line
# in the after_execute_test_scenario() method in the run-performance-tests.sh file,
# and set the correct duration and delay (default delay=30s,duration=15m).
# Note: the recording file takes about 600MB. DO NOT ENABLE it for full test runs.
#export JAVA_OPTS="-Xlog:gc*,safepoint,gc+heap=trace:file=/home/ubuntu/wso2am/repository/logs/gc.log:uptime,utctime,level,tags -XX:StartFlightRecording=disk=true,delay=120s,duration=10m,name=Profiling,filename=/home/ubuntu/wso2am/repository/logs/recording.jfr,settings=profile,path-to-gc-roots=true "
echo "Starting APIM"
wso2am/bin/api-manager.sh start
echo "Waiting for API Manager to start"
exit_status=100
n=0
until [ $n -ge 60 ]; do
response_code="$(curl -sk -w "%{http_code}" -o /dev/null https://localhost:8243/services/Version || echo "000")"
if [ "$response_code" = "200" ]; then
echo "API Manager started"
exit_status=0
break
fi
sleep 10
n=$(($n + 1))
done
# Wait for another 10 seconds to make sure that the server is ready to accept API requests.
sleep 10
exit $exit_status
|
#!/bin/bash
set -e
tar_gz="https://dotnetcli.blob.core.windows.net/dotnet/Sdk/2.2.401/dotnet-sdk-2.2.401-linux-x64.tar.gz"
curl -SL -o dotnet.tar.gz $tar_gz
sudo mkdir -p /usr/share/dotnet
sudo tar -zxf dotnet.tar.gz -C /usr/share/dotnet
sudo ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet
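# Optional sanity check after linking (prints SDK and runtime versions):
#   dotnet --info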
|
#!/bin/sh
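# Load the "Sweets" test-patient resources into a FHIR DSTU3 server assumed to
# be listening locally on 127.0.0.1:8080 (the baseDstu3 endpoint below).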
curl -X POST -d @/root/TestPatients/Sweets/Patient_Sweets.json http://127.0.0.1:8080/baseDstu3/Patient -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/RelatedPerson_Sweets.json http://127.0.0.1:8080/baseDstu3/RelatedPerson -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Patient_BabySweets.json http://127.0.0.1:8080/baseDstu3/Patient -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Observation_Weight_BabySweets.json http://127.0.0.1:8080/baseDstu3/Observation -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Observation_BMI_Sweets.json http://127.0.0.1:8080/baseDstu3/Observation -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Claim_Birth_Sweets.json http://127.0.0.1:8080/baseDstu3/Claim -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Claim_Delivery_Sweets.json http://127.0.0.1:8080/baseDstu3/Claim -H "Content-Type: application/json"
curl -X POST -d @/root/TestPatients/Sweets/Claim_Risk_Sweets.json http://127.0.0.1:8080/baseDstu3/Claim -H "Content-Type: application/json"
|
#!/bin/bash
SELF_DIRECTORY="$(dirname ${0})"
FILE="${SELF_DIRECTORY}/style/templates/${1}/${1}.scss"
if test -f "$FILE"; then
yarn clear dist/$1
yarn build:webpack $1
yarn clear src/js/ts/colors.ts
TEMPLATE=$1 node src/js/colors-module-factory.js
yarn build:tsc
cp ./src/js/ts-built/* ./dist/$1
yarn clear src/js/ts-built
else
echo "Template $1 does not exists."
fi
|
#!/usr/bin/env bash
BASEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Install Zsh
sudo apt-get install zsh
# bash
ln -s ${BASEDIR}/bashrc ~/.bashrc
# vim
ln -s ${BASEDIR}/vimrc ~/.vimrc
ln -s ${BASEDIR}/vim ~/.vim
# git
ln -s ${BASEDIR}/gitconfig ~/.gitconfig
# zsh
chsh -s $(which zsh)
ln -s ${BASEDIR}/zshrc ~/.zshrc
# Pure zsh theme
mkdir -p "${BASEDIR}/zsh"
ln -s ${BASEDIR}/zsh ~/.zsh
|
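# Compile a model in ./models with the Edge TPU compiler container.
# Usage sketch (hypothetical model file): ./compile.sh my_model.tflite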
docker run -v "$(pwd)":/workspace edge_tpu_converter edgetpu_compiler "/workspace/models/$1"
|
#!/bin/bash
#######################################
# aliases
#######################################
alias python="python3"
#######################################
# functions
#######################################
# Create a new python project using scaffolding templates
function scaffold-python-project {
python3 ${HOME}/.lib/python/python_scaffolding.py
}
|
#!/bin/dash
#set -x
cd /home/pi/raspberryEasySurvillance || exit 1
# rescue old motion images
mkdir -p rescued
mv ftp/*.jpg rescued
# clean up
./src/cleanUpDirs.sh
while true; do
./src/runOnce.sh
done
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
LIBRARIES_DIR=$(dirname "${BASH_SOURCE[0]}")
# shellcheck source=scripts/ci/libraries/_initialization.sh
. "${LIBRARIES_DIR}"/_initialization.sh
# shellcheck source=scripts/ci/libraries/_sanity_checks.sh
. "${LIBRARIES_DIR}"/_sanity_checks.sh
# shellcheck source=scripts/ci/libraries/_build_images.sh
. "${LIBRARIES_DIR}"/_build_images.sh
# shellcheck source=scripts/ci/libraries/_kind.sh
. "${LIBRARIES_DIR}"/_kind.sh
# shellcheck source=scripts/ci/libraries/_local_mounts.sh
. "${LIBRARIES_DIR}"/_local_mounts.sh
# shellcheck source=scripts/ci/libraries/_md5sum.sh
. "${LIBRARIES_DIR}"/_md5sum.sh
# shellcheck source=scripts/ci/libraries/_parameters.sh
. "${LIBRARIES_DIR}"/_parameters.sh
# shellcheck source=scripts/ci/libraries/_permissions.sh
. "${LIBRARIES_DIR}"/_permissions.sh
# shellcheck source=scripts/ci/libraries/_push_pull_remove_images.sh
. "${LIBRARIES_DIR}"/_push_pull_remove_images.sh
# shellcheck source=scripts/ci/libraries/_runs.sh
. "${LIBRARIES_DIR}"/_runs.sh
# shellcheck source=scripts/ci/libraries/_spinner.sh
. "${LIBRARIES_DIR}"/_spinner.sh
# shellcheck source=scripts/ci/libraries/_start_end.sh
. "${LIBRARIES_DIR}"/_start_end.sh
# shellcheck source=scripts/ci/libraries/_verbosity.sh
. "${LIBRARIES_DIR}"/_verbosity.sh
|
#!/bin/bash
#SBATCH -e ./error%j.txt
#SBATCH -o ./output%j.txt
#SBATCH --account=project_2001273
#SBATCH --job-name=flex
#SBATCH --nodes=1
#SBATCH --time=00:30:00
#SBATCH --partition=test
#SBATCH --mem-per-cpu=16000
#SBATCH --mail-type=END
#SBATCH [email protected]
#SBATCH --gres=nvme:400
# first set the environment
cd /scratch/project_2001273/diego/flexpart_test_run/run_2021-03-14_20-15-10_LONG/2018-05-28
#module purge
module load gcc/9.1.0 intel-mkl/2019.0.4 hpcx-mpi/2.4.0 netcdf-fortran/4.4.4
export NETCDF=$(nf-config --prefix)
#module load allas
source /appl/opt/allas-cli-utils/allas_conf -f -k $OS_PROJECT_NAME
#allas_conf -f -k $OS_PROJECT_NAME
mkdir -p ${LOCAL_SCRATCH}/data_in
ln -sf ${LOCAL_SCRATCH}/data_in ./data_in
mkdir -p ${LOCAL_SCRATCH}/data_out
ln -sf ${LOCAL_SCRATCH}/data_out ./data_out
flex_dir='/users/aliagadi/FLEXPART-WRF_v3.3.2-omp/'
input_flex=/scratch/project_2001273/diego/flexpart_test_run/run_2021-03-14_20-15-10_LONG/2018-05-28/flx_input
#XX run script to load files here.
sh download_files.sh
#cd $flex_dir
#exe=flexwrf33_gnu_mpi
#exe=flexwrf33_gnu_omp
#exe=flexwrf33_gnu_serial
exe=flexwrf33_gnu_omp
## run my MPI executable
#cp ${input_flex} ././data_out/input_flex
#srun ${flex_dir}${exe} ${input_flex}
#srun ${flex_dir}${exe} ././data_out/input_flex
srun ${flex_dir}${exe} ${input_flex}
cd ./data_out
source /appl/opt/allas-cli-utils/allas_conf -f -k $OS_PROJECT_NAME
swift upload run_2021-03-14_20-15-10_LONG/2018-05-28 ./
|
#!/bin/sh
# Run the most recently-modified matterhorn binary in the matterhorn
# working tree.
#
# Portability: Linux, OS X
set -e
HERE=$(cd `dirname $0`; pwd)
# Portability note: -executable is only compatible with GNU find but
# this invocation should be more portable. Also be flexible about old
# and new cabal output locations, preferring new, but avoiding
# slurping up the entire current directory.
OPTIONS=$(find $(for D in dist-newstyle dist; do [ -d $D ] && echo $D; done) -name matterhorn -type f \( -perm -u=x -o -perm -g=x -o -perm -o=x \) )
if [ -z "${OPTIONS}" ] ; then
echo "No matterhorn executable found; try $ cabal new-build"
exit 2
fi
SORTED=$(ls -t ${OPTIONS})
# Run the most recently-modified binary that we found. Note that since
# we use exec, this loop never makes it past one iteration.
for OPTION in ${SORTED}; do
exec "${OPTION}" ${1+$@}
done
echo "No executables found."
|
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Shannon Mitchell ([email protected] )
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Shannon Mitchell #
#[email protected] #
######################################################################
#
# - Created by Shannon Mitchell([email protected])
# to fix RHEL5 EOL date and add support for RHEL6 EOL date.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-11940
#Group Title: Supported Release
#Rule ID: SV-27049r1_rule
#Severity: CAT I
#Rule Version (STIG-ID): GEN000100
#Rule Title: The operating system must be a supported release.
#
#Vulnerability Discussion: An operating system release is considered
#"supported" if the vendor continues to provide security patches for
#the product. With an unsupported release, it will not be possible
#to resolve security issues discovered in the system software.
#
#Security Override Guidance:
#If an extended support agreement that provides security patches
#for the unsupported product is procured from the vendor, this
#finding may be downgraded to a CAT III.
#
#Responsibility: System Administrator
#IAControls: VIVM-1
#
#Check Content:
#Check the version of the operating system.
#
#Example:
# cat /etc/redhat-release
#
#Vendor End-of-Support Information:
#Red Hat Enterprise 3: 31 Oct 2010
#Red Hat Enterprise 4: 29 Feb 2012
#Red Hat Enterprise 5: 31 Mar 2017
#Red Hat Enterprise 6: 30 Nov 2020
#Check with the vendor for information on other versions.
#
#If the version installed is not supported, this is a finding.
#Fix Text: Upgrade to a supported version of the operating system.
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN000100
CURRENTDATE=`date +%F`
RHEL6EOL="2020-11-30"
RHEL5EOL="2017-03-31"
RHEL4EOL="2012-02-29"
RHEL3EOL="2010-10-31"
RELEASE6=`grep -c "release 6" /etc/redhat-release`
RELEASE5=`grep -c "release 5" /etc/redhat-release`
RELEASE4=`grep -c "release 4" /etc/redhat-release`
RELEASE3=`grep -c "release 3" /etc/redhat-release`
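#Example /etc/redhat-release line (assumed format):
#  "Red Hat Enterprise Linux Server release 6.5 (Santiago)"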
#Start-Lockdown
if [ $RELEASE6 -eq 1 ]
then
if [[ "$CURRENTDATE" > "$RHEL6EOL" ]]
then
echo "------------------------------" > $PDI-error.log
date >> $PDI-error.log
echo " " >> $PDI-error.log
echo "RHEL 6 is no longer supported." >> $PDI-error.log
echo "THIS IS A CAT I FINDING" >> $PDI-error.log
echo "------------------------------" >> $PDI-error.log
fi
fi
if [ $RELEASE5 -eq 1 ]
then
if [[ "$CURRENTDATE" > "$RHEL5EOL" ]]
then
echo "------------------------------" > $PDI-error.log
date >> $PDI-error.log
echo " " >> $PDI-error.log
echo "RHEL 5 is no longer supported." >> $PDI-error.log
echo "THIS IS A CAT I FINDING" >> $PDI-error.log
echo "------------------------------" >> $PDI-error.log
fi
fi
if [ $RELEASE4 -eq 1 ]
then
if [[ "$CURRENTDATE" > "$RHEL4EOL" ]]
then
echo "------------------------------" > $PDI-error.log
date >> $PDI-error.log
echo " " >> $PDI-error.log
echo "RHEL 4 is no longer supported." >> $PDI-error.log
echo "THIS IS A CAT I FINDING" >> $PDI-error.log
echo "------------------------------" >> $PDI-error.log
fi
fi
if [ $RELEASE3 -eq 1 ]
then
if [[ "$CURRENTDATE" > "$RHEL3EOL" ]]
then
echo "------------------------------" > $PDI-error.log
date >> $PDI-error.log
echo " " >> $PDI-error.log
echo "RHEL 3 is no longer supported." >> $PDI-error.log
echo "THIS IS A CAT I FINDING" >> $PDI-error.log
echo "------------------------------" >> $PDI-error.log
fi
fi
|
#!/bin/bash
echo "hey" >> $HOME/Desktop/hi
|
#!/bin/sh
set -x
set -e
test -n "$1"
rosrun xacro xacro.py $1 > tmp.urdf && check_urdf tmp.urdf && rm tmp.urdf
|
#!/bin/bash
[[ -n "$TRACE" ]] && set -x
set -eo pipefail
# Save service key to a json file as Terraform GCS
# backend only accepts the credential from a file.
echo "$GCP_CREDENTIALS" > $root/gcp_service_account_key.json
export GOOGLE_CREDENTIALS=$root/gcp_service_account_key.json
export GOOGLE_PROJECT=${GCP_PROJECT}
export GOOGLE_REGION=${GCP_REGION}
terraform init \
-backend-config="bucket=${TERRAFORM_STATE_BUCKET}" \
-backend-config="prefix=${DEPLOYMENT_PREFIX}-k8s-clusters" \
automation/lib/pipelines/pcf/install-and-upgrade/terraform/pks-loadbalancers/google
set +e
terraform apply \
-auto-approve \
-var "infrastructure_state_bucket=${TERRAFORM_STATE_BUCKET}" \
-var "infrastructure_state_prefix=${DEPLOYMENT_PREFIX}" \
automation/lib/pipelines/pcf/install-and-upgrade/terraform/pks-loadbalancers/google
# The re-ordering of cluster resources in the enumerations
# can cause load balancer artifacts to be deleted and recreated.
# This can result in duplicate resource failures and will
# go away when terraform apply is rerun. This issue will be
# fixed when more flexible logical constructs are introduced
# Terraform HCL2.
if [[ $? -ne 0 ]]; then
set -e
terraform apply \
-auto-approve \
-var "infrastructure_state_bucket=${TERRAFORM_STATE_BUCKET}" \
-var "infrastructure_state_prefix=${DEPLOYMENT_PREFIX}" \
automation/lib/pipelines/pcf/install-and-upgrade/terraform/pks-loadbalancers/google
fi
|
#
# Contains the public keys for users that should automatically be granted access
# to ALL testnets and datacenter nodes.
#
# To add an entry into this list:
# 1. Run: ssh-keygen -t ecdsa -N '' -f ~/.ssh/id-solana-testnet
# 2. Add an entry to SOLANA_USERS with your username
# 3. Add an entry to SOLANA_PUBKEYS with the contents of ~/.ssh/id-solana-testnet.pub
#
# If you need multiple keys with your username, repeatedly add your username to SOLANA_USERS, once per key
#
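# Example of a new entry (hypothetical user "alice"; paste the literal contents
# of the .pub file generated in step 1):
#   SOLANA_USERS+=('alice')
#   SOLANA_PUBKEYS+=('ssh-ed25519 <contents of ~/.ssh/id-solana-testnet.pub> alice@host')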
SOLANA_USERS=()
SOLANA_PUBKEYS=()
SOLANA_USERS+=('mvines')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=')
SOLANA_USERS+=('carl')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOk4jgcX/VWSk3j//wXeIynSQjsOt+AjYXM/XZUMa7R1Q8lfIJGK/qHLBP86CMXdpyEKJ5i37QLYOL+0VuRy0CI=')
SOLANA_USERS+=('jack')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEB6YLY4oCfm0e1qPswbzryw0hQEMiVDcUxOwT4bdBbui/ysKGQlVY8bO6vET1Te8EYHz5W4RuPfETbcHmw6dr4=')
SOLANA_USERS+=('trent')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEZC/APgZTM1Y/EfNnCHr+BQN+SN4KWfpyGkwMg+nXdC trent@fry')
SOLANA_USERS+=('trent')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDgdbzGLiv9vGo3yaJGzxO3Q2/w5TS4Km2sFGQFWGFIJ trent@farnsworth')
SOLANA_USERS+=('trent')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHD7QmrbCqEFYGmYlHNsfbAqmJ6FRvJUKZap1TWMc7Sz [email protected]')
SOLANA_USERS+=('trent')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN2NCuglephBlrWSpaLkGFdrAz1aA3vYHjBVJamWBCZ3 trent@trent-build')
SOLANA_USERS+=('dan')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKMl07qHaMCmnvRKBCmahbBAR6GTWkR5BVe8jdzDJ7xzjXLZlf1aqfaOjt5Cu2VxvW7lUtpJQGLJJiMnWuD4Zmc= [email protected]')
SOLANA_USERS+=('dan')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLG+2CSMwuSjX1l4ke7ScGOgmE2/ZICvJUg6re5w5znPy1gZ3YenypoBkoj3mWmavJ09OrUAELzYj3YQU9tSVh4= dan@cabbage-patch')
SOLANA_USERS+=('greg')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG3eu2c7DZS+FE3MZmtU+nv1nn9RqW0lno0gyKpGtxT7 [email protected]')
SOLANA_USERS+=('tyera')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDSWMrqTMsML19cDKmxhfwkDfMWwpcVSYJ49cYkZYpZfTvFjV/Wdbpklo0+fp98i5AzfNYnvl0oxVpFg8A8dpYk=')
#valverde/sagan
SOLANA_USERS+=('sakridge')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIxN1jPgVdSqNmGAjFwA1ypcnME8uM/9NjfaUZBpNdMh sakridge@valverde')
#fermi
SOLANA_USERS+=('sakridge')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILADsMxP8ZtWxpuXjqjMcYpw6d9+4rgdYrmrMEvrLtmd [email protected]')
SOLANA_USERS+=('sakridge')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF5JFfLo8rNBDV6OY08n/BWWu/AMCt6KAQ+2syeR+bvY sakridge@curie')
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHnXXGKZF1/qjhsoRp+7Dm124nIgUbGJPFoqlSkagZmGmsqqHlxgosxHhg6ucHerqonqBXtfdmA7QkZoKVzf/yg= buildkite-agent@dumoulin')
#ci-testnet-deployer (defunct)
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJnESaQpgLM2s3XLW2jvqRrvkBMDd/qGDZCjPR4X/73IwiR+hSw220JaT1JlweRrEh0rodgBTCFsWYSeMbLeGu4= buildkite-agent@ci-testnet-deployer')
#pacman (used by colo-testnet-deployer)
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBL5bkSnIRevXXtx/sSvVtioeiLv9GLqchABi8JfMLolyv/az9mJxu77UGsxcK05ebuVQPe3PHne9isQPyrdxaE4= buildkite-agent@pacman')
#bernal
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJVDa1pi+Sh5N0xaBqiD+3T1c3eKT9M7Y3NIN/pCLmO9N4AH8GBVg2SeqRk4bDfPqDO6MCvSpEeOO7EBuOPVANM= buildkite-agent@bernal')
#valverde
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNYFFHaMi+XaHzAstLgk46kvyn2/gC/f2rCCHqbgdBqHQxyxTGZc/DlAJIqd/lQZiGhHVFRW7olnIkyQJZy5FXU= buildkite-agent@valverde')
#achilles
SOLANA_USERS+=('buildkite-agent')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOp9DIT0f3e/YJK3kCRXunrIAVxuy+5aOzP2jpSPDIzy/9/QAu9P0ZccHQRZTamMtEwB1g4MeafM8yFYzMf8XGU= buildkite-agent@achille')
SOLANA_USERS+=('jstarry')
SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCdpItheyXVow+4j1D4Y8Xh+dsS9GwFLRNiEYjvnonV3FqVO4hss6gmXPk2aiOAZc6QW3IXBt/YebWFNsxBW2xU= [email protected]')
SOLANA_USERS+=('ryoqun')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAsOLWUu8wbe6C5IdyB+gy1KwPCggiWv2UwhWRNOI6kV ryoqun@ubuqun')
SOLANA_USERS+=('aeyakovenko')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEl4U9VboeYbZNOVn8SB1NM29HeI3SwqsbM22Jmw6975 aeyakovenko@valverde')
SOLANA_USERS+=('alexander')
SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINHM/Rdj1UtrqPWMWjgXkjr5xFkyV0yRseM/uHxlHmxe [email protected]')
|
#!/bin/bash
set -x
set -eu
set -o pipefail
# This script can be used to create a bare database directory for use by
# ddev startup. It can be run from the host with:
# docker run -t -u $(id -u) -v "$PWD/files/var/tmp/mysqlbase:/mysqlbase" --rm --entrypoint=/create_base_db.sh drud/ddev-dbserver:<your_version>
SOCKET=/var/tmp/mysql.sock
OUTDIR=/mysqlbase
if [ ! -d $OUTDIR ] ; then
echo "The required output directory $OUTDIR does not seem to exist."
exit 1
fi
# For this script we don't want the defaults in .my.cnf
# However, this script is never run on a normal startup, so we can just throw it away.
sudo rm -f /home/.my.cnf
sudo chmod ugo+w /var/tmp
sudo mkdir -p /var/lib/mysql /mnt/ddev_config/mysql && sudo rm -f /var/lib/mysql/* && sudo chmod -R ugo+w /var/lib/mysql
echo 'Initializing mysql'
mysql_install_db
echo 'Starting mysqld --skip-networking'
mysqld --skip-networking &
pid="$!"
# Wait for the server to respond to mysqladmin ping, or fail if it never does,
# or if the process dies.
for i in {60..0}; do
if mysqladmin ping -uroot --socket=$SOCKET; then
break
fi
# Test to make sure we got it started in the first place. kill -s 0 just tests to see if process exists.
if ! kill -s 0 $pid 2>/dev/null; then
echo "MariaDB initialization startup failed"
exit 3
fi
echo "MariaDB initialization startup process in progress... Try# $i"
sleep 1
done
if [ "$i" -eq 0 ]; then
echo 'MariaDB initialization startup process timed out.'
exit 4
fi
mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql -uroot mysql
mysql -uroot <<EOF
CREATE DATABASE IF NOT EXISTS $MYSQL_DATABASE;
CREATE USER IF NOT EXISTS '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD';
CREATE USER IF NOT EXISTS '$MYSQL_USER'@'localhost' IDENTIFIED BY '$MYSQL_PASSWORD';
GRANT ALL ON $MYSQL_DATABASE.* TO '$MYSQL_USER'@'%';
GRANT ALL ON $MYSQL_DATABASE.* TO '$MYSQL_USER'@'localhost';
CREATE USER IF NOT EXISTS 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION;
GRANT ALL ON *.* to 'root'@'localhost' IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
FLUSH PRIVILEGES;
FLUSH TABLES;
EOF
sudo rm -rf $OUTDIR/*
mariabackup --backup --target-dir=$OUTDIR --user root --password root --socket=$SOCKET
# Initialize with current mariadb_version
my_mariadb_version=$(mysql -V | awk '{sub( /\.[0-9]+-MariaDB,/, ""); print $5 }')
echo $my_mariadb_version >$OUTDIR/db_mariadb_version.txt
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'Mariadb initialization process failed.'
exit 5
fi
echo "The startup database files (in mariabackup format) are now in $OUTDIR"
|
#!/bin/bash
usage() {
echo "Usage: $(basename $0) <jar> <task_class> <topology_name> <system_config> <app_config>" && exit 1
}
[ "$#" -ge 4 ] || usage
SCRIPT_PATH=$( cd $(dirname $0) ; pwd -P )
BASE_DIR=$(dirname $0)
JAR_PATH=$1
JAR_FILE=$(basename $JAR_PATH)
JAR_DIR=$(dirname $JAR_PATH)
TASK_CLASS=$2
TOPOLOGY_NAME=$3
SYSTEM_CONFIG=$4
APP_CONFIG=$5
# prepare configuration string
# serialize properties file
SYSTEM_CONFIG_STR=$(java -jar $BASE_DIR/lib/properties-serializer.jar $SYSTEM_CONFIG)
APP_CONFIG_STR=$(java -jar $BASE_DIR/lib/properties-serializer.jar $APP_CONFIG)
# functions
function readProperty() {
FILE=$1
PROPERTY=$2
PROPERTY_VALUE=`sed '/^\#/d' $FILE | grep $PROPERTY | tail -n 1 | cut -d "=" -f2- | sed 's/^[[:space:]]*//;s/[[:space:]]*$//'`
echo $PROPERTY_VALUE
}
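# Illustrative use of readProperty (hypothetical property name; the helper is
# defined above but not invoked elsewhere in this script):
#   platform=$(readProperty "$SYSTEM_CONFIG" "platform.name")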
echo "Deploying to $PLATFORM"
echo "$SYSTEM_CONFIG_STR"
echo "java -Xms6G -Xmx6G -cp $JAR_PATH org.dspbench.topology.impl.LocalTaskRunner -task $TASK_CLASS -name $TOPOLOGY_NAME -config $SYSTEM_CONFIG_STR"
java -server -XX:+UseG1GC -Xms4g -Xmx6g -XX:+UseCompressedOops -cp $JAR_PATH org.dspbench.topology.impl.LocalTaskRunner -task $TASK_CLASS -name $TOPOLOGY_NAME -config $SYSTEM_CONFIG_STR
|
#!/bin/bash
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX
ARGS="--benchmark"
precision=fp32
if [[ "$1" == *"avx"* ]]; then
unset DNNL_MAX_CPU_ISA
fi
if [[ "$1" == "bf16" ]]
then
precision=bf16
ARGS="$ARGS --bf16"
echo "### running bf16 mode"
elif [[ "$1" == "int8" || "$1" == "avx-int8" ]]
then
precision=int8
ARGS="$ARGS --int8"
echo "### running int8 mode"
elif [[ "$1" == "fp32" || "$1" == "avx-fp32" ]]
then
precision=fp32
echo "### running fp32 mode"
fi
OUTPUT_DIR=${OUTPUT_DIR:-${PWD}}
rm -rf ${OUTPUT_DIR}/latency_log*
export OMP_NUM_THREADS=4
CORES=`lscpu | grep Core | awk '{print $4}'`
SOCKETS=`lscpu | grep Socket | awk '{print $2}'`
INT8_CONFIG=${INT8_CONFIG:-"configure.json"}
BATCH_SIZE=${BATCH_SIZE:-1}
EVAL_DATA_FILE=${EVAL_DATA_FILE:-"${PWD}/squad1.1/dev-v1.1.json"}
FINETUNED_MODEL=${FINETUNED_MODEL:-bert_squad_model}
OUTPUT_DIR=${OUTPUT_DIR:-${PWD}}
EVAL_SCRIPT=${EVAL_SCRIPT:-"./transformers/examples/question-answering/run_squad.py"}
work_space=${work_space:-${OUTPUT_DIR}}
python -m intel_extension_for_pytorch.cpu.launch --ninstance ${SOCKETS} --log_path=${OUTPUT_DIR} --log_file_prefix="./latency_log_${precision}" ${EVAL_SCRIPT} $ARGS --model_type bert --model_name_or_path ${FINETUNED_MODEL} --tokenizer_name bert-large-uncased-whole-word-masking-finetuned-squad --do_eval --do_lower_case --predict_file $EVAL_DATA_FILE --per_gpu_eval_batch_size $BATCH_SIZE --learning_rate 3e-5 --num_train_epochs 2.0 --max_seq_length 384 --doc_stride 128 --output_dir ./tmp --perf_begin_iter 20 --perf_run_iters 100 --use_jit --int8_config ${INT8_CONFIG} --use_share_weight --total_cores ${CORES}
CORES_PER_INSTANCE=4
TOTAL_CORES=`expr $CORES \* $SOCKETS`
INSTANCES=`expr $TOTAL_CORES / $CORES_PER_INSTANCE`
INSTANCES_PER_SOCKET=`expr $INSTANCES / $SOCKETS`
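# Average the per-instance Throughput values from the latency logs, then scale
# by instances per socket to report a per-socket throughput.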
throughput=$(grep 'Throughput:' ${OUTPUT_DIR}/latency_log* |sed -e 's/.*Throughput//;s/[^0-9.]//g' |awk -v INSTANCES_PER_SOCKET=$INSTANCES_PER_SOCKET '
BEGIN {
sum = 0;
i = 0;
}
{
sum = sum + $1;
i++;
}
END {
sum = sum / i * INSTANCES_PER_SOCKET;
printf("%.2f", sum);
}')
echo $INSTANCES_PER_SOCKET
echo ""BERT";"latency";${precision}; ${BATCH_SIZE};${throughput}" | tee -a ${OUTPUT_DIR}/summary.log
|
#!/bin/sh
cd $(dirname $0)
pandoc --from markdown --to latex --output user_guide.tex ../../README.md
# Removes "doc/report" path from figures
sed -i 's/doc\/report\///g' user_guide.tex
# Convert links to footnotes
sed -i "s/\\\\href{\\([^}]*\\)}{\\([^}]*\\)}/\2\\\\footnote{\\\\url{\1}}/" user_guide.tex
pandoc --from markdown --to latex --output code_source_org.tex code_source_org.md
# Use non numbered section for code source organization
sed -i "s/section/section*/" code_source_org.tex
|
#!/bin/sh
hotkeysRC="$HOME/.config/kglobalshortcutsrc"
# Remove application launching shortcuts.
sed -i 's/_launch=[^,]*/_launch=none/g' "$hotkeysRC"
# Remove other global shortcuts.
sed -i 's/^\([^_].*\)=[^,]*/\1=none/g' "$hotkeysRC"
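# e.g. a line like "_launch=Meta+E,Meta+E,Dolphin" becomes
# "_launch=none,Meta+E,Dolphin" (hypothetical entry).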
# Reload hotkeys.
kquitapp5 kglobalaccel && sleep 2s && kglobalaccel5 &
|
#! /bin/bash
set -eu
DOCKER_IMAGE=$1
NETWORK_NAME=$2
N=$3
APP_PROXY=$4
set +u
SEEDS=$5
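# SEEDS, if provided, is a comma-separated peer list, e.g. (hypothetical IDs):
#   "id1@172.57.0.101:26656,id2@172.57.0.102:26656"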
if [[ "$SEEDS" != "" ]]; then
echo "Seeds: $SEEDS"
SEEDS="--p2p.seeds $SEEDS"
fi
set -u
cd "$GOPATH/src/github.com/tendermint/tendermint"
# create docker network
docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME"
for i in $(seq 1 "$N"); do
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$SEEDS --p2p.pex --rpc.unsafe"
done
|
#!/bin/bash
python3 -m pip install -U beautifulsoup4==4.9.1
python3 -m pip install -U jsoncodable==0.0.11
python3 -m pip install -U kcu==0.0.55
python3 -m pip install -U ksimpleapi==0.0.35
python3 -m pip install -U noraise==0.0.6
python3 -m pip install -U requests==2.25.0
|
#!/bin/bash
##########
#The MIT License (MIT)
#
# Copyright (c) 2015 Aiden Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
# MegaMap script.
#
#
# [topDir] - Should contain the results of all base experiments
#
# From the top-level directory, the following two directory is created:
#
# [topDir]/mega - Location of result of processing the mega map
#
# Juicer version 2.0
juicer_version="2.0"
## Set the following variables to work with your system
# Aiden Lab specific check
isRice=$(hostname | awk '{if ($1~/rice/){print 1}else {print 0}}')
isBCM=$(hostname | awk '{if ($1~/bcm/){print 1}else {print 0}}')
isVoltron=0
## path additionals, make sure paths are correct for your system
## use cluster load commands
if [ $isRice -eq 1 ]
then
myPath=/bin:$PATH
load_bwa="module load BioBuilds/2015.04"
load_java="module load Java/8.0.3.22"
load_gpu="module load gcccuda/2016a;module load CUDA/8.0.54;"
# Juicer directory, contains scripts/, references/, and restriction_sites/
# can also be set in options via -D
juiceDir="/projects/ea14/juicer" ### RICE
# default queue, can also be set in options via -q
queue="commons"
# default long queue, can also be set in options via -l
long_queue="commons"
long_queue_time="1440"
elif [ $isBCM -eq 1 ]
then
# Juicer directory, contains scripts/, references/, and restriction_sites/
# can also be set in options via -D
juiceDir="/storage/aiden/juicer/"
# default queue, can also be set in options via -q
queue="mhgcp"
queue_time="1200"
# default long queue, can also be set in options via -l
long_queue="mhgcp"
long_queue_time="3600"
else
isVoltron=1
export PATH=/gpfs0/biobuild/biobuilds-2016.11/bin:$PATH
#unset MALLOC_ARENA_MAX
load_gpu="CUDA_VISIBLE_DEVICES=0,1,2,3"
# Juicer directory, contains scripts/, references/, and restriction_sites/
# can also be set in options via -D
juiceDir="/gpfs0/juicer2/"
# default queue, can also be set in options
queue="commons"
queue_time="1400"
# default long queue, can also be set in options
long_queue="long"
long_queue_time="10080"
fi
# unique name for jobs in this run
groupname="a$(date +%s)"
## Default options, overridden by command line arguments
# top level directory, can also be set in options
topDir=$(pwd)
# restriction enzyme, can also be set in options
site="none"
# genome ID, default to human, can also be set in options
genomeID="hg19"
# by default exclude fragment delimited maps
exclude=1
## Read arguments
usageHelp="Usage: ${0##*/} -g genomeID [-d topDir] [-s site] [-S stage] [-D Juicer scripts directory] [-q queue] [-l long queue] [-Q queue time] [-L long queue time] [-T threadsHic] [-y site_file] [-f] [-h]"
genomeHelp=" genomeID is either defined in the script, e.g. \"hg19\" or \"mm10\" or the path to the chrom.sizes file"
dirHelp=" [topDir] is the top level directory (default \"$topDir\") and must contain links to all merged files underneath it"
siteHelp=" [site] must be defined in the script, e.g. \"HindIII\" or \"MboI\" (default \"$site\"); alternatively, this can be the restriction site file"
siteFileHelp="* [restriction site file]: enter path for restriction site file (locations of\n restriction sites in genome; can be generated with the script\n misc/generate_site_positions.py)"
threadsHicHelp="* [threads for hic file creation]: number of threads when building hic file"
stageHelp="* [stage]: must be one of \"final\", \"postproc\", or \"early\".\n -Use \"final\" when the reads have been combined into merged but the\n final stats and hic files have not yet been created.\n -Use \"postproc\" when the hic files have been created and only\n postprocessing feature annotation remains to be completed.\n -Use \"early\" for an early exit, before the final creation of the stats and\n hic files"
scriptDirHelp="* [Juicer scripts directory]: set the Juicer directory,\n which should have scripts/ references/ and restriction_sites/ underneath it\n (default ${juiceDir})"
excludeHelp=" -f: include fragment-delimited maps from Hi-C mega map (will run slower)"
helpHelp=" -h: print this help and exit"
printHelpAndExit() {
echo "$usageHelp"
echo "$genomeHelp"
echo "$dirHelp"
echo "$siteHelp"
echo "$siteFileHelp"
echo "$stageHelp"
echo "$threadsHicHelp"
echo "$excludeHelp"
echo "$helpHelp"
exit "$1"
}
while getopts "d:g:hfs:S:l:L:q:Q:D:y:T:" opt; do
case $opt in
g) genomeID=$OPTARG ;;
h) printHelpAndExit 0;;
d) topDir=$OPTARG ;;
s) site=$OPTARG ;;
f) exclude=0 ;;
y) site_file=$OPTARG ;;
S) stage=$OPTARG ;;
l) long_queue=$OPTARG ;;
L) long_queue_time=$OPTARG ;;
q) queue=$OPTARG ;;
Q) queue_time=$OPTARG ;;
D) juiceDir=$OPTARG ;;
T) threadsHic=$OPTARG ;;
[?]) printHelpAndExit 1;;
esac
done
## If DNase-type experiment, there are no fragment maps; "none" also provides a way to bypass the site file
if [[ "$site" == "none" ]]
then
exclude=1;
fi
if [ -z "$site_file" ]
then
site_file="${juiceDir}/restriction_sites/${genomeID}_${site}.txt"
fi
## Check that site file exists, needed for fragment number for merged_nodups
if [[ ! -e "$site_file" ]] && [[ "$site" != "none" ]] && [[ ! "$site_file" =~ "none" ]]
then
echo "***! $site_file does not exist. It must be created before running this script."
exit 1
elif [[ "$site" != "none" ]] && [[ ! "$site_file" =~ "none" ]]
then
echo "Using $site_file as site file"
fi
if [ ! -z "$stage" ]
then
case $stage in
final) final=1 ;;
early) early=1 ;;
postproc) postproc=1 ;;
*) echo "$usageHelp"
echo "$stageHelp"
exit 1
esac
fi
## Directories to be created and regex strings for listing files
megadir=${topDir}"/mega"
outputdir=${megadir}"/aligned"
tmpdir=${megadir}"/HIC_tmp"
export TMPDIR=${tmpdir}
#output messages
logdir="$megadir/debug"
touchfile1=${megadir}/touch1
touchfile2=${megadir}/touch2
touchfile3=${megadir}/touch3
touchfile4=${megadir}/touch4
## Check for existing merged files:
merged_count=`find -L ${topDir} | grep merged1.txt | wc -l`
if [ "$merged_count" -lt "1" ]
then
echo "***! Failed to find at least one merged file under ${topDir}"
exit 1
fi
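# Gzipped inputs are wrapped in "<(gunzip -c ...)" process substitutions so
# they stream directly into the merge sort below.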
merged_names=$(find -L ${topDir} | grep merged1.txt.gz | awk '{print "<(gunzip -c",$1")"}' | tr '\n' ' ')
if [ ${#merged_names} -eq 0 ]
then
merged_names=$(find -L ${topDir} | grep merged1.txt | tr '\n' ' ')
fi
merged_names30=$(find -L ${topDir} | grep merged30.txt.gz | awk '{print "<(gunzip -c",$1")"}' | tr '\n' ' ')
if [ ${#merged_names30} -eq 0 ]
then
merged_names30=$(find -L ${topDir} | grep merged30.txt | tr '\n' ' ')
fi
inter_names=$(find -L ${topDir} | grep inter.txt | tr '\n' ' ')
## Create output directory, exit if already exists
if [[ -d "${outputdir}" ]] && [ -z $final ] && [ -z $postproc ]
then
echo "***! Move or remove directory \"${outputdir}\" before proceeding."
exit 1
else
mkdir -p ${outputdir}
fi
## Create temporary directory
if [ ! -d "$tmpdir" ]; then
mkdir $tmpdir
chmod 777 $tmpdir
fi
## Create output directory, used for reporting commands output
if [ ! -d "$logdir" ]; then
mkdir "$logdir"
chmod 777 "$logdir"
fi
## Arguments have been checked and directories created. Now begins
## the real work of the pipeline
# Not in final or postproc
if [ -z $final ] && [ -z $postproc ]
then
# Create top statistics file from all inter.txt files found under current dir
jid1=`sbatch <<- TOPSTATS | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p $queue
#SBATCH -t 1440
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/topstats-%j.out
#SBATCH -e $logdir/topstats-%j.err
#SBATCH -J "${groupname}_topstats"
export LC_COLLATE=C
if ! awk -f ${juiceDir}/scripts/makemega_addstats.awk ${inter_names} > ${outputdir}/inter.txt
then
echo "***! Some problems occurred somewhere in creating top stats files."
exit 100
else
echo "(-: Finished creating top stats files."
cp ${outputdir}/inter.txt ${outputdir}/inter_30.txt
fi
touch $touchfile1
TOPSTATS`
dependtopstats="afterok:$jid1"
# Merge all merged1.txt files found under current dir
jid2=`sbatch <<- MRGSRT | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/merge-%j.out
#SBATCH -e $logdir/merge-%j.err
#SBATCH -J "${groupname}_merge"
#SBATCH -d "${dependtopstats}"
if [ ! -f "${touchfile1}" ]
then
echo "***! Top stats job failed, type \"scontrol show job $jid1\" to see what happened."
exit 1
fi
if [ $isRice -eq 1 ]
then
if ! ${juiceDir}/scripts/sort --parallel=48 -S8G -T ${tmpdir} -m -k2,2d -k6,6d ${merged_names} > ${outputdir}/merged1.txt
then
echo "***! Some problems occurred somewhere in creating sorted merged_nodups files."
exit 1
fi
else
if ! sort --parallel=40 -T ${tmpdir} -m -k2,2d -k6,6d ${merged_names} > ${outputdir}/merged1.txt
then
echo "***! Some problems occurred somewhere in creating sorted merged1 file."
exit 1
else
echo "(-: Finished sorting all merged files into a single merge."
fi
fi
touch $touchfile2
MRGSRT`
dependmerge1="#SBATCH -d afterok:$jid2"
jid22=`sbatch <<- MRGSRT2 | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/merge30-%j.out
#SBATCH -e $logdir/merge30-%j.err
#SBATCH -J "${groupname}_merge30"
#SBATCH -d "${dependtopstats}"
if [ ! -f "${touchfile1}" ]
then
echo "***! Top stats job failed, type \"scontrol show job $jid1\" to see what happened."
exit 1
fi
if [ $isRice -eq 1 ]
then
if ! ${juiceDir}/scripts/sort --parallel=48 -S8G -T ${tmpdir} -m -k2,2d -k6,6d ${merged_names30} > ${outputdir}/merged30.txt
then
echo "***! Some problems occurred somewhere in creating sorted merged files."
exit 1
fi
else
if ! sort --parallel=40 -T ${tmpdir} -m -k2,2d -k6,6d ${merged_names30} > ${outputdir}/merged30.txt
then
echo "***! Some problems occurred somewhere in creating sorted merged30 file."
exit 1
else
echo "(-: Finished sorting all merged files into a single merge."
rm -r ${tmpdir}
fi
fi
touch $touchfile2
MRGSRT2`
dependmerge2="#SBATCH -d afterok:$jid22"
else
touch $touchfile1
touch $touchfile2
fi
if [ -z $postproc ] && [ -z $early ]
then
# Create statistics files for MQ > 0
jid3=`sbatch <<- INTER1 | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/inter1-%j.out
#SBATCH -e $logdir/inter1-%j.err
#SBATCH -J "${groupname}_inter1"
#SBATCH --mem=10G
${dependmerge1}
if [ ! -f "${touchfile2}" ]
then
echo "***! Sort job failed."
exit 1
fi
$load_java
export IBM_JAVA_OPTIONS="-Xmx10000m -Xgcthreads1"
export _JAVA_OPTIONS="-Xms10000m -Xmx10000m"
if ${juiceDir}/scripts/juicer_tools statistics $site_file $outputdir/inter.txt $outputdir/merged1.txt $genomeID
then
touch $touchfile3
fi
INTER1`
dependinter1="afterok:$jid3"
# Create statistics files for MQ > 30
jid4=`sbatch <<- INTER30 | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/inter30-%j.out
#SBATCH -e $logdir/inter30-%j.err
#SBATCH -J "${groupname}_inter30"
#SBATCH --mem=10G
${dependmerge2}
if [ ! -f "${touchfile2}" ]
then
echo "***! Sort job failed."
exit 1
fi
$load_java
export IBM_JAVA_OPTIONS="-Xmx10000m -Xgcthreads1"
export _JAVA_OPTIONS="-Xms10000m -Xmx10000m"
if ${juiceDir}/scripts/juicer_tools statistics $site_file $outputdir/inter_30.txt $outputdir/merged30.txt $genomeID
then
touch $touchfile4
fi
INTER30`
dependinter30="afterok:$jid4"
if [ -z "$threadsHic" ]
then
threadsHic=1
threadHicString=""
threadHic30String=""
threadNormString=""
else
threadHicString="--threads $threadsHic -i ${outputdir}/merged1_index.txt -t ${outputdir}/HIC_tmp"
threadHic30String="--threads $threadsHic -i ${outputdir}/merged30_index.txt -t ${outputdir}/HIC30_tmp"
threadNormString="--threads $threadsHic"
fi
# Create HIC maps file for MQ >= 1
jid5=`sbatch <<- HIC1 | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 8
#SBATCH --ntasks=1
#SBATCH -o $logdir/hic1-%j.out
#SBATCH -e $logdir/hic1-%j.err
#SBATCH -J "${groupname}_hic1"
#SBATCH -d "${dependinter1}"
#SBATCH --mem=150G
#source $usePath
$load_java
export IBM_JAVA_OPTIONS="-Xmx150000m -Xgcthreads1"
export _JAVA_OPTIONS="-Xms150000m -Xmx150000m"
if [ ! -f "${touchfile3}" ]
then
echo "***! Statistics q=1 job failed."
exit 1
fi
mkdir ${outputdir}"/HIC_tmp"
# multithreaded and index doesn't exist yet
if [[ $threadsHic -gt 1 ]] && [[ ! -s ${outputdir}/merged1_index.txt ]]
then
time ${juiceDir}/scripts/index_by_chr.awk ${outputdir}/merged1.txt 500000 > ${outputdir}/merged1_index.txt
fi
if [ "$exclude" -eq 1 ]
then
time ${juiceDir}/scripts/juicer_tools pre -n -s $outputdir/inter.txt -g $outputdir/inter_hists.m -q 1 -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,2000,1000,500,200,100 $threadHicString $outputdir/merged1.txt $outputdir/inter.hic $genomeID
else
time ${juiceDir}/scripts/juicer_tools pre -n -f $site_file -s $outputdir/inter.txt -g $outputdir/inter_hists.m -q 1 -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,2000,1000,500,200,100 $threadHicString $outputdir/merged1.txt $outputdir/inter.hic $genomeID
fi
time ${juiceDir}/scripts/juicer_tools addNorm $threadNormString ${outputdir}/inter.hic
rm -Rf ${outputdir}"/HIC_tmp"
HIC1`
dependhic1="afterok:$jid5"
# Create HIC maps file for MQ > 30
jid6=`sbatch <<- HIC30 | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/hic30-%j.out
#SBATCH -e $logdir/hic30-%j.err
#SBATCH -J "${groupname}_hic30"
#SBATCH -d "${dependinter30}"
#SBATCH --mem=150G
#source $usePath
$load_java
export IBM_JAVA_OPTIONS="-Xmx150000m -Xgcthreads1"
export _JAVA_OPTIONS="-Xms150000m -Xmx150000m"
if [ ! -f "${touchfile4}" ]
then
echo "***! Statistics q=30 job failed."
exit 1
fi
mkdir ${outputdir}"/HIC30_tmp"
# multithreaded and index doesn't exist yet
if [[ $threadsHic -gt 1 ]] && [[ ! -s ${outputdir}/merged30_index.txt ]]
then
time ${juiceDir}/scripts/index_by_chr.awk ${outputdir}/merged30.txt 500000 > ${outputdir}/merged30_index.txt
fi
if [ "$exclude" -eq 1 ]
then
time ${juiceDir}/scripts/juicer_tools pre -n -s $outputdir/inter_30.txt -g $outputdir/inter_30_hists.m -q 30 -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,2000,1000,500,200,100 $threadHic30String $outputdir/merged30.txt $outputdir/inter_30.hic $genomeID
else
time ${juiceDir}/scripts/juicer_tools pre -n -f $site_file -s $outputdir/inter_30.txt -g $outputdir/inter_30_hists.m -q 30 -r 2500000,1000000,500000,250000,100000,50000,25000,10000,5000,2000,1000,500,200,100 $threadHic30String $outputdir/merged30.txt $outputdir/inter_30.hic $genomeID
fi
time ${juiceDir}/scripts/juicer_tools addNorm $threadNormString ${outputdir}/inter_30.hic
rm -Rf ${outputdir}"/HIC30_tmp"
HIC30`
dependhic30only="afterok:$jid6"
sbatchdepend="#SBATCH -d ${dependhic30only}"
dependhic30="${dependhic1}:$jid6"
else
touch $touchfile3 $touchfile4
sbatchdepend=""
fi
if [ -z "$early" ]
then
# Create loop lists file for MQ > 30
if [ $isRice -eq 1 ] || [ $isVoltron -eq 1 ]
then
if [ $isRice -eq 1 ]
then
sbatch_req="#SBATCH --gres=gpu:kepler:1"
fi
jid7=`sbatch <<- HICCUPS | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t 1440
#SBATCH -c 2
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=2G
#SBATCH -o $logdir/hiccups-%j.out
#SBATCH -e $logdir/hiccups-%j.err
#SBATCH -J "${groupname}_hiccups"
${sbatchdepend}
${sbatch_req}
$load_java
${load_gpu}
${juiceDir}/scripts/juicer_hiccups.sh -j ${juiceDir}/scripts/juicer_tools -i $outputdir/inter_30.hic -m ${juiceDir}/references/motif -g $genomeID
HICCUPS`
dependhic30="${dependhic30}:$jid7"
fi
# Create domain lists for MQ > 30
jid8=`sbatch <<- ARROWHEAD | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${long_queue}
#SBATCH -t ${long_queue_time}
#SBATCH -c 2
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=4G
#SBATCH -o $logdir/arrowhead-%j.out
#SBATCH -e $logdir/arrowhead-%j.err
#SBATCH -J "${groupname}_arrowhead"
${sbatchdepend}
$load_java
${juiceDir}/scripts/juicer_arrowhead.sh -j ${juiceDir}/scripts/juicer_tools -i $outputdir/inter_30.hic
ARROWHEAD`
dependhic30="${dependhic30}:$jid8"
# Final checks
jid9=`sbatch <<- FINAL | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${queue}
#SBATCH -t 100
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/done-%j.out
#SBATCH -e $logdir/done-%j.err
#SBATCH -J "${groupname}_done"
#SBATCH -d "${dependhic30}"
rm -rf ${tmpdir}
rm -f $touchfile1 $touchfile2 $touchfile3 $touchfile4
if [ -s ${outputdir}/inter.hic ] && [ -s ${outputdir}/inter_30.hic ]
then
echo "(-: Successfully completed making mega maps. Done. :-)"
else
echo "!*** Error: one or both hic files are empty. Check debug directory for hic logs"
fi
if [ $isRice -eq 1 ]
then
echo $topDir, $site, $genomeID | mail -r [email protected] -s \"Mega Juicer pipeline finished successfully @ Rice\" -t [email protected];
fi
FINAL`
else
jid9=`sbatch <<- FINAL | egrep -o -e "\b[0-9]+$"
#!/bin/bash -l
#SBATCH -p ${queue}
#SBATCH -t 100
#SBATCH -c 1
#SBATCH --ntasks=1
#SBATCH -o $logdir/done-%j.out
#SBATCH -e $logdir/done-%j.err
#SBATCH -J "${groupname}_done"
#SBATCH -d "${dependmerge}"
rm -fr ${tmpdir}
rm -f $touchfile1 $touchfile2 $touchfile3 $touchfile4
echo "(-: Successfully completed making mega map. Done. :-)"
FINAL`
fi
echo "(-: Finished adding all jobs... please wait while processing."
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function hadoop_error
{
# NOTE: This function is not user replaceable.
echo "$*" 1>&2
}
function hadoop_debug
{
if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
echo "DEBUG: $*" 1>&2
fi
}
function hadoop_bootstrap_init
{
# NOTE: This function is not user replaceable.
# the root of the Hadoop installation
# See HADOOP-6255 for the expected directory structure layout
# By now, HADOOP_LIBEXEC_DIR should have been defined upstream
# We can piggyback off of that to figure out where the default
# HADOOP_PREFIX should be. This allows us to run without
# HADOOP_PREFIX ever being defined by a human! As a consequence
# HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
# env var within Hadoop.
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
hadoop_error "HADOOP_LIBEXEC_DIR is not defined. Exiting."
exit 1
fi
HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
export HADOOP_PREFIX
#
# short-cuts. vendors may redefine these as well, preferably
# in hadoop-layouts.sh
#
HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
# setup a default TOOL_PATH
TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
# defaults
export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
}
function hadoop_find_confdir
{
# NOTE: This function is not user replaceable.
local conf_dir
# Look for the basic hadoop configuration area.
#
#
# An attempt at compatibility with some Hadoop 1.x
# installs.
if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
conf_dir="conf"
else
conf_dir="etc/hadoop"
fi
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
}
function hadoop_verify_confdir
{
# Check only log4j.properties by default.
# --loglevel does not work without logger settings in log4j.properties.
if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then
hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete."
fi
}
function hadoop_exec_hadoopenv
{
# NOTE: This function is not user replaceable.
if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
export HADOOP_ENV_PROCESSED=true
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
fi
}
function hadoop_exec_userfuncs
{
# NOTE: This function is not user replaceable.
if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
. "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
fi
}
function hadoop_exec_hadooprc
{
# Read the user's settings. This provides for users to override
# and/or append hadoop-env.sh. It is not meant as a complete system override.
if [[ -f "${HOME}/.hadooprc" ]]; then
hadoop_debug "Applying the user's .hadooprc"
. "${HOME}/.hadooprc"
fi
}
function hadoop_basic_init
{
# Some of these are also set in hadoop-env.sh.
# we still set them here just in case hadoop-env.sh is
# broken in some way, set up defaults, etc.
#
# but it is important to note that if you update these
# you also need to update hadoop-env.sh as well!!!
# CLASSPATH initially contains $HADOOP_CONF_DIR
CLASSPATH="${HADOOP_CONF_DIR}"
hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
fi
# default policy file for service-level authorization
HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
# define HADOOP_HDFS_HOME
if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_YARN_HOME
if [[ -z "${HADOOP_YARN_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_MAPRED_HOME
if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
fi
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
HADOOP_NICENESS=${HADOOP_NICENESS:-0}
HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10"}
HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
}
function hadoop_populate_slaves_file()
{
# NOTE: This function is not user replaceable.
local slavesfile=$1
shift
if [[ -f "${slavesfile}" ]]; then
# shellcheck disable=2034
HADOOP_SLAVES="${slavesfile}"
elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
# shellcheck disable=2034
HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
# shellcheck disable=2034
YARN_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
else
hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\""
hadoop_exit_with_usage 1
fi
}
function hadoop_rotate_log
{
#
# log rotation (mainly used for .out files)
# Users are likely to replace this one for something
# that gzips or uses dates or who knows what.
#
# be aware that &1 and &2 might go through here
# so don't do anything too crazy...
#
local log=$1;
local num=${2:-5};
if [[ -f "${log}" ]]; then # rotate logs
while [[ ${num} -gt 1 ]]; do
#shellcheck disable=SC2086
let prev=${num}-1
if [[ -f "${log}.${prev}" ]]; then
mv "${log}.${prev}" "${log}.${num}"
fi
num=${prev}
done
mv "${log}" "${log}.${num}"
fi
}
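# Illustrative usage (a sketch; the path is hypothetical):
#   hadoop_rotate_log "${HADOOP_LOG_DIR}/hadoop-namenode.out" 5
# shifts hadoop-namenode.out.4 -> .5, ..., .1 -> .2, then moves the live
# hadoop-namenode.out to hadoop-namenode.out.1.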
function hadoop_actual_ssh
{
# we are passing this function to xargs
# should get hostname followed by rest of command line
local slave=$1
shift
# shellcheck disable=SC2086
ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
}
function hadoop_connect_to_hosts
{
# shellcheck disable=SC2124
local params="$@"
#
# ssh (or whatever) to a host
#
# User can specify hostnames or a file where the hostnames are (not both)
if [[ -n "${HADOOP_SLAVES}" && -n "${HADOOP_SLAVE_NAMES}" ]] ; then
hadoop_error "ERROR: Both HADOOP_SLAVES and HADOOP_SLAVE_NAME were defined. Aborting."
exit 1
fi
if [[ -n "${HADOOP_SLAVE_NAMES}" ]] ; then
SLAVE_NAMES=${HADOOP_SLAVE_NAMES}
else
SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
fi
# if pdsh is available, let's use it. otherwise default
# to a loop around ssh. (ugh)
if [[ -e '/usr/bin/pdsh' ]]; then
if [[ -z "${HADOOP_SLAVE_NAMES}" ]] ; then
# if we were given a file, just let pdsh deal with it.
# shellcheck disable=SC2086
PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-f "${HADOOP_SSH_PARALLEL}" -w ^"${SLAVE_FILE}" $"${@// /\\ }" 2>&1
else
# no spaces allowed in the pdsh arg host list
# shellcheck disable=SC2086
SLAVE_NAMES=$(echo ${SLAVE_NAMES} | tr -s ' ' ,)
PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-f "${HADOOP_SSH_PARALLEL}" -w "${SLAVE_NAMES}" $"${@// /\\ }" 2>&1
fi
else
if [[ -z "${SLAVE_NAMES}" ]]; then
SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${SLAVE_FILE}")
fi
# quoting here gets tricky. it's easier to push it into a function
# so that we don't have to deal with it. However...
# xargs can't use a function so instead we'll export it out
# and force it into a subshell
# moral of the story: just use pdsh.
export -f hadoop_actual_ssh
export HADOOP_SSH_OPTS
    # xargs is used with option -I to replace the placeholder in the
    # argument list with each hostname read from stdin/pipe. But xargs
    # considers one line as one argument while reading from stdin/pipe,
    # so place each hostname on its own line before passing via pipe.
SLAVE_NAMES=$(echo "$SLAVE_NAMES" | tr ' ' '\n' )
echo "${SLAVE_NAMES}" | \
xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
-I {} bash -c -- "hadoop_actual_ssh {} ${params}"
wait
fi
}
function hadoop_validate_classname
{
local class=$1
shift 1
if [[ ! ${class} =~ \. ]]; then
    # assume the arg is a typo of a command if it does not contain ".".
# class belonging to no package is not allowed as a result.
hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME."
return 1
fi
return 0
}
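# Illustrative behavior (class names are hypothetical):
#   hadoop_validate_classname org.example.Foo   # contains ".", returns 0
#   hadoop_validate_classname Foo               # prints an error, returns 1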
function hadoop_add_param
{
#
# general param dedupe..
# $1 is what we are adding to
# $2 is the name of what we want to add (key)
# $3 is the key+value of what we're adding
#
# doing it this way allows us to support all sorts of
# different syntaxes, just so long as they are space
# delimited
#
if [[ ! ${!1} =~ $2 ]] ; then
# shellcheck disable=SC2086
eval $1="'${!1} $3'"
hadoop_debug "$1 accepted $3"
else
hadoop_debug "$1 declined $3"
fi
}
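# Illustrative usage (assumed values, mirroring the calls further below):
#   hadoop_add_param HADOOP_OPTS Xmx "-Xmx1g"
# appends "-Xmx1g" to HADOOP_OPTS only if nothing matching "Xmx" is already
# present, so repeated calls don't stack duplicate flags.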
function hadoop_add_classpath
{
# two params:
# $1 = directory, file, wildcard, whatever to add
# $2 = before or after, which determines where in the
# classpath this object should go. default is after
# return 0 = success (added or duplicate)
# return 1 = failure (doesn't exist, whatever)
# However, with classpath (& JLP), we can do dedupe
# along with some sanity checking (e.g., missing directories)
# since we have a better idea of what is legal
#
# for wildcard at end, we can
# at least check the dir exists
if [[ $1 =~ ^.*\*$ ]]; then
local mp=$(dirname "$1")
if [[ ! -d "${mp}" ]]; then
hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
return 1
fi
# no wildcard in the middle, so check existence
# (doesn't matter *what* it is)
elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
return 1
fi
if [[ -z "${CLASSPATH}" ]]; then
CLASSPATH=$1
hadoop_debug "Initial CLASSPATH=$1"
elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
if [[ "$2" = "before" ]]; then
CLASSPATH="$1:${CLASSPATH}"
hadoop_debug "Prepend CLASSPATH: $1"
else
CLASSPATH+=:$1
hadoop_debug "Append CLASSPATH: $1"
fi
else
hadoop_debug "Dupe CLASSPATH: $1"
fi
return 0
}
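# Illustrative usage (paths are hypothetical):
#   hadoop_add_classpath "/opt/extra/lib/*" before  # prepends; the dir must exist
#   hadoop_add_classpath "/opt/extra/conf"          # appends if not a duplicate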
function hadoop_add_colonpath
{
  # three params:
  # $1 = the name of the variable to modify (e.g., LD_LIBRARY_PATH)
  # $2 = directory, file, wildcard, whatever to add
  # $3 = before or after, which determines where in the
  # path this object should go
  # return 0 = success
  # return 1 = failure (duplicate or missing directory)
  # this is CLASSPATH, JLP, etc with dedupe but no
  # other checking
if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
if [[ -z "${!1}" ]]; then
# shellcheck disable=SC2086
eval $1="'$2'"
hadoop_debug "Initial colonpath($1): $2"
elif [[ "$3" = "before" ]]; then
# shellcheck disable=SC2086
eval $1="'$2:${!1}'"
hadoop_debug "Prepend colonpath($1): $2"
else
# shellcheck disable=SC2086
eval $1+="'$2'"
hadoop_debug "Append colonpath($1): $2"
fi
return 0
fi
hadoop_debug "Rejected colonpath($1): $2"
return 1
}
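# Illustrative usage (the path is hypothetical):
#   hadoop_add_colonpath LD_LIBRARY_PATH "/opt/native/lib" before
# prepends the directory to LD_LIBRARY_PATH, provided it exists and is not
# already listed.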
function hadoop_add_javalibpath
{
# specialized function for a common use case
hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
}
function hadoop_add_ldlibpath
{
# specialized function for a common use case
hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
# note that we export this
export LD_LIBRARY_PATH
}
function hadoop_add_to_classpath_common
{
#
# get all of the common jars+config in the path
#
# developers
if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
fi
if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
fi
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
}
function hadoop_add_to_classpath_hdfs
{
#
# get all of the hdfs jars+config in the path
#
# developers
if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
fi
# put hdfs in classpath if present
if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
fi
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
}
function hadoop_add_to_classpath_yarn
{
local i
#
# get all of the yarn jars+config in the path
#
# developers
if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
yarn-server/yarn-server-nodemanager \
yarn-server/yarn-server-common \
yarn-server/yarn-server-resourcemanager; do
hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
done
hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
fi
if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
fi
hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
}
function hadoop_add_to_classpath_mapred
{
#
# get all of the mapreduce jars+config in the path
#
# developers
if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
fi
if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
fi
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
}
function hadoop_add_to_classpath_userpath
{
# Add the user-specified HADOOP_CLASSPATH to the
# official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER
# is not set.
# Add it first or last depending on if user has
# set env-var HADOOP_USER_CLASSPATH_FIRST
# we'll also dedupe it, because we're cool like that.
#
local c
local array
local i
local j
let c=0
if [[ -n "${HADOOP_CLASSPATH}" ]]; then
# I wonder if Java runs on VMS.
for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
array[$c]=$i
let c+=1
done
let j=c-1
if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
for ((i=j; i>=0; i--)); do
hadoop_add_classpath "${array[$i]}" before
done
else
for ((i=0; i<=j; i++)); do
hadoop_add_classpath "${array[$i]}" after
done
fi
fi
fi
}
function hadoop_os_tricks
{
local bindv6only
# Some OSes have special needs. Here's some out of the box examples for OS X,
# Linux and Windows on Cygwin.
# Vendors, replace this with your special sauce.
HADOOP_IS_CYGWIN=false
case ${HADOOP_OS_TYPE} in
Darwin)
if [[ -z "${JAVA_HOME}" ]]; then
if [[ -x /usr/libexec/java_home ]]; then
export JAVA_HOME="$(/usr/libexec/java_home)"
else
export JAVA_HOME=/Library/Java/Home
fi
fi
;;
Linux)
bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
# NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it
# undocumented in hadoop-env.sh because we don't want users to
# shoot themselves in the foot while devs make IPv6 work.
if [[ -n "${bindv6only}" ]] &&
[[ "${bindv6only}" -eq "1" ]] &&
[[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
exit 1
fi
# Newer versions of glibc use an arena memory allocator that
      # causes virtual memory usage to explode. This interacts badly
# with the many threads that we use in Hadoop. Tune the variable
# down to prevent vmem explosion.
export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
;;
CYGWIN*)
# Flag that we're running on Cygwin to trigger path translation later.
HADOOP_IS_CYGWIN=true
;;
esac
}
function hadoop_java_setup
{
# Bail if we did not detect it
if [[ -z "${JAVA_HOME}" ]]; then
hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
exit 1
fi
if [[ ! -d "${JAVA_HOME}" ]]; then
hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
exit 1
fi
JAVA="${JAVA_HOME}/bin/java"
if [[ ! -x "$JAVA" ]]; then
hadoop_error "ERROR: $JAVA is not executable."
exit 1
fi
}
function hadoop_finalize_libpaths
{
if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
hadoop_add_param HADOOP_OPTS java.library.path \
"-Djava.library.path=${JAVA_LIBRARY_PATH}"
export LD_LIBRARY_PATH
fi
}
function hadoop_finalize_hadoop_heap
{
if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then
HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m"
fi
hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}"
fi
# backwards compatibility
if [[ -n "${HADOOP_HEAPSIZE}" ]]; then
if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then
HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m"
fi
hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}"
fi
if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then
if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then
HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m"
fi
hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}"
fi
}
# Accepts a variable name. If running on Cygwin, sets the variable value to the
# equivalent translated Windows path by running the cygpath utility. If the
# second argument is true, then the variable is treated as a path list.
function hadoop_translate_cygwin_path
{
if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
if [[ "$2" = "true" ]]; then
#shellcheck disable=SC2016
eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)'
else
#shellcheck disable=SC2016
eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)'
fi
fi
}
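# Illustrative behavior on Cygwin (value is hypothetical): with
# HADOOP_LOG_DIR=/cygdrive/c/hadoop/logs,
#   hadoop_translate_cygwin_path HADOOP_LOG_DIR
# rewrites the variable to the Windows form C:\hadoop\logs via cygpath.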
#
# fill in any last minute options that might not have been defined yet
#
function hadoop_finalize_hadoop_opts
{
hadoop_translate_cygwin_path HADOOP_LOG_DIR
hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
HADOOP_HOME=${HADOOP_PREFIX}
hadoop_translate_cygwin_path HADOOP_HOME
export HADOOP_HOME
hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
}
function hadoop_finalize_classpath
{
hadoop_add_classpath "${HADOOP_CONF_DIR}" before
# user classpath gets added at the last minute. this allows
# override of CONF dirs and more
hadoop_add_to_classpath_userpath
hadoop_translate_cygwin_path CLASSPATH true
}
function hadoop_finalize_catalina_opts
{
local prefix=${HADOOP_CATALINA_PREFIX}
hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
hadoop_add_param CATALINA_OPTS java.library.path "-Djava.library.path=${JAVA_LIBRARY_PATH}"
fi
hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_PREFIX}"
hadoop_add_param CATALINA_OPTS "${prefix}.config.dir" "-D${prefix}.config.dir=${HADOOP_CATALINA_CONFIG}"
hadoop_add_param CATALINA_OPTS "${prefix}.log.dir" "-D${prefix}.log.dir=${HADOOP_CATALINA_LOG}"
hadoop_add_param CATALINA_OPTS "${prefix}.temp.dir" "-D${prefix}.temp.dir=${HADOOP_CATALINA_TEMP}"
hadoop_add_param CATALINA_OPTS "${prefix}.admin.port" "-D${prefix}.admin.port=${HADOOP_CATALINA_ADMIN_PORT}"
hadoop_add_param CATALINA_OPTS "${prefix}.http.port" "-D${prefix}.http.port=${HADOOP_CATALINA_HTTP_PORT}"
hadoop_add_param CATALINA_OPTS "${prefix}.max.threads" "-D${prefix}.max.threads=${HADOOP_CATALINA_MAX_THREADS}"
hadoop_add_param CATALINA_OPTS "${prefix}.ssl.keystore.file" "-D${prefix}.ssl.keystore.file=${HADOOP_CATALINA_SSL_KEYSTORE_FILE}"
}
function hadoop_finalize
{
# user classpath gets added at the last minute. this allows
# override of CONF dirs and more
hadoop_finalize_classpath
hadoop_finalize_libpaths
hadoop_finalize_hadoop_heap
hadoop_finalize_hadoop_opts
hadoop_translate_cygwin_path HADOOP_PREFIX
hadoop_translate_cygwin_path HADOOP_CONF_DIR
hadoop_translate_cygwin_path HADOOP_COMMON_HOME
hadoop_translate_cygwin_path HADOOP_HDFS_HOME
hadoop_translate_cygwin_path HADOOP_YARN_HOME
hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
}
function hadoop_exit_with_usage
{
# NOTE: This function is not user replaceable.
local exitcode=$1
if [[ -z $exitcode ]]; then
exitcode=1
fi
if declare -F hadoop_usage >/dev/null ; then
hadoop_usage
elif [[ -x /usr/bin/cowsay ]]; then
/usr/bin/cowsay -f elephant "Sorry, no help available."
else
hadoop_error "Sorry, no help available."
fi
exit $exitcode
}
function hadoop_verify_secure_prereq
{
# if you are on an OS like Illumos that has functional roles
# and you are using pfexec, you'll probably want to change
# this.
# ${EUID} comes from the shell itself!
if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
exit 1
else
return 0
fi
}
function hadoop_setup_secure_service
{
# need a more complicated setup? replace me!
HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
}
function hadoop_verify_piddir
{
if [[ -z "${HADOOP_PID_DIR}" ]]; then
hadoop_error "No pid directory defined."
exit 1
fi
if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
exit 1
fi
fi
touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
exit 1
fi
rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
}
function hadoop_verify_logdir
{
if [[ -z "${HADOOP_LOG_DIR}" ]]; then
hadoop_error "No log directory defined."
exit 1
fi
if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
exit 1
fi
fi
touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
exit 1
fi
rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
}
function hadoop_status_daemon()
{
#
# LSB 4.1.0 compatible status command (1)
#
# 0 = program is running
# 1 = dead, but still a pid (2)
# 2 = (not used by us)
# 3 = not running
#
# 1 - this is not an endorsement of the LSB
#
# 2 - technically, the specification says /var/run/pid, so
# we should never return this value, but we're giving
# them the benefit of a doubt and returning 1 even if
  # our pid is not in /var/run.
#
local pidfile=$1
shift
local pid
if [[ -f "${pidfile}" ]]; then
pid=$(cat "${pidfile}")
if ps -p "${pid}" > /dev/null 2>&1; then
return 0
fi
return 1
fi
return 3
}
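# Illustrative usage (the pidfile path is hypothetical):
#   hadoop_status_daemon "${HADOOP_PID_DIR}/hadoop-namenode.pid"
#   case $? in 0) echo running;; 1) echo dead, pidfile remains;; 3) echo stopped;; esac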
function hadoop_java_exec
{
# run a java command. this is used for
# non-daemons
local command=$1
local class=$2
shift 2
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
function hadoop_start_daemon
{
# this is our non-privileged daemon starter
# that fires up a daemon in the *foreground*
# so complex! so wow! much java!
local command=$1
local class=$2
local pidfile=$3
shift 3
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
# this is for the non-daemon pid creation
#shellcheck disable=SC2086
echo $$ > "${pidfile}" 2>/dev/null
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot write ${command} pid ${pidfile}."
fi
export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
function hadoop_start_daemon_wrapper
{
# this is our non-privileged daemon start
# that fires up a daemon in the *background*
local daemonname=$1
local class=$2
local pidfile=$3
local outfile=$4
shift 4
local counter
hadoop_rotate_log "${outfile}"
hadoop_start_daemon "${daemonname}" \
"$class" \
"${pidfile}" \
"$@" >> "${outfile}" 2>&1 < /dev/null &
# we need to avoid a race condition here
# so let's wait for the fork to finish
# before overriding with the daemonized pid
(( counter=0 ))
while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
sleep 1
(( counter++ ))
done
# this is for daemon pid creation
#shellcheck disable=SC2086
echo $! > "${pidfile}" 2>/dev/null
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot write ${daemonname} pid ${pidfile}."
fi
# shellcheck disable=SC2086
renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
fi
# shellcheck disable=SC2086
disown %+ >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
fi
sleep 1
# capture the ulimit output
ulimit -a >> "${outfile}" 2>&1
# shellcheck disable=SC2086
if ! ps -p $! >/dev/null 2>&1; then
return 1
fi
return 0
}
function hadoop_start_secure_daemon
{
# this is used to launch a secure daemon in the *foreground*
#
local daemonname=$1
local class=$2
  # pid file to create for our daemon
local daemonpidfile=$3
# where to send stdout. jsvc has bad habits so this *may* be &1
# which means you send it to stdout!
local daemonoutfile=$4
# where to send stderr. same thing, except &2 = stderr
local daemonerrfile=$5
local privpidfile=$6
shift 6
hadoop_rotate_log "${daemonoutfile}"
hadoop_rotate_log "${daemonerrfile}"
jsvc="${JSVC_HOME}/jsvc"
if [[ ! -f "${jsvc}" ]]; then
hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
hadoop_error "or privileged daemons. Please download and install jsvc from "
hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ "
hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
exit 1
fi
# note that shellcheck will throw a
# bogus for-our-use-case 2086 here.
# it doesn't properly support multi-line situations
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
#shellcheck disable=SC2086
echo $$ > "${privpidfile}" 2>/dev/null
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}."
fi
exec "${jsvc}" \
"-Dproc_${daemonname}" \
-outfile "${daemonoutfile}" \
-errfile "${daemonerrfile}" \
-pidfile "${daemonpidfile}" \
-nodetach \
-user "${HADOOP_SECURE_USER}" \
-cp "${CLASSPATH}" \
${HADOOP_OPTS} \
"${class}" "$@"
}
function hadoop_start_secure_daemon_wrapper
{
# this wraps hadoop_start_secure_daemon to take care
# of the dirty work to launch a daemon in the background!
local daemonname=$1
local class=$2
# same rules as hadoop_start_secure_daemon except we
# have some additional parameters
local daemonpidfile=$3
local daemonoutfile=$4
# the pid file of the subprocess that spawned our
# secure launcher
local jsvcpidfile=$5
# the output of the subprocess that spawned our secure
# launcher
local jsvcoutfile=$6
local daemonerrfile=$7
shift 7
local counter
hadoop_rotate_log "${jsvcoutfile}"
hadoop_start_secure_daemon \
"${daemonname}" \
"${class}" \
"${daemonpidfile}" \
"${daemonoutfile}" \
"${daemonerrfile}" \
"${jsvcpidfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
# we need to avoid a race condition here
# so let's wait for the fork to finish
# before overriding with the daemonized pid
(( counter=0 ))
while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do
sleep 1
(( counter++ ))
done
# this is for the daemon pid creation
#shellcheck disable=SC2086
echo $! > "${jsvcpidfile}" 2>/dev/null
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot write ${daemonname} pid ${daemonpidfile}."
fi
sleep 1
#shellcheck disable=SC2086
renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
fi
if [[ -f "${daemonpidfile}" ]]; then
#shellcheck disable=SC2046
renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)"
fi
fi
#shellcheck disable=SC2046
disown %+ >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
fi
# capture the ulimit output
su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
#shellcheck disable=SC2086
if ! ps -p $! >/dev/null 2>&1; then
return 1
fi
return 0
}
function hadoop_stop_daemon
{
local cmd=$1
local pidfile=$2
shift 2
local pid
if [[ -f "${pidfile}" ]]; then
pid=$(cat "$pidfile")
kill "${pid}" >/dev/null 2>&1
sleep "${HADOOP_STOP_TIMEOUT}"
if kill -0 "${pid}" > /dev/null 2>&1; then
hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
kill -9 "${pid}" >/dev/null 2>&1
fi
if ps -p "${pid}" > /dev/null 2>&1; then
hadoop_error "ERROR: Unable to kill ${pid}"
else
rm -f "${pidfile}" >/dev/null 2>&1
fi
fi
}
function hadoop_stop_secure_daemon
{
local command=$1
local daemonpidfile=$2
local privpidfile=$3
shift 3
local ret
hadoop_stop_daemon "${command}" "${daemonpidfile}"
ret=$?
rm -f "${daemonpidfile}" "${privpidfile}" 2>/dev/null
return ${ret}
}
function hadoop_daemon_handler
{
local daemonmode=$1
local daemonname=$2
local class=$3
local daemon_pidfile=$4
local daemon_outfile=$5
shift 5
case ${daemonmode} in
status)
hadoop_status_daemon "${daemon_pidfile}"
exit $?
;;
stop)
hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
exit $?
;;
##COMPAT -- older hadoops would also start daemons by default
start|default)
hadoop_verify_piddir
hadoop_verify_logdir
hadoop_status_daemon "${daemon_pidfile}"
if [[ $? == 0 ]]; then
hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first."
exit 1
else
# stale pid file, so just remove it and continue on
rm -f "${daemon_pidfile}" >/dev/null 2>&1
fi
##COMPAT - differentiate between --daemon start and nothing
# "nothing" shouldn't detach
if [[ "$daemonmode" = "default" ]]; then
hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@"
else
hadoop_start_daemon_wrapper "${daemonname}" \
"${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
fi
;;
esac
}
function hadoop_secure_daemon_handler
{
local daemonmode=$1
local daemonname=$2
local classname=$3
local daemon_pidfile=$4
local daemon_outfile=$5
local priv_pidfile=$6
local priv_outfile=$7
local priv_errfile=$8
shift 8
case ${daemonmode} in
status)
hadoop_status_daemon "${daemon_pidfile}"
exit $?
;;
stop)
hadoop_stop_secure_daemon "${daemonname}" \
"${daemon_pidfile}" "${priv_pidfile}"
exit $?
;;
##COMPAT -- older hadoops would also start daemons by default
start|default)
hadoop_verify_piddir
hadoop_verify_logdir
hadoop_status_daemon "${daemon_pidfile}"
if [[ $? == 0 ]]; then
hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first."
exit 1
else
# stale pid file, so just remove it and continue on
rm -f "${daemon_pidfile}" >/dev/null 2>&1
fi
##COMPAT - differentiate between --daemon start and nothing
# "nothing" shouldn't detach
if [[ "${daemonmode}" = "default" ]]; then
hadoop_start_secure_daemon "${daemonname}" "${classname}" \
"${daemon_pidfile}" "${daemon_outfile}" \
"${priv_errfile}" "${priv_pidfile}" "$@"
else
hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
"${daemon_pidfile}" "${daemon_outfile}" \
"${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
fi
;;
esac
}
function hadoop_verify_user
{
local command=$1
local uservar="HADOOP_${command}_USER"
if [[ -n ${!uservar} ]]; then
if [[ ${!uservar} != ${USER} ]]; then
hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
exit 1
fi
fi
}
function hadoop_do_classpath_subcommand
{
if [[ "$#" -gt 0 ]]; then
CLASS=org.apache.hadoop.util.Classpath
else
hadoop_finalize
echo "${CLASSPATH}"
exit 0
fi
}
|
#!/bin/bash -e
# Copyright 2019. IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IBM_POWERAI_VISION_LICENSE_ACCEPT=yes /opt/powerai-vision/bin/accept-powerai-vision-license.sh
/opt/powerai-vision/bin/powerai_vision_start.sh
|
#!/bin/bash
JAVA_HOME="/usr/local/opt/openjdk" exec "/usr/local/Cellar/hadoop/3.2.1_1/libexec/sbin/mr-jobhistory-daemon.sh" "$@"
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-shuffled-N-VB/7-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-shuffled-N-VB/7-1024+0+512-LMPI-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_within_sentences_low_pmi_first_two_thirds_full --eval_function last_element_eval |
APP_CREDENTIALS_FILE=~/.config/gcloud/application_default_credentials.json
PROJECT_ID=store-csv-file
ENTRY_POINT=moveCsvToHistoryFunction
TIMEOUT=540
FUNCTION_MEMORY=2048MB
TRIGGER_BUCKET_NAME=input-csv-storage-europe-west2
REGION=europe-west2
if [ ! -f "$APP_CREDENTIALS_FILE" ]; then
echo -e "Application default credentials ($APP_CREDENTIALS_FILE) don't exist, please finish the flow.\n"
gcloud auth application-default login
fi
rm -rf dist
yarn build
gcloud config set functions/region $REGION
gcloud config set project $PROJECT_ID
gcloud functions deploy $ENTRY_POINT \
--runtime=nodejs12 \
--memory=$FUNCTION_MEMORY \
--timeout=$TIMEOUT \
--trigger-resource=$TRIGGER_BUCKET_NAME \
--trigger-event=google.storage.object.finalize \
--region=$REGION \
--project=$PROJECT_ID
|
#!/bin/bash
python scripts/breeds/data_preprocessing.py && python scripts/breeds/build_model.py
|
#!/bin/bash
# Instructions!
# cd ~
# wget https://raw.githubusercontent.com/adafruit/Raspberry-Pi-Installer-Scripts/master/adafruit-pitft.sh
# chmod +x adafruit-pitft.sh
# sudo ./adafruit-pitft.sh
if [ $(id -u) -ne 0 ]; then
echo "Installer must be run as root."
echo "Try 'sudo bash $0'"
exit 1
fi
UPDATE_DB=false
############################ CALIBRATIONS ############################
# For TSLib
POINTERCAL_28r0="4232 11 -879396 1 5786 -752768 65536"
POINTERCAL_28r90="33 -5782 21364572 4221 35 -1006432 65536"
POINTERCAL_28r180="-4273 61 16441290 4 -5772 21627524 65536"
POINTERCAL_28r270="-9 5786 -784608 -4302 19 16620508 65536"
POINTERCAL_35r0="5724 -6 -1330074 26 8427 -1034528 65536"
POINTERCAL_35r90="5 8425 -978304 -5747 61 22119468 65536"
POINTERCAL_35r180="-5682 -1 22069150 13 -8452 32437698 65536"
POINTERCAL_35r270="3 -8466 32440206 5703 -1 -1308696 65536"
POINTERCAL_28c="320 65536 0 -65536 0 15728640 65536"
# for PIXEL desktop
TRANSFORM_28r0="0.988809 -0.023645 0.060523 -0.028817 1.003935 0.034176 0 0 1"
TRANSFORM_28r90="0.014773 -1.132874 1.033662 1.118701 0.009656 -0.065273 0 0 1"
TRANSFORM_28r180="-1.115235 -0.010589 1.057967 -0.005964 -1.107968 1.025780 0 0 1"
TRANSFORM_28r270="-0.033192 1.126869 -0.014114 -1.115846 0.006580 1.050030 0 0 1"
TRANSFORM_35r0="-1.098388 0.003455 1.052099 0.005512 -1.093095 1.026309 0 0 1"
TRANSFORM_35r90="-0.000087 1.094214 -0.028826 -1.091711 -0.004364 1.057821 0 0 1"
TRANSFORM_35r180="1.102807 0.000030 -0.066352 0.001374 1.085417 -0.027208 0 0 1"
TRANSFORM_35r270="0.003893 -1.087542 1.025913 1.084281 0.008762 -0.060700 0 0 1"
TRANSFORM_28c0="-1 0 1 0 -1 1 0 0 1"
TRANSFORM_28c90="0 1 0 -1 0 1 0 0 1"
TRANSFORM_28c180="1 0 0 0 1 0 0 0 1"
TRANSFORM_28c270="0 -1 1 1 0 0 0 0 1"
warning() {
echo "WARNING: $1"
}
############################ Script assisters ############################
# Given a list of strings representing options, display each option
# preceded by a number (1 to N), display a prompt, check input until
# a valid number within the selection range is entered.
selectN() {
for ((i=1; i<=$#; i++)); do
echo $i. ${!i}
done
echo
REPLY=""
while :
do
echo -n "SELECT 1-$#: "
read
if [[ $REPLY -ge 1 ]] && [[ $REPLY -le $# ]]; then
return $REPLY
fi
done
}
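# Illustrative usage (a sketch; the options are hypothetical):
#   selectN "Option A" "Option B" "Quit"
#   choice=$?   # 1-based index of the selection, returned via the exit status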
function print_version() {
echo "Adafruit PiTFT Helper v2.0.0"
exit 1
}
function print_help() {
echo "Usage: $0 "
echo " -h Print this help"
echo " -v Print version information"
echo " -u [homedir] Specify path of primary user's home directory (defaults to /home/pi)"
exit 1
}
group=ADAFRUIT
function info() {
system="$1"
group="${system}"
shift
FG="1;32m"
BG="40m"
echo -e "[\033[${FG}\033[${BG}${system}\033[0m] $*"
}
function bail() {
FG="1;31m"
BG="40m"
echo -en "[\033[${FG}\033[${BG}${group}\033[0m] "
if [ -z "$1" ]; then
echo "Exiting due to error"
else
echo "Exiting due to error: $*"
fi
exit 1
}
function ask() {
# http://djm.me/ask
while true; do
if [ "${2:-}" = "Y" ]; then
prompt="Y/n"
default=Y
elif [ "${2:-}" = "N" ]; then
prompt="y/N"
default=N
else
prompt="y/n"
default=
fi
# Ask the question
read -p "$1 [$prompt] " REPLY
# Default?
if [ -z "$REPLY" ]; then
REPLY=$default
fi
# Check if the reply is valid
case "$REPLY" in
Y*|y*) return 0 ;;
N*|n*) return 1 ;;
esac
done
}
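# Illustrative usage (the prompt is hypothetical): the second argument sets
# the default taken on a bare Enter.
#   if ask "Install extras?" "N"; then echo "installing"; fi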
function has_repo() {
# Checks for the right raspbian repository
# http://mirrordirector.raspbian.org/raspbian/ stretch main contrib non-free rpi firmware
if [[ $(grep -h ^deb /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "mirrordirector.raspbian.org") ]]; then
return 0
else
return 1
fi
}
progress() {
count=0
until [ $count -eq $1 ]; do
echo -n "..." && sleep 1
((count++))
done
echo
}
sysupdate() {
if ! $UPDATE_DB; then
# echo "Checking for correct software repositories..."
# has_repo || { warning "Missing Apt repo, please add deb http://mirrordirector.raspbian.org/raspbian/ stretch main contrib non-free rpi firmware to /etc/apt/sources.list.d/raspi.list" && exit 1; }
echo "Updating apt indexes..." && progress 3 &
sudo apt-get update 1> /dev/null || { warning "Apt failed to update indexes!" && exit 1; }
echo "Reading package lists..."
progress 3 && UPDATE_DB=true
fi
}
# Given a filename, a regex pattern to match and a replacement string,
# perform replacement if found, else append replacement to end of file.
# (# $1 = filename, $2 = pattern to match, $3 = replacement)
reconfig() {
    grep "$2" "$1" >/dev/null
    if [ $? -eq 0 ]; then
        # Pattern found; replace in file
        sed -i "s/$2/$3/g" "$1" >/dev/null
    else
        # Not found; append (silently)
        echo "$3" | sudo tee -a "$1" >/dev/null
    fi
}
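# Illustrative usage (mirrors the HDMI calls later in this script): replace
# any existing hdmi_force_hotplug line in /boot/config.txt, or append one.
#   reconfig /boot/config.txt "^.*hdmi_force_hotplug.*$" "hdmi_force_hotplug=1"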
############################ Sub-Scripts ############################
function softwareinstall() {
echo "Installing Pre-requisite Software...This may take a few minutes!"
apt-get install -y bc fbi git python-dev python-pip python-smbus python-spidev evtest tslib libts-bin 1> /dev/null || { warning "Apt failed to install software!" && exit 1; }
pip install evdev 1> /dev/null || { warning "Pip failed to install software!" && exit 1; }
}
# update /boot/config.txt with appropriate values
function update_configtxt() {
if grep -q "adafruit-pitft-helper" "/boot/config.txt"; then
echo "Already have an adafruit-pitft-helper section in /boot/config.txt."
echo "Removing old section..."
cp /boot/config.txt /boot/configtxt.bak
sed -i -e "/^# --- added by adafruit-pitft-helper/,/^# --- end adafruit-pitft-helper/d" /boot/config.txt
fi
if [ "${pitfttype}" == "22" ]; then
overlay="dtoverlay=pitft22,rotate=${pitftrot},speed=64000000,fps=30"
fi
if [ "${pitfttype}" == "28r" ]; then
overlay="dtoverlay=pitft28-resistive,rotate=${pitftrot},speed=64000000,fps=30"
fi
if [ "${pitfttype}" == "28c" ]; then
overlay="dtoverlay=pitft28-capacitive,rotate=${pitftrot},speed=64000000,fps=30"
fi
if [ "${pitfttype}" == "35r" ]; then
overlay="dtoverlay=pitft35-resistive,rotate=${pitftrot},speed=20000000,fps=20"
fi
date=`date`
cat >> /boot/config.txt <<EOF
# --- added by adafruit-pitft-helper $date ---
dtparam=spi=on
dtparam=i2c1=on
dtparam=i2c_arm=on
$overlay
# --- end adafruit-pitft-helper $date ---
EOF
}
function update_udev() {
cat > /etc/udev/rules.d/95-touchmouse.rules <<EOF
SUBSYSTEM=="input", ATTRS{name}=="touchmouse", ENV{DEVNAME}=="*event*", SYMLINK+="input/touchscreen"
EOF
cat > /etc/udev/rules.d/95-ftcaptouch.rules <<EOF
SUBSYSTEM=="input", ATTRS{name}=="EP0110M09", ENV{DEVNAME}=="*event*", SYMLINK+="input/touchscreen"
EOF
cat > /etc/udev/rules.d/95-stmpe.rules <<EOF
SUBSYSTEM=="input", ATTRS{name}=="*stmpe*", ENV{DEVNAME}=="*event*", SYMLINK+="input/touchscreen"
EOF
}
function update_pointercal() {
if [ "${pitfttype}" == "28r" ] || [ "${pitfttype}" == "35r" ]; then
echo $(eval echo "\$POINTERCAL_$pitfttype$pitftrot") > /etc/pointercal
fi
if [ "${pitfttype}" == "28c" ]; then
echo $(eval echo "\$POINTERCAL_$pitfttype") > /etc/pointercal
fi
}
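# Illustrative expansion (assuming pitfttype=28r and pitftrot=90): the eval
# indirection above resolves to $POINTERCAL_28r90, i.e. the matching
# calibration string from the CALIBRATIONS table at the top of this script.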
function install_console() {
echo "Set up main console turn on"
if ! grep -q 'fbcon=map:10 fbcon=font:VGA8x8' /boot/cmdline.txt; then
echo "Updating /boot/cmdline.txt"
sed -i 's/rootwait/rootwait fbcon=map:10 fbcon=font:VGA8x8/g' "/boot/cmdline.txt"
else
echo "/boot/cmdline.txt already updated"
fi
echo "Turning off console blanking"
# pre-stretch this is what you'd do:
if [ -e /etc/kbd/config ]; then
sed -i 's/BLANK_TIME=.*/BLANK_TIME=0/g' "/etc/kbd/config"
fi
# as of stretch....
# removing any old version
sed -i -e '/^# disable console blanking.*/d' /etc/rc.local
sed -i -e '/^sudo sh -c "TERM=linux setterm -blank.*/d' /etc/rc.local
sed -i -e "s|^exit 0|# disable console blanking on PiTFT\\nsudo sh -c \"TERM=linux setterm -blank 0 >/dev/tty0\"\\nexit 0|" /etc/rc.local
reconfig /etc/default/console-setup "^.*FONTFACE.*$" "FONTFACE=\"Terminus\""
reconfig /etc/default/console-setup "^.*FONTSIZE.*$" "FONTSIZE=\"6x12\""
echo "Setting raspi-config to boot to console w/o login..."
(cd ~pi && raspi-config nonint do_boot_behaviour B2)
# remove fbcp
sed -i -e "/^.*fbcp.*$/d" /etc/rc.local
}
function uninstall_console() {
echo "Removing console fbcon map from /boot/cmdline.txt"
sed -i 's/rootwait fbcon=map:10 fbcon=font:VGA8x8/rootwait/g' "/boot/cmdline.txt"
echo "Screen blanking time reset to 10 minutes"
if [ -e "/etc/kbd/config" ]; then
sed -i 's/BLANK_TIME=0/BLANK_TIME=10/g' "/etc/kbd/config"
fi
sed -i -e '/^# disable console blanking.*/d' /etc/rc.local
sed -i -e '/^sudo sh -c "TERM=linux.*/d' /etc/rc.local
}
function install_fbcp() {
echo "Installing cmake..."
apt-get --yes --force-yes install cmake 1> /dev/null || { warning "Apt failed to install software!" && exit 1; }
echo "Downloading rpi-fbcp..."
cd /tmp
#curl -sLO https://github.com/tasanakorn/rpi-fbcp/archive/master.zip
curl -sLO https://github.com/adafruit/rpi-fbcp/archive/master.zip
echo "Uncompressing rpi-fbcp..."
rm -rf /tmp/rpi-fbcp-master
unzip master.zip 1> /dev/null || { warning "Failed to uncompress fbcp!" && exit 1; }
cd rpi-fbcp-master
mkdir build
cd build
echo "Building rpi-fbcp..."
echo -e "\nset (CMAKE_C_FLAGS \"-std=gnu99 ${CMAKE_C_FLAGS}\")" >> ../CMakeLists.txt
cmake .. 1> /dev/null || { warning "Failed to cmake fbcp!" && exit 1; }
make 1> /dev/null || { warning "Failed to make fbcp!" && exit 1; }
echo "Installing rpi-fbcp..."
install fbcp /usr/local/bin/fbcp
cd ~
rm -rf /tmp/rpi-fbcp-master
# Start fbcp in the appropriate place, depending on init system:
if [ "$SYSTEMD" == "0" ]; then
# Add fbcp to /etc/rc.local:
echo "We have sysvinit, so add fbcp to /etc/rc.local..."
grep fbcp /etc/rc.local >/dev/null
if [ $? -eq 0 ]; then
# fbcp already in rc.local, but make sure correct:
sed -i "s|^.*fbcp.*$|/usr/local/bin/fbcp \&|g" /etc/rc.local >/dev/null
else
# Insert fbcp into rc.local before final 'exit 0':
sed -i "s|^exit 0|/usr/local/bin/fbcp \&\\nexit 0|g" /etc/rc.local >/dev/null
fi
else
# Install fbcp systemd unit, first making sure it's not in rc.local:
uninstall_fbcp_rclocal
echo "We have systemd, so install fbcp systemd unit..."
install_fbcp_unit || bail "Unable to install fbcp unit file"
sudo systemctl enable fbcp.service
fi
# if there's X11 installed...
if [ -e /etc/lightdm ]; then
echo "Setting raspi-config to boot to desktop w/o login..."
raspi-config nonint do_boot_behaviour B4
fi
# Disable overscan compensation (use full screen):
raspi-config nonint do_overscan 1
# Set up HDMI parameters:
echo "Configuring boot/config.txt for forced HDMI"
reconfig /boot/config.txt "^.*hdmi_force_hotplug.*$" "hdmi_force_hotplug=1"
reconfig /boot/config.txt "^.*hdmi_group.*$" "hdmi_group=2"
reconfig /boot/config.txt "^.*hdmi_mode.*$" "hdmi_mode=87"
# if there's X11 installed...
if [ -e /etc/lightdm ]; then
if [ "${pitfttype}" == "35r" ]; then
echo "Using x1.5 resolution"
SCALE=1.5
else
echo "Using x2 resolution"
SCALE=2.0
fi
else
echo "Using native resolution"
SCALE=1
fi
WIDTH=`python -c "print(int(${WIDTH_VALUES[PITFT_SELECT-1]} * ${SCALE}))"`
HEIGHT=`python -c "print(int(${HEIGHT_VALUES[PITFT_SELECT-1]} * ${SCALE}))"`
reconfig /boot/config.txt "^.*hdmi_cvt.*$" "hdmi_cvt=${WIDTH} ${HEIGHT} 60 1 0 0 0"
if [ "${pitftrot}" == "90" ] || [ "${pitftrot}" == "270" ]; then
        # don't rotate HDMI on 90 or 270
reconfig /boot/config.txt "^.*display_hdmi_rotate.*$" ""
fi
if [ "${pitftrot}" == "0" ]; then
reconfig /boot/config.txt "^.*display_hdmi_rotate.*$" "display_hdmi_rotate=1"
# this is a hack but because we rotate HDMI we have to 'unrotate' the TFT!
pitftrot=90
update_configtxt || bail "Unable to update /boot/config.txt"
pitftrot=0
fi
if [ "${pitftrot}" == "180" ]; then
reconfig /boot/config.txt "^.*display_hdmi_rotate.*$" "display_hdmi_rotate=3"
# this is a hack but because we rotate HDMI we have to 'unrotate' the TFT!
pitftrot=90
update_configtxt || bail "Unable to update /boot/config.txt"
pitftrot=180
fi
}
function install_fbcp_unit() {
cat > /etc/systemd/system/fbcp.service <<EOF
[Unit]
Description=Framebuffer copy utility for PiTFT
After=network.target
[Service]
Type=simple
ExecStart=/usr/local/bin/fbcp
[Install]
WantedBy=multi-user.target
EOF
}
function uninstall_fbcp() {
uninstall_fbcp_rclocal
# Enable overscan compensation
raspi-config nonint do_overscan 0
# Set up HDMI parameters:
echo "Configuring boot/config.txt for default HDMI"
reconfig /boot/config.txt "^.*hdmi_force_hotplug.*$" "hdmi_force_hotplug=0"
sed -i -e '/^hdmi_group=2.*$/d' /boot/config.txt
sed -i -e '/^hdmi_mode=87.*$/d' /boot/config.txt
sed -i -e '/^hdmi_cvt=.*$/d' /boot/config.txt
}
function uninstall_fbcp_rclocal() {
# Remove fbcp from /etc/rc.local:
echo "Remove fbcp from /etc/rc.local, if it's there..."
sed -i -e '/^.*fbcp.*$/d' /etc/rc.local
}
function update_xorg() {
if [ "${pitfttype}" == "28r" ] || [ "${pitfttype}" == "35r" ]; then
matrix=$(eval echo "\$TRANSFORM_$pitfttype$pitftrot")
transform="Option \"TransformationMatrix\" \"${matrix}\""
cat > /usr/share/X11/xorg.conf.d/20-calibration.conf <<EOF
Section "InputClass"
Identifier "STMPE Touchscreen Calibration"
MatchProduct "stmpe"
MatchDevicePath "/dev/input/event*"
Driver "libinput"
${transform}
EndSection
EOF
fi
if [ "${pitfttype}" == "28c" ]; then
matrix=$(eval echo "\$TRANSFORM_$pitfttype$pitftrot")
transform="Option \"TransformationMatrix\" \"${matrix}\""
cat > /usr/share/X11/xorg.conf.d/20-calibration.conf <<EOF
Section "InputClass"
Identifier "FocalTech Touchscreen Calibration"
MatchProduct "EP0110M09"
MatchDevicePath "/dev/input/event*"
Driver "libinput"
${transform}
EndSection
EOF
fi
}
####################################################### MAIN
target_homedir="/home/pi"
clear
echo "This script downloads and installs"
echo "PiTFT Support using userspace touch"
echo "controls and a DTO for display drawing."
echo "one of several configuration files."
echo "Run time of up to 5 minutes. Reboot required!"
echo
echo "Select configuration:"
selectN "PiTFT 2.4\", 2.8\" or 3.2\" resistive (240x320)" \
"PiTFT 2.2\" no touch (240x320)" \
"PiTFT 2.8\" capacitive touch (240x320)" \
"PiTFT 3.5\" resistive touch (320x480)" \
"Quit without installing"
PITFT_SELECT=$?
if [ $PITFT_SELECT -gt 4 ]; then
exit 1
fi
echo "Select rotation:"
selectN "90 degrees (landscape)" \
"180 degrees (portait)" \
"270 degrees (landscape)" \
"0 degrees (portait)"
PITFT_ROTATE=$?
if [ $PITFT_ROTATE -gt 4 ]; then
exit 1
fi
PITFT_ROTATIONS=("90" "180" "270" "0")
PITFT_TYPES=("28r" "22" "28c" "35r")
WIDTH_VALUES=(320 320 320 480)
HEIGHT_VALUES=(240 240 240 320)
HZ_VALUES=(64000000 64000000 64000000 32000000)
args=$(getopt -uo 'hvri:o:b:u:' -- $*)
[ $? != 0 ] && print_help
set -- $args
for i
do
case "$i"
in
-h)
print_help
;;
-v)
print_version
;;
-u)
target_homedir="$2"
echo "Homedir = ${2}"
shift
shift
;;
esac
done
# check init system (technique borrowed from raspi-config):
info PITFT 'Checking init system...'
if command -v systemctl > /dev/null && systemctl | grep -q '\-\.mount'; then
echo "Found systemd"
SYSTEMD=1
elif [ -f /etc/init.d/cron ] && [ ! -h /etc/init.d/cron ]; then
echo "Found sysvinit"
SYSTEMD=0
else
bail "Unrecognised init system"
fi
if grep -q boot /proc/mounts; then
echo "/boot is mounted"
else
echo "/boot must be mounted. if you think it's not, quit here and try: sudo mount /dev/mmcblk0p1 /boot"
if ask "Continue?"; then
echo "Proceeding."
else
bail "Aborting."
fi
fi
if [[ ! -e "$target_homedir" || ! -d "$target_homedir" ]]; then
bail "$target_homedir must be an existing directory (use -u /home/foo to specify)"
fi
pitfttype=${PITFT_TYPES[$PITFT_SELECT-1]}
pitftrot=${PITFT_ROTATIONS[$PITFT_ROTATE-1]}
if [ "${pitfttype}" != "28r" ] && [ "${pitfttype}" != "28c" ] && [ "${pitfttype}" != "35r" ] && [ "${pitfttype}" != "22" ]; then
echo "Type must be one of:"
echo " '28r' (2.8\" resistive, PID 1601)"
echo " '28c' (2.8\" capacitive, PID 1983)"
echo " '35r' (3.5\" Resistive)"
echo " '22' (2.2\" no touch)"
echo
print_help
fi
info PITFT "System update"
sysupdate || bail "Unable to apt-get update"
info PITFT "Installing Python libraries & Software..."
softwareinstall || bail "Unable to install software"
info PITFT "Updating /boot/config.txt..."
update_configtxt || bail "Unable to update /boot/config.txt"
if [ "${pitfttype}" == "28r" ] || [ "${pitfttype}" == "35r" ] || [ "${pitfttype}" == "28c" ] ; then
info PITFT "Updating SysFS rules for Touchscreen..."
update_udev || bail "Unable to update /etc/udev/rules.d"
info PITFT "Updating TSLib default calibration..."
update_pointercal || bail "Unable to update /etc/pointercal"
fi
# ask for console access
if ask "Would you like the console to appear on the PiTFT display?"; then
info PITFT "Updating console to PiTFT..."
uninstall_fbcp || bail "Unable to uninstall fbcp"
install_console || bail "Unable to configure console"
else
info PITFT "Making sure console doesn't use PiTFT"
uninstall_console || bail "Unable to configure console"
if ask "Would you like the HDMI display to mirror to the PiTFT display?"; then
info PITFT "Adding FBCP support..."
install_fbcp || bail "Unable to configure fbcp"
if [ -e /etc/lightdm ]; then
info PITFT "Updating X11 default calibration..."
update_xorg || bail "Unable to update calibration"
fi
fi
fi
#info PITFT "Updating X11 setup tweaks..."
#update_x11profile || bail "Unable to update X11 setup"
#if [ "${pitfttype}" != "35r" ]; then
# # ask for 'on/off' button
# if ask "Would you like GPIO #23 to act as a on/off button?"; then
# info PITFT "Adding GPIO #23 on/off to PiTFT..."
# install_onoffbutton || bail "Unable to add on/off button"
# fi
#fi
# update_bootprefs || bail "Unable to set boot preferences"
info PITFT "Success!"
echo
echo "Settings take effect on next boot."
echo
echo -n "REBOOT NOW? [y/N] "
read
if [[ ! "$REPLY" =~ ^(yes|y|Y)$ ]]; then
echo "Exiting without reboot."
exit 0
fi
echo "Reboot started..."
reboot
exit 0
|
#!/bin/bash
docker-compose up # start the container
|
#!/bin/bash
# Set bash to strict mode: exit on error (-e), treat undefined variables
# as errors (-u), and fail a pipeline if any command in it fails (-o pipefail).
set -e
set -u
set -o pipefail
# xrun: run a command with tracing (-x) so it is echoed before executing
function xrun () {
    set -x
    "$@"
    set +x
}
script_dir=$(cd $(dirname ${BASH_SOURCE:-$0}); pwd)
NNSVS_ROOT=$script_dir/../../../
NNSVS_COMMON_ROOT=$NNSVS_ROOT/egs/_common/spsvs
. $NNSVS_ROOT/utils/yaml_parser.sh || exit 1;
eval $(parse_yaml "./config.yaml" "")
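# parse_yaml flattens config.yaml keys into the shell variables used below
# ($spk, $db_root, $out_dir, $tag, ...). A minimal config.yaml sketch, with
# assumed values for illustration only:
#   spk: "nit-song070"
#   db_root: "downloads/HTS-demo_NIT-SONG070-F001"
#   out_dir: "data"
#   tag: ""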
train_set="train_no_dev"
dev_set="dev"
eval_set="eval"
datasets=($train_set $dev_set $eval_set)
testsets=($eval_set)
dumpdir=dump
dump_org_dir=$dumpdir/$spk/org
dump_norm_dir=$dumpdir/$spk/norm
stage=0
stop_stage=0
. $NNSVS_ROOT/utils/parse_options.sh || exit 1;
# exp name
if [ -z ${tag:=} ]; then
expname=${spk}
else
expname=${spk}_${tag}
fi
expdir=exp/$expname
if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
if [ ! -e downloads/HTS-demo_NIT-SONG070-F001 ]; then
echo "stage -1: Downloading data"
mkdir -p downloads
cd downloads
curl -LO http://hts.sp.nitech.ac.jp/archives/2.3/HTS-demo_NIT-SONG070-F001.tar.bz2
tar jxvf HTS-demo_NIT-SONG070-F001.tar.bz2
cd $script_dir
fi
fi
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
echo "stage 0: Data preparation"
# the following three directories will be created
# 1) data/timelag 2) data/duration 3) data/acoustic
python local/data_prep.py $db_root $out_dir --gain-normalize
# Pitch data augmentation (in cent)
for cent in -100 100
do
# timelag
for typ in label_phone_align label_phone_score
do
python $NNSVS_COMMON_ROOT/../pitch_augmentation.py $out_dir/timelag/$typ $out_dir/timelag/$typ \
$cent --filter_augmented_files
done
# duration
for typ in label_phone_align
do
python $NNSVS_COMMON_ROOT/../pitch_augmentation.py $out_dir/duration/$typ $out_dir/duration/$typ \
$cent --filter_augmented_files
done
# acoustic
for typ in wav label_phone_align label_phone_score
do
python $NNSVS_COMMON_ROOT/../pitch_augmentation.py $out_dir/acoustic/$typ $out_dir/acoustic/$typ \
$cent --filter_augmented_files
done
done
# Tempo data augmentation
for tempo in 0.9 1.1
do
# timelag
for typ in label_phone_align label_phone_score
do
python $NNSVS_COMMON_ROOT/../tempo_augmentation.py $out_dir/timelag/$typ $out_dir/timelag/$typ \
$tempo --filter_augmented_files
done
# duration
for typ in label_phone_align
do
python $NNSVS_COMMON_ROOT/../tempo_augmentation.py $out_dir/duration/$typ $out_dir/duration/$typ \
$tempo --filter_augmented_files
done
# acoustic
for typ in wav label_phone_align label_phone_score
do
python $NNSVS_COMMON_ROOT/../tempo_augmentation.py $out_dir/acoustic/$typ $out_dir/acoustic/$typ \
$tempo --filter_augmented_files
done
done
echo "train/dev/eval split"
mkdir -p data/list
find data/acoustic/ -type f -name "*.wav" -exec basename {} .wav \; \
| sort > data/list/utt_list.txt
grep _003 data/list/utt_list.txt > data/list/$eval_set.list
grep _004 data/list/utt_list.txt > data/list/$dev_set.list
grep -v _003 data/list/utt_list.txt | grep -v _004 > data/list/$train_set.list
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
echo "stage 1: Feature generation"
. $NNSVS_COMMON_ROOT/feature_generation.sh
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
echo "stage 2: Training time-lag model"
. $NNSVS_COMMON_ROOT/train_timelag.sh
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
echo "stage 3: Training duration model"
. $NNSVS_COMMON_ROOT/train_duration.sh
fi
if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
echo "stage 4: Training acoustic model"
. $NNSVS_COMMON_ROOT/train_acoustic.sh
fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
echo "stage 5: Generate features from timelag/duration/acoustic models"
. $NNSVS_COMMON_ROOT/generate.sh
fi
if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
echo "stage 6: Synthesis waveforms"
. $NNSVS_COMMON_ROOT/synthesis.sh
fi
|
package set summary "High performance key/value database"
package set webpage "https://www.oracle.com/database/berkeley-db"
package set src.url "https://fossies.org/linux/misc/db-18.1.40.tar.gz"
package set src.sum "0cecb2ef0c67b166de93732769abdeba0555086d51de1090df325e18ee8da9c8"
package set license "BSD 3-Clause"
package set bsystem "configure"
package set dep.pkg "openssl"
package set sourced "dist"
prepare() {
sed_in_place 's| install_docs||' Makefile.in
}
build() {
configure \
--enable-cxx \
--enable-stl \
--enable-dbm \
--enable-hash \
--enable-compat185 \
--enable-smallbuild \
db_cv_atomic=gcc-builtin
}
|
#!/bin/sh
set -x
export SIMPLE_SETTINGS=settings
$WORKER worker --web-port=$WORKER_PORT
|
#!/bin/bash
bold=$(tput bold)
normal=$(tput sgr0)
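# Pick a screen invocation that can capture a log: newer releases support an
# explicit -Logfile option, 4.5 logs with plain -L, and older versions need
# -L -t (mapping assumed from the minor-version check below).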
screen_minor=$(screen --version | cut -d . -f 2)
if [ "$screen_minor" -gt 5 ]; then
screen_with_log="sudo screen -L -Logfile"
elif [ "$screen_minor" -eq 5 ]; then
screen_with_log="sudo screen -L"
else
screen_with_log="sudo screen -L -t"
fi
. ./config.txt
./stop_flash.sh >/dev/null
pushd scripts >/dev/null || exit
. ./setup_checks.sh
echo "======================================================"
echo -n " Starting AP in a screen"
$screen_with_log smarthack-wifi.log -S smarthack-wifi -m -d ./setup_ap.sh
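# Wait until the access point's gateway answers ping, i.e. the AP is up.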
while ! ping -c 1 -W 1 -n "$GATEWAY" &> /dev/null; do
printf .
done
echo
sleep 5
echo " Starting web server in a screen"
$screen_with_log smarthack-web.log -S smarthack-web -m -d ./fake-registration-server.py
echo " Starting Mosquitto in a screen"
$screen_with_log smarthack-mqtt.log -S smarthack-mqtt -m -d mosquitto -v
echo " Starting PSK frontend in a screen"
$screen_with_log smarthack-psk.log -S smarthack-psk -m -d ./psk-frontend.py -v
echo " Starting Tuya Discovery in a screen"
$screen_with_log smarthack-udp.log -S smarthack-udp -m -d ./tuya-discovery.py
echo
REPLY=y
while [[ $REPLY =~ ^[Yy]$ ]]; do
echo "======================================================"
echo
echo "IMPORTANT"
echo "1. Connect any other device (a smartphone or something) to the WIFI $AP"
echo " This step is IMPORTANT otherwise the smartconfig may not work!"
echo "2. Put your IoT device in autoconfig/smartconfig/pairing mode (LED will blink fast). This is usually done by pressing and holding the primary button of the device"
echo " Make sure nothing else is plugged into your IoT device while attempting to flash."
echo "3. Press ${bold}ENTER${normal} to continue"
read -r
echo
echo "======================================================"
echo "Starting smart config pairing procedure"
./smartconfig/main.py &
echo "Waiting for the device to install the intermediate firmware"
i=120
while ! ping -c 1 -W 1 -n 10.42.42.42 &> /dev/null; do
printf .
if (( --i == 0 )); then
echo
echo "Device did not appear with the intermediate firmware"
echo "Check the *.log files in the scripts folder"
pkill -f smartconfig/main.py && echo "Stopping smart config"
read -p "Do you want to try flashing another device? [y/N] " -n 1 -r
echo
continue 2
fi
done
echo
echo "IoT-device is online with ip 10.42.42.42"
pkill -f smartconfig/main.py && echo "Stopping smart config"
echo "Fetching firmware backup"
sleep 2
timestamp=$(date +%Y%m%d_%H%M%S)
backupfolder="../backups/$timestamp"
mkdir -p "$backupfolder"
pushd "$backupfolder" >/dev/null || exit
curl -JO http://10.42.42.42/backup
echo "======================================================"
echo "Getting Info from IoT-device"
curl -s http://10.42.42.42 | tee device-info.txt
popd >/dev/null || exit
echo "======================================================"
echo "Ready to flash third party firmware!"
echo
echo "For your convenience, the following firmware images are already included in this repository:"
echo " Tasmota v7.0.0.3 (wifiman)"
echo " ESPurna 1.13.5 (base)"
echo
echo "You can also provide your own image by placing it in the /files directory"
echo "Please ensure the firmware fits the device and includes the bootloader"
echo "MAXIMUM SIZE IS 512KB"
./firmware_picker.sh
echo "======================================================"
read -p "Do you want to flash another device? [y/N] " -n 1 -r
echo
sudo mv *.log "$backupfolder/"
done
echo "Exiting..."
popd >/dev/null || exit
./stop_flash.sh >/dev/null
|
#!/bin/sh
# description: Testing polecat script
ROOTFS=$SHARED_DIR/rootfs
if [ ! -d "$ROOTFS" ]; then
echo "No rootfs found"
test_unresolved
fi
OUTSH=$(mktemp /tmp/polecat-XXXXX.sh)
./polecat -o $OUTSH $ROOTFS ps
$OUTSH | grep '^[ \t]*1.*ps$'
rm $OUTSH
|
#!/bin/bash -e
test_dirs="megengine test"
TEST_PLAT=$1
if [[ "$TEST_PLAT" == cpu ]]; then
echo "only test cpu pytest"
elif [[ "$TEST_PLAT" == cuda ]]; then
echo "test both cpu and gpu pytest"
else
echo "Argument must cpu or cuda"
exit 1
fi
export MEGENGINE_LOGGING_LEVEL="ERROR"
pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest -v $test_dirs -m 'not isolated_distributed'
if [[ "$TEST_PLAT" == cuda ]]; then
echo "test GPU pytest now"
PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest -v $test_dirs -m 'isolated_distributed'
fi
popd >/dev/null
|
# Static parameters
WORKSPACE=./
BOX_PLAYBOOK=$WORKSPACE/box.yml
BOX_NAME=seventeen
BOX_ADDRESS=192.168.0.20
BOX_USER=slavko
BOX_PWD=
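# The heredoc below scripts prudentia's interactive shell: it re-registers the
# box using the playbook/address/user above, raises verbosity, and provisions it.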
prudentia ssh <<EOF
unregister $BOX_NAME
register
$BOX_PLAYBOOK
$BOX_NAME
$BOX_ADDRESS
$BOX_USER
$BOX_PWD
verbose 4
set box_address $BOX_ADDRESS
provision $BOX_NAME
EOF
|
#!/bin/bash
set -o errexit
set -o xtrace
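# Generates frames_pb2.py next to frames.proto for use from Python.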
protoc -I . --python_out . frames.proto
|
PARAMS="s_m0=6.9 b2=3"
nohup python gillespie.py lo gillespie/sl_quiet_lo_72.h5 $PARAMS s_m=0.3 V=2000 &
nohup python gillespie.py lo gillespie/sl_quiet_lo_75.h5 $PARAMS s_m=0.6 V=2000 &
nohup python gillespie.py lo gillespie/sl_quiet_lo_78.h5 $PARAMS s_m=0.9 V=2000 &
nohup python gillespie.py lo gillespie/sl_med_lo_72.h5 $PARAMS s_m=0.3 V=1000 &
nohup python gillespie.py lo gillespie/sl_med_lo_75.h5 $PARAMS s_m=0.6 V=1000 &
nohup python gillespie.py lo gillespie/sl_med_lo_78.h5 $PARAMS s_m=0.9 V=1000 &
nohup python gillespie.py lo gillespie/sl_loud_lo_72.h5 $PARAMS s_m=0.3 V=500 &
nohup python gillespie.py lo gillespie/sl_loud_lo_75.h5 $PARAMS s_m=0.6 V=500 &
nohup python gillespie.py lo gillespie/sl_loud_lo_78.h5 $PARAMS s_m=0.9 V=500 &
nohup python gillespie.py lo gillespie/sl_vloud_lo_72.h5 $PARAMS s_m=0.3 V=75 &
nohup python gillespie.py lo gillespie/sl_vloud_lo_75.h5 $PARAMS s_m=0.6 V=75 &
nohup python gillespie.py lo gillespie/sl_vloud_lo_78.h5 $PARAMS s_m=0.9 V=75 &
nohup python gillespie.py hi gillespie/sl_quiet_hi_72.h5 $PARAMS s_m=0.3 V=2000 &
nohup python gillespie.py hi gillespie/sl_quiet_hi_75.h5 $PARAMS s_m=0.6 V=2000 &
nohup python gillespie.py hi gillespie/sl_quiet_hi_78.h5 $PARAMS s_m=0.9 V=2000 &
nohup python gillespie.py hi gillespie/sl_med_hi_72.h5 $PARAMS s_m=0.3 V=1000 &
nohup python gillespie.py hi gillespie/sl_med_hi_75.h5 $PARAMS s_m=0.6 V=1000 &
nohup python gillespie.py hi gillespie/sl_med_hi_78.h5 $PARAMS s_m=0.9 V=1000 &
nohup python gillespie.py hi gillespie/sl_loud_hi_72.h5 $PARAMS s_m=0.3 V=500 &
nohup python gillespie.py hi gillespie/sl_loud_hi_75.h5 $PARAMS s_m=0.6 V=500 &
nohup python gillespie.py hi gillespie/sl_loud_hi_78.h5 $PARAMS s_m=0.9 V=500 &
nohup python gillespie.py hi gillespie/sl_vloud_hi_72.h5 $PARAMS s_m=0.3 V=75 &
nohup python gillespie.py hi gillespie/sl_vloud_hi_75.h5 $PARAMS s_m=0.6 V=75 &
nohup python gillespie.py hi gillespie/sl_vloud_hi_78.h5 $PARAMS s_m=0.9 V=75 &
|
#!/bin/bash
set -e
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
## include settings file
. $SCRIPT_DIR/env.sh
rebuild=0
for i in "$@"; do
case $i in
--rebuild) rebuild=1
shift # past argument with no value
;;
*) ;; # unknown option
esac
done
echo "Params: $@"
if [ $rebuild -ne 0 ]; then
echo "Cleaning"
$SCRIPT_DIR/clean.sh
$SCRIPT_DIR/configure.sh
fi
if [ ! -e $RELEASE_DIR ]; then
$SCRIPT_DIR/configure.sh
fi
if [ ! -e $DEBUG_DIR ]; then
$SCRIPT_DIR/configure.sh
fi
cd $RELEASE_DIR
make $@
echo ""
cd $DEBUG_DIR
make $@
echo -e "\nDone"
|
#!/bin/bash
set -x
fission fn delete --name fortune
fission fn delete --name whalesay
fission fn delete --name fortunewhale
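# Delete the workflow/router/executor pods so that their controllers recreate
# them, effectively restarting those services with a clean state.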
kubectl -n fission-function delete $(kubectl -n fission-function get po -o name | grep workflow)
kubectl -n fission delete $(kubectl -n fission get po -o name | grep router)
kubectl -n fission delete $(kubectl -n fission get po -o name | grep executor)
|
#!/usr/bin/env bash
set -euxo pipefail
Output_path=$1
samtools_path=$2
fasta_name=$3
tabix_path=$4
gff3_name=$5
cd $Output_path
echo "Indexing fasta..."
$samtools_path faidx $fasta_name
echo "Indexing gff3 file..."
sort -k1,1 -k4,4n $gff3_name > ${gff3_name}.sorted
rm -f $gff3_name
mv ${gff3_name}.sorted ${gff3_name}
bgzip < ${gff3_name} > ${gff3_name}.gz
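# -C writes a CSI index (.csi), which supports longer contigs than the default TBI.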
$tabix_path -C -p gff ${gff3_name}.gz
|
#!/usr/bin/env bash
./gradlew clean buildNeeded javadocJar dependencyUpdates -Drevision=release
|
#!/bin/bash
for model_carbon in bcc_csm1_1_m/rcp45 bcc_csm1_1_m/rcp85 BNU_ESM/rcp45 BNU_ESM/rcp85 CanESM2/rcp45 CanESM2/rcp85 CNRM_CM5/rcp45 CNRM_CM5/rcp85 GFDL_ESM2G/rcp45 GFDL_ESM2G/rcp85 GFDL_ESM2M/rcp45 GFDL_ESM2M/rcp85
do
cd /home/hnoorazar/analog_codes/03_find_analogs/R_codes/matchit/$model_carbon
for (( i = 1; i <= 295; i++ ))
do
qsub ./qsub_set$i
done
cd ..
done
|
#!/bin/bash
FN="HEEBOdata_1.24.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/experiment/src/contrib/HEEBOdata_1.24.0.tar.gz"
"https://bioarchive.galaxyproject.org/HEEBOdata_1.24.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-heebodata/bioconductor-heebodata_1.24.0_src_all.tar.gz"
)
MD5="ff65a1bed30a5296542b7d0f81ab2f95"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
# md5sum's check format needs two spaces between checksum and filename
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
if [[ -z "${PROJECT_ROOT:-}" ]]; then
PROJECT_ROOT="github/python-dataproc"
fi
cd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
# Debug: show build environment
env | grep KOKORO
# Setup service account credentials.
export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
# Setup project id.
export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
# Remove old nox
python3 -m pip uninstall --yes --quiet nox-automation
# Install nox
python3 -m pip install --upgrade --quiet nox
python3 -m nox --version
# If this is a continuous build, send the test log to the FlakyBot.
# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
cleanup() {
chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
$KOKORO_GFILE_DIR/linux_amd64/flakybot
}
trap cleanup EXIT HUP
fi
# If NOX_SESSION is set, it only runs the specified session,
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
python3 -m nox -s ${NOX_SESSION:-}
else
python3 -m nox
fi
|
#!/bin/sh
# open-onedrive.sh
#
#
# Created by Jacob F. Grant
#
# Created: 08/04/17
# Updated: 08/28/17
#
# Open OneDrive only if the app is installed and a OneDrive folder exists;
# the grouping keeps the || alternatives from bypassing the app check.
if
[ -d "/Applications/OneDrive.app" ] &&
{ [ -d /Users/$USER/OneDrive* ] ||
[ -d /Users/$USER/Documents/OneDrive* ]; }
then
open "/Applications/OneDrive.app"
fi
|
# platform = Red Hat Enterprise Linux 7,Red Hat Enterprise Linux 8,multi_platform_fedora,multi_platform_ol
{{{ bash_dconf_settings("org/gnome/login-screen", "allowed-failures", "3", "gdm.d", "00-security-settings") }}}
{{{ bash_dconf_lock("org/gnome/login-screen", "allowed-failures", "gdm.d", "00-security-settings-lock") }}}
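# A sketch of what these macros are expected to render (assumed from their
# names and arguments): a dconf keyfile plus a lock, followed by `dconf update`:
#   /etc/dconf/db/gdm.d/00-security-settings:
#       [org/gnome/login-screen]
#       allowed-failures=3
#   /etc/dconf/db/gdm.d/locks/00-security-settings-lock:
#       /org/gnome/login-screen/allowed-failures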
|
#!/usr/bin/env bash
# manual Grafana installation for WLANPi RPi edition
if [ $EUID -ne 0 ]; then
echo "This script must be run as root"
exit 1
fi
set -e
source .env
echo ""
echo "* ========================="
echo "* Installing Grafana..."
echo "* ========================="
echo "* Installing pre-req packages."
sudo apt-get install -y adduser libfontconfig1
echo "* Downloading Grafana."
wget https://dl.grafana.com/oss/release/grafana_8.0.5_armhf.deb
echo "* Installing Grafana."
sudo dpkg -i grafana_8.0.5_armhf.deb
# remove requirement to set default admin pwd & change default user/pwd to wlanpi/wlanpi
echo "* Customizing Grafana."
sudo sed -i 's/;disable_initial_admin_creation/disable_initial_admin_creation/g' /etc/grafana/grafana.ini
sudo sed -i 's/;admin_user = admin/admin_user = '"$GRAFANA_USER"'/g' /etc/grafana/grafana.ini
sudo sed -i 's/;admin_password = admin/admin_password = '"$GRAFANA_PWD"'/g' /etc/grafana/grafana.ini
# set grafana to listen on port GRAFANA_PORT
sudo sed -i 's/;http_port = 3000/http_port = '"$GRAFANA_PORT"'/g' /etc/grafana/grafana.ini
# open port on ufw firewall
echo "* Opening FW port for Grafana."
sudo ufw allow ${GRAFANA_PORT}
# take care of grafana service
echo "* Enabling & starting Grafana service."
sudo systemctl daemon-reload
sudo systemctl start grafana-server
sudo systemctl enable grafana-server
# display status of service
echo "* Grafana service status:"
sudo systemctl status --no-pager -l grafana-server | head -n 10
echo "* Grafana Done."
echo ""
echo "* ========================="
echo "* Installing InfluxDB..."
echo "* ========================="
echo "* Getting InfluxDB code...."
sudo wget -qO- https://repos.influxdata.com/influxdb.key | sudo apt-key add -
echo "deb https://repos.influxdata.com/debian buster stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
sudo apt update
echo "* Installing InfluxDB code...."
sudo apt install -y influxdb
sudo chown influxdb:influxdb /usr/lib/influxdb/scripts/influxd-systemd-start.sh
echo "* Enabling & starting InfluxDB service."
sudo systemctl unmask influxdb.service
sudo systemctl enable influxdb
sudo systemctl start influxdb
# display status of service
echo "* InfluxDB service status:"
sudo systemctl status --no-pager -l influxdb | head -n 10
echo ""
echo "* ========================="
echo "* Configuring InfluxDB..."
echo "* ========================="
echo "* Creating DB & users..."
influx -execute "create database wlanpi"
influx -execute "create retention policy wiperf_30_days on wlanpi duration 30d replication 1" -database wlanpi
influx -execute "create user $DB_USER with password '$DB_PWD' with all privileges" -database wlanpi
sudo sed -i 's/# auth-enabled = false/auth-enabled = true/g' /etc/influxdb/influxdb.conf
sudo systemctl restart influxdb
# add data source to Grafana
echo "* Adding DB as data source to Grafana..."
sudo cp influx_datasource.yaml /etc/grafana/provisioning/datasources/
sudo systemctl restart grafana-server
# add dashboard to Grafana
echo "* Adding dashboard to Grafana..."
sudo cp dashboard/WLAN_Pi_Monitor.json /usr/share/grafana/public/dashboards/
sudo cp import_dashboard.yaml /etc/grafana/provisioning/dashboards/
sudo systemctl restart grafana-server
# set crontab
echo "* adding crontab job to start polling..."
if crontab -u wlanpi -l &>/dev/null; then
# Keep existing cron jobs and add ours
{ crontab -u wlanpi -l 2>/dev/null; echo "*/1 * * * * /home/wlanpi/wlanpi_monitor/get_stats.sh"; } | crontab -u wlanpi -
else
# There are no existing cron jobs, let's add our job as the very first one
echo "*/1 * * * * /home/wlanpi/wlanpi_monitor/get_stats.sh" | crontab -u wlanpi -
fi
echo "All cron jobs of user \"wlanpi\" after we've finished:"
crontab -u wlanpi -l
echo "* Done."
echo ""
echo "* ================================================"
echo "* Browse Grafana at: http://$(hostname -I | xargs):${GRAFANA_PORT}/ (user/pwd=$GRAFANA_USER/$GRAFANA_PWD)"
echo "* ================================================"
echo ""
echo ""
|
#!/usr/bin/env bash
#set -x
ENABLED=1 # ENABLED=1 --> install, ENABLED=0 --> do nothing, ENABLED=-1 uninstall
function installSQL {
# Check to see if the acc table or cdr tables are in use
mysql -s -N --user="$MYSQL_ROOT_USERNAME" --password="$MYSQL_ROOT_PASSWORD" $MYSQL_KAM_DATABASE -e "select count(*) from dsip_lcr limit 10" > /dev/null 2>&1
if [ "$?" -eq 0 ]; then
echo -e "The dSIPRouter LCR Support (dsip_lcr) table already exists. Please backup this table before moving forward if you want the data."
echo -e "Would you like to install the FusionPBX LCR module now [y/n]:\c"
read ANSWER
if [ "$ANSWER" == "n" ]; then
return
fi
fi
# Replace the dSIPRouter LCR tables and add some optional Kamailio stored procedures
echo "Adding/Replacing the tables needed for LCR within dSIPRouter..."
mysql -s -N --user="$MYSQL_ROOT_USERNAME" --password="$MYSQL_ROOT_PASSWORD" $MYSQL_KAM_DATABASE < ./gui/modules/lcr/lcr.sql
}
function install {
installSQL
echo "LCR module installed"
}
function uninstall {
echo "LCR module uninstalled"
}
function main {
if [[ ${ENABLED} -eq 1 ]]; then
install
elif [[ ${ENABLED} -eq -1 ]]; then
uninstall
else
exit 0
fi
}
main
|
#!/bin/bash
export APPLICATION_SECRET="1234567890"
export GOOGLE_CLIENT_SECRET="GOOG"
export FACEBOOK_CLIENT_SECRET="FACE"
export SES_PASSWORD="SES"
export CUSTODIA_PASSWORD="web"
|
#!/usr/bin/env bash
curl -sL https://deb.nodesource.com/setup_13.x | sudo -E bash -
sudo apt-get install -y nodejs zip unzip
npm install -g @vue/cli@latest
|
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${UNLOCALIZED_RESOURCES_FOLDER_PATH+x} ]; then
# If UNLOCALIZED_RESOURCES_FOLDER_PATH is not set, then there's nowhere for us to copy
# resources to, so exit 0 (signalling the script phase was successful).
exit 0
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
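# Start each build with an empty copy list; install_resource appends to it.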
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
case "${TARGETED_DEVICE_FAMILY:-}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
4)
TARGET_DEVICE_ARGS="--target-device watch"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" || true
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH" || true
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${PODS_ROOT}/../A4xSDK/A4xSDK.bundle"
install_resource "${PODS_CONFIGURATION_BUILD_DIR}/A4xSDK/A4xSDK.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${PODS_ROOT}/../A4xSDK/A4xSDK.bundle"
install_resource "${PODS_CONFIGURATION_BUILD_DIR}/A4xSDK/A4xSDK.bundle"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "${XCASSET_FILES:-}" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find -L "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
if [ -z ${ASSETCATALOG_COMPILER_APPICON_NAME+x} ]; then
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
else
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" --app-icon "${ASSETCATALOG_COMPILER_APPICON_NAME}" --output-partial-info-plist "${TARGET_TEMP_DIR}/assetcatalog_generated_info_cocoapods.plist"
fi
fi
|
#!/bin/bash
set -o nounset
set -o errexit
shopt -s nullglob
ROOT_DIR=$(pwd)
pushd csplib/Problems > /dev/null # go through all problems in csplib
for prob in *;
do
pushd "${prob}" > /dev/null
if [ -d models ]; then # if it has a models directory
pushd models > /dev/null
for essence in *.essence; # go through all essence files under the models directory
do
TARGET_DIR="${ROOT_DIR}/problems/csplib-${prob}"
mkdir -p "${TARGET_DIR}"
cp "${essence}" "${TARGET_DIR}" # and copy the essence file to problems/csplib-probNUMBER
done
popd > /dev/null
fi
popd > /dev/null
done
popd > /dev/null
|