#!/bin/bash
# @author: kong <[email protected]>
# Retrieve the database container
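# docker ps lists the image name in column 2; awk matches the exact image tag and prints the container ID (column 1)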
CONTAINER_ID="$(docker ps | awk '$2 ~ /^mysql:5.5.62$/ { print $1 }')"
echo "【*】Start migrating for MySQL"
echo "MySQL5.5.62 Container ID:${CONTAINER_ID}"
# Check whether the database container exists
if [[ -z "${CONTAINER_ID}" ]]
then
# Not found
echo "【error】MySQL migration failed!"
echo "・Maybe the script [docker_setup.sh] has not completed yet (confirm with [env_status.sh])"
echo "・Maybe the script [docker_setup.sh] (Docker setup) failed (check the log for further details)"
else
# Found
docker exec "${CONTAINER_ID}" /bin/bash /docker/2_migrations.sh
echo "【done】MySQL migration completed!"
fi
fi
|
if [[ -n $SSH_CONNECTION ]]; then
export PS1='%m:%3~$(git_info_for_prompt)%# '
else
export PS1='%3~$(git_info_for_prompt)%# '
fi
export KEYTIMEOUT=3
export LSCOLORS="exfxcxdxbxegedabagacad"
export CLICOLOR=true
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt NO_BG_NICE # don't nice background tasks
setopt NO_HUP
setopt NO_LIST_BEEP
setopt LOCAL_OPTIONS # allow functions to have local options
setopt LOCAL_TRAPS # allow functions to have local traps
setopt HIST_VERIFY
setopt SHARE_HISTORY # share history between sessions
setopt EXTENDED_HISTORY # add timestamps to history
setopt PROMPT_SUBST
setopt CORRECT
setopt COMPLETE_IN_WORD
setopt IGNORE_EOF
setopt APPEND_HISTORY # append to the history file instead of overwriting it
setopt INC_APPEND_HISTORY SHARE_HISTORY # write history incrementally and share it across sessions
setopt HIST_IGNORE_ALL_DUPS # don't record dupes in history
setopt HIST_REDUCE_BLANKS
# don't expand aliases _before_ completion has finished
# like: git comm-[tab]
# setopt complete_aliases
setopt no_complete_aliases
stty -ixon
|
#!/bin/bash
echo $INSTALL_PATH
java -cp lib/SustituidorVariablesView.jar:lib/* mx.net.alvatroz.sustituidorvariables.Application
|
#!/bin/sh
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
DIR=$(dirname "$0")
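# If $0 was invoked via a relative path (DIR has no leading /), rebuild DIR as an absolute path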
[ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0")
echo "Using verify-commits data from ${DIR}"
VERIFIED_ROOT=$(cat "${DIR}/trusted-git-root")
VERIFIED_SHA512_ROOT=$(cat "${DIR}/trusted-sha512-root-commit")
REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits")
HAVE_FAILED=false
HAVE_GNU_SHA512=1
[ ! -x "$(which sha512sum)" ] && HAVE_GNU_SHA512=0
if [ x"$1" = "x" ]; then
CURRENT_COMMIT="HEAD"
else
CURRENT_COMMIT="$1"
fi
if [ "${CURRENT_COMMIT#* }" != "$CURRENT_COMMIT" ]; then
echo "Commit must not contain spaces?" > /dev/stderr
exit 1
fi
VERIFY_TREE=0
if [ x"$2" = "x--tree-checks" ]; then
VERIFY_TREE=1
fi
NO_SHA1=1
PREV_COMMIT=""
while true; do
if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then
echo "There is a valid path from "$CURRENT_COMMIT" to $VERIFIED_ROOT where all commits are signed!"
exit 0;
fi
if [ "$CURRENT_COMMIT" = $VERIFIED_SHA512_ROOT ]; then
if [ "$VERIFY_TREE" = "1" ]; then
echo "All Tree-SHA512s matched up to $VERIFIED_SHA512_ROOT" > /dev/stderr
fi
VERIFY_TREE=0
NO_SHA1=0
fi
if [ "$NO_SHA1" = "1" ]; then
export SCICOIN_VERIFY_COMMITS_ALLOW_SHA1=0
else
export SCICOIN_VERIFY_COMMITS_ALLOW_SHA1=1
fi
if [ "${REVSIG_ALLOWED#*$CURRENT_COMMIT}" != "$REVSIG_ALLOWED" ]; then
export SCICOIN_VERIFY_COMMITS_ALLOW_REVSIG=1
else
export SCICOIN_VERIFY_COMMITS_ALLOW_REVSIG=0
fi
if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit "$CURRENT_COMMIT" > /dev/null; then
if [ "$PREV_COMMIT" != "" ]; then
echo "No parent of $PREV_COMMIT was signed with a trusted key!" > /dev/stderr
echo "Parents are:" > /dev/stderr
PARENTS=$(git show -s --format=format:%P $PREV_COMMIT)
for PARENT in $PARENTS; do
git show -s $PARENT > /dev/stderr
done
else
echo "$CURRENT_COMMIT was not signed with a trusted key!" > /dev/stderr
fi
exit 1
fi
# We always verify the top of the tree
if [ "$VERIFY_TREE" = 1 -o "$PREV_COMMIT" = "" ]; then
IFS_CACHE="$IFS"
IFS='
'
for LINE in $(git ls-tree --full-tree -r "$CURRENT_COMMIT"); do
case "$LINE" in
"12"*)
echo "Repo contains symlinks" > /dev/stderr
IFS="$IFS_CACHE"
exit 1
;;
esac
done
IFS="$IFS_CACHE"
FILE_HASHES=""
for FILE in $(git ls-tree --full-tree -r --name-only "$CURRENT_COMMIT" | LC_ALL=C sort); do
if [ "$HAVE_GNU_SHA512" = 1 ]; then
HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | sha512sum | { read FIRST OTHER; echo $FIRST; } )
else
HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | shasum -a 512 | { read FIRST OTHER; echo $FIRST; } )
fi
[ "$FILE_HASHES" != "" ] && FILE_HASHES="$FILE_HASHES"'
'
FILE_HASHES="$FILE_HASHES$HASH $FILE"
done
if [ "$HAVE_GNU_SHA512" = 1 ]; then
TREE_HASH="$(echo "$FILE_HASHES" | sha512sum)"
else
TREE_HASH="$(echo "$FILE_HASHES" | shasum -a 512)"
fi
HASH_MATCHES=0
MSG="$(git show -s --format=format:%B "$CURRENT_COMMIT" | tail -n1)"
case "$MSG -" in
"Tree-SHA512: $TREE_HASH")
HASH_MATCHES=1;;
esac
if [ "$HASH_MATCHES" = "0" ]; then
echo "Tree-SHA512 did not match for commit $CURRENT_COMMIT" > /dev/stderr
exit 1
fi
fi
PARENTS=$(git show -s --format=format:%P "$CURRENT_COMMIT")
for PARENT in $PARENTS; do
PREV_COMMIT="$CURRENT_COMMIT"
CURRENT_COMMIT="$PARENT"
break
done
done
|
#!/bin/bash
set -e
set -x
HOSTT="${1}"
if [ "x${HOSTT}" = "x" -o "x${HOSTT}" = "xhost" ]; then
HOSTARG=""
else
HOSTARG="--host=${HOSTT}"
fi
SRCDIR="$(dirname $0)"
WDIR="wxWidgets"
cd ${SRCDIR}
git submodule init
git submodule update
cd ${WDIR}
git checkout .
cd ..
mkdir -p "${WDIR}-${HOSTT:-host}"
cd "${WDIR}-${HOSTT:-host}"
# Verify: Do we need '-Wl,-gc-sections' since we are creating static lib archives?
CXXFLAGS="-ffunction-sections -fdata-sections -Os -Wno-deprecated-declarations -Wno-misleading-indentation -Wno-undef -Wno-parentheses"
../${WDIR}/configure --without-expat --disable-compat28 --disable-compat30 \
--disable-richtooltip --disable-richmsgdlg --disable-richtext \
--without-libpng --without-libjpeg --without-regex \
--disable-ole --disable-mediactrl --disable-dataobj --disable-dataviewctrl \
--disable-treebook --disable-treelist --disable-stc \
--disable-webkit --disable-webview --disable-webviewwebkit --disable-webviewie \
--disable-svg --without-libtiff --without-zlib --without-opengl \
--without-gtkprint --disable-printfposparam --disable-printarch --disable-ps-in-msw \
--enable-cxx11 \
--disable-mshtmlhelp --disable-html --disable-htmlhelp \
--disable-ribbon --disable-propgrid --disable-aui \
--disable-sockets --disable-dialupman --disable-fs_inet \
--disable-shared ${HOSTARG} \
--disable-sys-libs \
--disable-debug --disable-debug_flag \
--disable-autoidman --disable-wxdib \
--disable-uiactionsim --disable-accessibility \
--disable-dragimage --disable-metafiles --disable-joystick \
--disable-hotkey --disable-busyinfo --disable-spline \
--disable-toolbook \
CXXFLAGS="${CXXFLAGS}"
CPUCORES=$(grep -c '^processor' /proc/cpuinfo)
make -j${BUILDJOBS:-${CPUCORES}} BUILD=release
# fix static lib path for cross compile targets
for lib in lib/*-${HOSTT:-host}.a; do
NEWNAME="$(echo -n "${lib}" | sed -n "s/-${HOSTT}\.a$//gp").a"
ln -sr "${lib}" "${NEWNAME}" 2>/dev/null || true
done
|
#!/bin/bash
ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
GPUID=$2
MODEL=$1
export OUTPUT_DIR_NAME=outputs/${MODEL}_${RANDOM}
#export OUTPUT_DIR_NAME=outputs/${MODEL}_test
export CURRENT_DIR=${ROOT_DIR}
export OUTPUT_DIR=${CURRENT_DIR}/${OUTPUT_DIR_NAME}
rm -rf $OUTPUT_DIR
mkdir -p $OUTPUT_DIR
export OMP_NUM_THREADS=10
export CUDA_VISIBLE_DEVICES=${GPUID}
#--early_stopping_patience 15 \
#--fp16 \
# --learning_rate=1e-4 \
# --learning_rate=3e-5 \
#--lr_scheduler cosine \
#--warmup_steps 500 \
#--lr_scheduler cosine_w_restarts \
#--save_top_k 4 \
python ${ROOT_DIR}/finetune.py \
--data_dir=${ROOT_DIR}/data/amr17 \
--learning_rate=3e-5 \
--num_train_epochs 2 \
--task graph2text \
--model_name_or_path=${MODEL} \
--train_batch_size=4 \
--eval_batch_size=4 \
--early_stopping_patience 15 \
--gpus 1 \
--output_dir=$OUTPUT_DIR \
--max_source_length=512 \
--max_target_length=384 \
--val_max_target_length=384 \
--test_max_target_length=384 \
--eval_max_gen_length=384 \
--do_train --do_predict \
--seed ${RANDOM} \
--eval_beams 5 |
#!/usr/bin/env bash
set -eu
if [ "${1:-}" = test ]; then
FOR_TESTING="true"
else
FOR_TESTING="false"
fi
DOMAIN="dnguyen0304"
NAMESPACE="roomlistwatcher"
VERSION=$(grep -Po "version='\K\d\.\d\.\d" setup.py)
REMOTE_SHARED_VOLUME="/tmp/build"
# Clean up existing packages created by previous builds.
rm --force ${NAMESPACE}*.zip
# Create the buildtime container.
BUILDTIME_BASE_IMAGE_VERSION="0.1.0"
tag=${DOMAIN}/${NAMESPACE}-buildtime:${BUILDTIME_BASE_IMAGE_VERSION}
if [ -n "$(sudo docker images --quiet ${tag})" ]; then
docker rmi --force ${tag}
fi
docker build \
--file docker/buildtime/Dockerfile \
--tag ${tag} \
--build-arg DOMAIN=${DOMAIN} \
--build-arg NAMESPACE=${NAMESPACE} \
--build-arg BASE_IMAGE_VERSION=${BUILDTIME_BASE_IMAGE_VERSION} \
--build-arg COMPONENT=${NAMESPACE} \
.
# Create the package.
docker run \
--rm \
--volume $(pwd):${REMOTE_SHARED_VOLUME} \
${tag} \
${NAMESPACE} ${REMOTE_SHARED_VOLUME} ${VERSION}
# Create the container.
RUNTIME_BASE_IMAGE_VERSION="0.1.0"
tag=${DOMAIN}/${NAMESPACE}-runtime:${VERSION}
if [ -n "$(sudo docker images --quiet ${tag})" ]; then
docker rmi --force ${tag}
fi
docker build \
--file docker/runtime/Dockerfile \
--tag ${tag} \
--build-arg DOMAIN=${DOMAIN} \
--build-arg NAMESPACE=${NAMESPACE} \
--build-arg BASE_IMAGE_VERSION=${RUNTIME_BASE_IMAGE_VERSION} \
.
if [ "${FOR_TESTING}" = true ]; then
docker build \
--file docker/runtime/testing/Dockerfile \
--tag ${tag} \
--build-arg DOMAIN=${DOMAIN} \
--build-arg NAMESPACE=${NAMESPACE} \
--build-arg BASE_IMAGE_VERSION=${RUNTIME_BASE_IMAGE_VERSION} \
.
fi
|
#!/usr/bin/env bash
openssl genrsa -out server.key 2048
openssl req -config openssl.cnf -new -x509 -key server.key -out server.pem -days 3650
#openssl x509 -req -days 1024 -in server.csr -signkey server.key -out server.crt
#openssl req -config openssl.cnf -new -key server.key -out server.csr |
#!/bin/bash
echo -e "-startup\n`grep jar$ ~/eclipse/eclipse.ini`\n--launcher.library\n`grep equinox.launcher.gtk ~/eclipse/eclipse.ini`\n`cat /nobody/eclipse/eclipse.ini`" > /nobody/eclipse.ini.tmp; \
mv /nobody/eclipse.ini.tmp /nobody/eclipse/eclipse.ini
|
#!/bin/bash
docker build -t google-iot-home-hub .
|
#!/usr/bin/env bash
set -euf -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/.." # cd to repo root dir
if [[ ${USE_SYNTECT_SERVER_FROM_PATH-} == t* ]]; then
# NB: this is NOT the common path - below is.
export QUIET='true'
export ROCKET_SECRET_KEY="SeerutKeyIsI7releuantAndknvsuZPluaseIgnorYA="
export ROCKET_ENV="production"
export ROCKET_LIMITS='{json=10485760}'
export ROCKET_PORT=9238
if [[ "${INSECURE_DEV:-}" == '1' ]]; then
export ROCKET_ADDRESS='127.0.0.1'
fi
exec syntect_server
fi
addr=()
if [[ "${INSECURE_DEV:-}" == '1' ]]; then
addr+=("-e" "ROCKET_ADDRESS=0.0.0.0")
fi
docker inspect syntect_server >/dev/null 2>&1 && docker rm -f syntect_server
exec docker run --name=syntect_server --rm -p9238:9238 "${addr[@]}" sourcegraph/syntect_server:67fa4c1@sha256:e50ed88f971f7b08698abbc87a10fe18f2f8bbb6472766a15677621c59ca5185
|
#!/bin/bash
# set import environment
#---------------------------function--------------------------
function check_expect()
{
ret_val=$(expect -v)
g_ret=$(echo $ret_val | grep 'expect version')
if [ -n "$g_ret" ]; then
echo $ret_val
else
echo "please install expect: apt install expect or yum install expect"
exit 1
fi
}
function check_wget()
{
ret_val=$(wget -V)
w_ret=$(echo $ret_val | grep 'GNU Wget')
if [ -n "$w_ret" ]; then
echo ${w_ret%"linux-gnu"*}
else
echo "please install wget: apt install wget or yum install wget"
exit 1
fi
}
function check_pssh()
{
pssh > fout 2>&1
ret_val=$(cat fout | grep 'Usage: pssh')
rm -rf fout
if [ -n "$ret_val" ]; then
return 0
else
return 1
fi
}
function scp_trans()
{
dst_ip=$1
user=$2
password=$3
src_path=$4
dst_path=$5
expect -c "
spawn scp -P $port -r ${src_path} ${user}@${dst_ip}:${dst_path}
expect {
\"*assword\" {set timeout 30; send \"$password\r\"; exp_continue;}
\"yes/no\" {send \"yes\r\"; exp_continue;}
}"
return $?
}
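# Example usage (host and credentials are hypothetical; relies on the global $port set in main):
#   scp_trans 10.0.0.2 deploy secret "$HOME/.ssh/authorized_keys" "$HOME/.ssh/authorized_keys"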
function download()
{
obj=$1
if [ -f "$HOME/mingdi/$obj" ]; then
return 0
fi
wget http://gosspublic.alicdn.com/ossimport/tools/$obj -O $HOME/mingdi/$obj
if [ ! -f "$HOME/mingdi/$obj" ]; then
echo "wget $obj failed"
exit 1
fi
}
function untar()
{
obj=$1
tar -zxf $HOME/mingdi/$obj -C $HOME/mingdi/
if [ ! -d "$HOME/mingdi/${obj%.tar.gz*}" ]; then
echo "tar $obj failed"
exit 1
fi
}
#---------------------------main--------------------------
dst_ips="30.40.11.12"
port=22
user=baiyubin
passwd=Alibaba65683
src_path=$HOME/.ssh/authorized_keys
dst_path=$HOME/.ssh/authorized_keys
ossimport=ossimport-2.3.2.tar.gz
# check commands
check_expect
check_wget
echo "$(date '+%F %T') start..."
# gen rsa
echo "gen rsa key pair"
if [ ! -f "$HOME/.ssh/id_rsa" -o ! -f "$HOME/.ssh/id_rsa.pub" ]; then
rm -rf $HOME/.ssh/id_rsa $HOME/.ssh/id_rsa.pub
ssh-keygen -t rsa -f $HOME/.ssh/id_rsa -P ""
fi
echo "privateKeyFile is $HOME/.ssh/id_rsa"
if [ -f "$HOME/.ssh/authorized_keys" ]; then
mv $HOME/.ssh/authorized_keys $HOME/.ssh/authorized_keys.bak
fi
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
# TODO check ips
# scp authorized_keys
echo "scp authorized_keys"
for ip in $dst_ips; do
ret_val=$(scp_trans $ip $user $passwd $src_path $dst_path)
echo $ret_val
ret_val=$(echo $ret_val | grep '100%')
if [ -n "$ret_val" ] ; then
echo "scp to $ip ok"
else
echo "scp to $ip failed"
fi
done
# download tools
mkdir -p $HOME/mingdi
cd $HOME/mingdi
# install pssh
check_pssh
ret=$?
if [ $ret -eq 0 ]; then
echo "pssh version $(pssh --version)"
else
echo "install pssh"
download "pssh-2.3.1.tar.gz"
untar "pssh-2.3.1.tar.gz"
export PATH=$PATH:$HOME/mingdi/pssh-2.3.1/bin
check_pssh
ret=$?
if [ $ret -eq 0 ]; then
echo "pssh install ok"
echo "" >> $HOME/.bashrc
echo 'export PATH=$PATH:$HOME/mingdi/pssh-2.3.1/bin' >> $HOME/.bashrc
else
echo "pssh install failed"
exit 2
fi
fi
# gen ips
echo "gen ips"
if [ -f "$HOME/mingdi/ips" ]; then
mv $HOME/mingdi/ips $HOME/mingdi/ips.bak
fi
for ip in $dst_ips; do
if [ "$port" == "22" ]; then
echo "${user}@${ip}" >> $HOME/mingdi/ips
else
echo "${user}@${ip}:${port}" >> $HOME/mingdi/ips
fi
done
# mkdir mingdi
pssh -h $HOME/mingdi/ips -i "mkdir -p $HOME/mingdi"
# TODO check pssh/pscp result
# java
echo "install java"
download "jdk-8u101-linux-x64.tar.gz"
echo "scp jdk-8u101-linux-x64.tar.gz"
pscp -h $HOME/mingdi/ips $HOME/mingdi/jdk-8u101-linux-x64.tar.gz $HOME/mingdi/jdk-8u101-linux-x64.tar.gz
echo "untar jdk-8u101-linux-x64.tar.gz"
pssh -h $HOME/mingdi/ips -i "tar -zxf $HOME/mingdi/jdk-8u101-linux-x64.tar.gz -C $HOME/mingdi/"
echo "java env"
echo "" >> $HOME/.bashrc
echo "export JAVA_HOME=$HOME/mingdi/jdk1.8.0_101" >> $HOME/.bashrc
echo 'export JRE_HOME=${JAVA_HOME}/jre' >> $HOME/.bashrc
echo 'export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib' >> $HOME/.bashrc
echo 'export PATH=${JAVA_HOME}/bin:$PATH' >> $HOME/.bashrc
# ossutil
echo "down ossutil64"
download "ossutil64"
echo "scp ossutil64"
pscp -h $HOME/mingdi/ips $HOME/mingdi/ossutil64 $HOME/mingdi/ossutil64
echo "chmod +x ossutil64"
pssh -h $HOME/mingdi/ips -i "chmod +x $HOME/mingdi/ossutil64"
echo "" >> $HOME/.bashrc
echo "alias ossutil64='$HOME/mingdi/ossutil64'" >> $HOME/.bashrc
# distribute .bashrc
echo "scp .bashrc"
pscp -h $HOME/mingdi/ips $HOME/.bashrc $HOME/.bashrc
# ossimport
echo "get ossimport"
if [ ! -f "$HOME/mingdi/$ossimport" ]; then
wget http://gosspublic.alicdn.com/ossimport/international/distributed/$ossimport -O $HOME/mingdi/$ossimport
if [ ! -f "$HOME/mingdi/$ossimport" ]; then
echo "wget $ossimport failed"
exit 1
fi
fi
if [ -d "$HOME/mingdi/ossimport" ]; then
rm -rf $HOME/mingdi/ossimport.bak
mv $HOME/mingdi/ossimport $HOME/mingdi/ossimport.bak
fi
mkdir -p $HOME/mingdi/ossimport
tar -zxf $HOME/mingdi/$ossimport -C $HOME/mingdi/ossimport
if [ ! -d "$HOME/mingdi/ossimport/bin" ]; then
echo "tar $ossimport failed"
exit 1
fi
echo "workingDir is $HOME/mingdi/ossimport/workdir"
# TODO update import's config
echo "config workers"
rm -rf $HOME/mingdi/ossimport/conf/workers
for ip in $dst_ips; do
echo "$ip" >> $HOME/mingdi/ossimport/conf/workers
done
echo "config sys.properties"
sys_props_path=$HOME/mingdi/ossimport/conf/sys.properties
sed -i "s#workingDir=/root/import#workingDir=$HOME/mingdi/ossimport/workdir#g" $sys_props_path
sed -i "s#privateKeyFile=#privateKeyFile=$HOME/.ssh/id_rsa#g" $sys_props_path
sed -i "s/workerMaxThroughput(KB\/s)=100000000/workerMaxThroughput(KB\/s)=0/g" $sys_props_path
sed -i "s/workerUserName=root/workerUserName=$user/g" $sys_props_path
sed -i "s/workerPassword=\*\*\*\*\*\*/workerPassword=/g" $sys_props_path
sed -i "s/sshPort=22/sshPort=$port/g" $sys_props_path
sed -i "s/javaHeapMaxSize=1024m/javaHeapMaxSize=2g/g" $sys_props_path
echo "$(date '+%F %T') completed"
exit 0
|
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
if ! command -v bazel &>/dev/null; then
echo "Install bazel at https://bazel.build" >&2
exit 1
fi
set -o xtrace
bazel run @io_k8s_repo_infra//hack:update-deps -- "$@"
|
#!/bin/bash
#Copyright (C) 2014 OpenBet Limited
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Set up a random container name for tests to use
CNAME=shutit_test_container_$(dd if=/dev/urandom bs=256 count=1 2>/dev/null | md5sum | awk '{print $1}')
export SHUTIT_OPTIONS="-s container name $CNAME"
../../shutit build
|
python init.py --video_path ./data/video.mp4 --frame_factor 5 --reso_factor 1
CUDA_VISIBLE_DEVICES=4 python preprocess.py --input data/video_f5
CUDA_VISIBLE_DEVICES=4 python train.py --config configs/video_f5r1.txt |
#!/bin/bash
#--------Removing Files----
sudo apt-get purge netcat-*
sudo apt-get purge aircrack-ng
sudo apt-get purge airmon-ng
sudo apt-get purge hydra-gtk
sudo apt-get purge john
sudo apt-get purge johnny
sudo apt-get purge hive
sudo apt-get purge burp
sudo apt-get purge cainandable
sudo apt-get purge myheritage
sudo apt-get purge wireshark
sudo apt-get purge *nmap
sudo apt-get purge nikto
sudo apt-get purge nmap
sudo apt-get purge hashcat
sudo apt-get purge etherape
sudo apt-get purge kismet
sudo apt-get purge telnet
sudo apt-get purge postfix
sudo apt-get purge lcrack
sudo apt-get purge ophcrack
sudo apt-get purge sl
sudo apt-get purge nis
sudo apt-get purge rsync
sudo apt-get purge rsh-client
sudo apt-get purge ldap-utils
sudo apt-get purge finger
sudo apt-get purge talk
sudo apt-get purge fcrackzip
sudo apt-get purge rkhunter
#Purging services
sudo apt-get purge tomcat
sudo apt-get purge tomcat6
sudo apt-get purge postgresql
sudo apt-get purge dnsmasq
sudo apt-get purge vncserver
sudo apt-get purge tightvnc
sudo apt-get purge tightvnc-common -y
sudo apt-get purge tightvncserver
sudo apt-get purge php5
sudo apt-get purge vnc4server
sudo apt-get purge telnet-server
sudo apt-get purge nmdb
sudo apt-get purge dhclient
sudo apt-get purge rsh-server
#removes leftover media files
find . -name '*.mp3' -type f -delete
find . -name '*.mov' -type f -delete
find . -name '*.mp4' -type f -delete
find . -name '*.avi' -type f -delete
find . -name '*.mpg' -type f -delete
find . -name '*.mpeg' -type f -delete
find . -name '*.flac' -type f -delete
find . -name '*.m4a' -type f -delete
find . -name '*.flv' -type f -delete
find . -name '*.ogg' -type f -delete
find . -name '*.mov' -type f -delete
find /home -name '*.gif' -type f -delete
find /home -name '*.png' -type f -delete
find /home -name '*.jpg' -type f -delete
find /home -name '*.jpeg' -type f -delete
|
#!/bin/sh
set -ex
bindgen /opt/cuda/include/cuda.h > src/driver/ll.rs
rustfmt src/driver/ll.rs
|
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
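# The "P" rule protects rsync's in-flight temporary files (named like ".NAME.??????") from --delete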
copy_dir()
{
local source="$1"
local destination="$2"
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" \"${source}*\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}"
}
SELECT_SLICE_RETVAL=""
select_slice() {
local paths=("$@")
# Locate the correct slice of the .xcframework for the current architectures
local target_path=""
# Split archs on space so we can find a slice that has all the needed archs
local target_archs=$(echo $ARCHS | tr " " "\n")
local target_variant=""
if [[ "$PLATFORM_NAME" == *"simulator" ]]; then
target_variant="simulator"
fi
if [[ ! -z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then
target_variant="maccatalyst"
fi
for i in ${!paths[@]}; do
local matched_all_archs="1"
for target_arch in $target_archs
do
if ! [[ "${paths[$i]}" == *"$target_variant"* ]]; then
matched_all_archs="0"
break
fi
# When no variant is set, reject slices that are variant-specific (simulator or maccatalyst).
if [[ -z "$target_variant" && ("${paths[$i]}" == *"simulator"* || "${paths[$i]}" == *"maccatalyst"*) ]]; then
matched_all_archs="0"
break
fi
# This regex matches all possible variants of the arch in the folder name:
# Let's say the folder name is: ios-armv7_armv7s_arm64_arm64e/CoconutLib.framework
# We match the following: -armv7_, _armv7s_, _arm64_ and _arm64e/.
# If we have a specific variant: ios-i386_x86_64-simulator/CoconutLib.framework
# We match the following: -i386_ and _x86_64-
# When the .xcframework wraps a static library, the folder name does not include
# any .framework. In that case, the folder name can be: ios-arm64_armv7
# We also match _armv7$ to handle that case.
local target_arch_regex="[_\-]${target_arch}([\/_\-]|$)"
if ! [[ "${paths[$i]}" =~ $target_arch_regex ]]; then
matched_all_archs="0"
break
fi
done
if [[ "$matched_all_archs" == "1" ]]; then
# Found a matching slice
echo "Selected xcframework slice ${paths[$i]}"
SELECT_SLICE_RETVAL=${paths[$i]}
break
fi
done
}
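# Illustrative call (slice names are examples): select_slice "ios-arm64_armv7" "ios-i386_x86_64-simulator"
# then inspect $SELECT_SLICE_RETVAL for the slice matching $ARCHS and $PLATFORM_NAME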
install_xcframework() {
local basepath="$1"
local name="$2"
local package_type="$3"
local paths=("${@:4}")
# Locate the correct slice of the .xcframework for the current architectures
select_slice "${paths[@]}"
local target_path="$SELECT_SLICE_RETVAL"
if [[ -z "$target_path" ]]; then
echo "warning: [CP] Unable to find matching .xcframework slice in '${paths[@]}' for the current build architectures ($ARCHS)."
return
fi
local source="$basepath/$target_path"
local destination="${PODS_XCFRAMEWORKS_BUILD_DIR}/${name}"
if [ ! -d "$destination" ]; then
mkdir -p "$destination"
fi
copy_dir "$source/" "$destination"
echo "Copied $source to $destination"
}
install_xcframework "${PODS_ROOT}/PayUIndia-PG-SDK/PayUBizCoreKit.xcframework" "PayUIndia-PG-SDK" "framework" "ios-arm64_i386_x86_64-simulator" "ios-arm64_armv7"
|
if ! (( $+commands[rustup] && $+commands[cargo] )); then
return
fi
# Add completions folder in $ZSH_CACHE_DIR
# TODO: 2021-12-28: remove this bit of code as it exists in oh-my-zsh.sh
command mkdir -p "$ZSH_CACHE_DIR/completions"
(( ${fpath[(Ie)"$ZSH_CACHE_DIR/completions"]} )) || fpath=("$ZSH_CACHE_DIR/completions" $fpath)
# If the completion file doesn't exist yet, we need to autoload it and
# bind it to `cargo`. Otherwise, compinit will have already done that
if [[ ! -f "$ZSH_CACHE_DIR/completions/_cargo" ]]; then
autoload -Uz _cargo
declare -A _comps
_comps[cargo]=_cargo
fi
# If the completion file doesn't exist yet, we need to autoload it and
# bind it to `rustup`. Otherwise, compinit will have already done that
if [[ ! -f "$ZSH_CACHE_DIR/completions/_rustup" ]]; then
autoload -Uz _rustup
declare -A _comps
_comps[rustup]=_rustup
fi
# Generate completion files in the background
rustup completions zsh >| "$ZSH_CACHE_DIR/completions/_rustup" &|
cat >| "$ZSH_CACHE_DIR/completions/_cargo" <<'EOF'
#compdef cargo
source $(rustc +${${(z)$(rustup default)}[1]} --print sysroot)/share/zsh/site-functions/_cargo
EOF
|
set -e
if [ -n "$BASH" ]; then
BASH=~/.bash-profile
fi
if [ -d "$BASH" ]; then
echo "\033[0;33mYou already have Bash Profile installed.\033[0m You'll need to remove $BASH if you want to install"
exit
fi
echo "\033[0;34mCloning Bash Profile...\033[0m"
#hash git >/dev/null 2>&1 && env git clone --depth=1 https://github.com/eldorplus/bash-profile.git $BASH || {
# echo "git not installed"
# exit
#}
echo "\033[0;34mLooking for an existing bash config...\033[0m"
if [ -f ~/.bash_profile ] || [ -h ~/.bash_profile ]; then
echo "\033[0;33mFound ~/.bash_profile.\033[0m \033[0;32mBacking up to ~/.bash_profile.hold\033[0m";
mv ~/.bash_profile ~/.bash_profile.hold;
fi
echo "\033[0;34mUsing the Bash Profile template file and adding it to ~/.bash_profile\033[0m"
cp $BASH/templates/bash_profile.template ~/.bash_profile
sed -i -e "/^BASH=/ c\\
BASH=$BASH
" ~/.bash_profile
echo "\033[0;34mCopying your current PATH and adding it to the end of ~/.bash_profile for you.\033[0m"
sed -i -e "/export PATH=/ c\\
export PATH=\"$PATH\"
" ~/.bash_profile
if [ "$SHELL" != "$(which bash)" ]; then
echo "\033[0;34mTime to change your default shell to bash!\033[0m"
chsh -s "$(which bash)"
fi
env bash
. ~/.bash_profile
|
#!/bin/sh
# TODO: Enable this and ignore generated files when we can use forc during `npm i`
# forc build
npx typechain --target=fuels --out-dir=src/example-contract-types out/debug/example-contract-abi.json
|
#!/bin/bash
if [ ! "$(wget https://dre.tretas.org/dre/2159741/despacho-14167-2015-de-1-de-dezembro -o /dev/null -O -|grep in_links -c)" -eq "0" ]; then
echo "rcaap: incumprimento pode já não existir";
else
echo "rcaap: Incumprimento mantém-se, a actualizar o README (faça um git diff, valide, e commit!)";
while IFS='' read -r line || [[ -n "$line" ]]; do
test $(echo "$line"|grep -v rcaap|wc -l) -eq "1" \
&& echo "$line" \
|| (h=$(echo "$line"|cut -d\| -f1-4); t=$(echo "$line"|cut -d\| -f6-); echo "$h| $(date +%Y/%m/%d) |$t");
done < README.md > new
mv new README.md
fi
|
#!/bin/bash
#for id in songs/*.mp3
#do
#NAME=$("${id}" | cut -d "." -f1 | cut -d "/" -f2)
#echo -e "${NAME}.mp3" | sox "songs/${NAME}.mp3" -n spectrogram -o "spectograms/${NAME}.png" -r -h -x 800 -y 300
#done
for id in songs/*.mp3
do
out="${id%.*}"
out=$(basename $out)
echo -e "$out.mp3" | sox songs/$out.mp3 -n remix - spectrogram -o spectrograms/$out.png -x 3000
done |
#!/bin/bash
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
#
#############################################################################
#
# Description:
#
# This script contains all dpdk specific functions. The functions here require
# that UtilsInit from utils.sh has been called to set up the environment. SSH root
# login, passwordless login, and disabled StrictHostKeyChecking are required.
#
#############################################################################
# Below functions intended to aid dpdkSetupAndRunTest
# Requires:
# - UtilsInit has been called
# - SSH by root, passwordless login, and no StrictHostKeyChecking. Basically, having run
# enableRoot.sh and enablePasswordLessRoot.sh from Testscripts/Linux
# Effects:
# Configures hugepages on machine at IP provided
function Hugepage_Setup() {
if [ -z "${1}" ]; then
LogErr "ERROR: must provide target ip to Hugepage_Setup()"
SetTestStateAborted
exit 1
fi
CheckIP ${1}
if [ $? -eq 1 ]; then
LogErr "ERROR: must pass valid ip to Hugepage_Setup()"
SetTestStateAborted
exit 1
fi
local hugepage_cmd="mkdir -p /mnt/huge; mount -t hugetlbfs nodev /mnt/huge && \
echo 4096 | tee /sys/devices/system/node/node*/hugepages/hugepages-2048kB/nr_hugepages > /dev/null"
ssh ${1} "${hugepage_cmd}"
}
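# Example (IP is illustrative): Hugepage_Setup "10.0.0.5" reserves 4096 2MB hugepages per NUMA node on that host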
# Requires:
# - UtilsInit has been called
# - SSH by root, passwordless login, and no StrictHostKeyChecking. Basically, having run
# enableRoot.sh and enablePasswordLessRoot.sh from Testscripts/Linux
# Effects:
# modprobes required modules for dpdk on machine at IP provided
function Modprobe_Setup() {
if [ -z "${1}" ]; then
LogErr "ERROR: must provide target ip to Modprobe_Setup()"
SetTestStateAborted
exit 1
fi
CheckIP ${1}
if [ $? -eq 1 ]; then
LogErr "ERROR: must pass valid ip to Modprobe_Setup()"
SetTestStateAborted
exit 1
fi
local modprobe_cmd="modprobe -a ib_uverbs"
# known issue on sles15
local distro=$(detect_linux_distribution)$(detect_linux_distribution_version)
if [[ "${distro}" == "sles15" ]]; then
modprobe_cmd="${modprobe_cmd} mlx4_ib mlx5_ib || true"
fi
ssh ${1} "${modprobe_cmd}"
}
# Helper function to Install_Dpdk()
# Requires:
# - called only from Install_Dpdk()
# - see Install_Dpdk() requires
# - arguments: ip, distro
function Install_Dpdk_Dependencies() {
if [ -z "${1}" -o -z "${2}" ]; then
LogErr "ERROR: must provide install ip and distro to Install_Dpdk_Dependencies()"
SetTestStateAborted
exit 1
fi
local install_ip="${1}"
local distro="${2}"
CheckIP ${install_ip}
if [ $? -eq 1 ]; then
LogErr "ERROR: must pass valid ip to Modprobe_Setup()"
SetTestStateAborted
exit 1
fi
LogMsg "Detected distro: ${distro}"
if [[ "${distro}" == ubuntu* ]]; then
apt_packages="librdmacm-dev librdmacm1 build-essential libnuma-dev libmnl-dev libelf-dev dpkg-dev"
if [[ "${distro}" == "ubuntu16.04" ]]; then
ssh ${install_ip} ". utils.sh && CheckInstallLockUbuntu && add-apt-repository ppa:canonical-server/dpdk-azure -y"
else
apt_packages="${apt_packages} rdma-core"
fi
ssh ${install_ip} ". utils.sh && CheckInstallLockUbuntu && apt-get update"
ssh ${install_ip} ". utils.sh && CheckInstallLockUbuntu && apt-get install -y ${apt_packages}"
elif [[ "${distro}" == rhel7* || "${distro}" == centos7* ]]; then
ssh ${install_ip} "yum -y --nogpgcheck groupinstall 'Infiniband Support'"
ssh ${install_ip} "dracut --add-drivers 'mlx4_en mlx4_ib mlx5_ib' -f"
yum_flags=""
if [[ "${distro}" == centos7* ]]; then
# for all releases that are moved into vault.centos.org
# we have to update the repositories first
ssh ${install_ip} "yum -y --nogpgcheck install centos-release"
ssh ${install_ip} "yum clean all"
ssh ${install_ip} "yum makecache"
yum_flags="--enablerepo=C*-base --enablerepo=C*-updates"
fi
ssh ${install_ip} "yum install --nogpgcheck ${yum_flags} --setopt=skip_missing_names_on_install=False -y gcc make git tar wget dos2unix psmisc kernel-devel-$(uname -r) numactl-devel.x86_64 librdmacm-devel libmnl-devel"
elif [[ "${distro}" == "sles15" ]]; then
local kernel=$(uname -r)
dependencies_install_command="zypper --no-gpg-checks --non-interactive --gpg-auto-import-keys install gcc make git tar wget dos2unix psmisc libnuma-devel numactl librdmacm1 rdma-core-devel libmnl-devel"
if [[ "${kernel}" == *azure ]]; then
ssh "${install_ip}" "zypper install --oldpackage -y kernel-azure-devel=${kernel::-6}"
dependencies_install_command="${dependencies_install_command} kernel-devel-azure"
else
dependencies_install_command="${dependencies_install_command} kernel-default-devel"
fi
ssh "${install_ip}" "${dependencies_install_command}"
ssh ${install_ip} "ln -sf /usr/include/libmnl/libmnl/libmnl.h /usr/include/libmnl/libmnl.h"
else
LogErr "ERROR: unsupported distro ${distro} for DPDK on Azure"
SetTestStateAborted
exit 1
fi
if [ $? -ne 0 ]; then
LogErr "ERROR: Failed to install required packages on distro ${distro}"
SetTestStateFailed
exit 1
fi
}
# Requires:
# - basic environ i.e. have called UtilsInit
# - ${1} dpdk install target ip
# - SSH by root, passwordless login, and no StrictHostKeyChecking. Basically, having run
# enableRoot.sh and enablePasswordLessRoot.sh from Testscripts/Linux
# Modifies:
# - vm at ip ${1} to install dpdk
# Effects:
# - does NOT set up hugepages or modprobe (see other funcs)
# - only installs dpdk on first IP provided
function Install_Dpdk() {
if [ -z "${LIS_HOME}" -o -z "${DPDK_LINK}" -o -z "${DPDK_DIR}" ]; then
LogErr "ERROR: LIS_HOME, DPDK_LINK, and DPDK_DIR must be defined before calling Install_Dpdk()"
SetTestStateAborted
exit 1
fi
if [ -z "${1}" ]; then
LogErr "ERROR: Must supply ip of host to Install_Dpdk()"
SetTestStateAborted
exit 1
fi
CheckIP ${1}
if [ $? -eq 1 ]; then
LogErr "ERROR: must pass valid ip to Install_Dpdk()"
SetTestStateAborted
exit 1
fi
local install_ip=${1}
LogMsg "Installing dpdk on ${install_ip}"
local distro=$(detect_linux_distribution)$(detect_linux_distribution_version)
Install_Dpdk_Dependencies $install_ip $distro
install_from_ppa=false
ssh ${install_ip} "if [[ -e '${DPDK_DIR}' ]]; then rm -rf '${DPDK_DIR}'; fi"
if [[ $DPDK_LINK =~ .tar ]]; then
ssh ${install_ip} "mkdir ${DPDK_DIR}"
dpdk_download_file="${DPDK_LINK##*/}"
wget_retry "${DPDK_LINK}" "/tmp" "${install_ip}"
ssh "${install_ip}" "tar -xf '/tmp/${dpdk_download_file}' -C ${DPDK_DIR} --strip-components=1"
check_exit_status "Get DPDK sources from '${DPDK_LINK}' on ${install_ip}" "exit"
elif [[ $DPDK_LINK =~ ".git" ]] || [[ $DPDK_LINK =~ "git:" ]]; then
ssh ${install_ip} "git clone ${DPDK_LINK} ${DPDK_DIR}"
elif [[ $DPDK_LINK =~ "ppa:" ]]; then
if [[ $distro != "ubuntu16.04" && $distro != "ubuntu18.04" ]]; then
LogErr "PPAs are supported only on Debian based distros."
SetTestStateAborted
exit 1
fi
ssh "${install_ip}" "add-apt-repository ${DPDK_LINK} -y -s"
ssh "${install_ip}" "apt-get update"
install_from_ppa=true
elif [[ $DPDK_LINK =~ "native" || $DPDK_LINK == "" ]]; then
if [[ $distro != "ubuntu16.04" && $distro != "ubuntu18.04" ]]; then
LogErr "Native installs are supported only on Debian based distros."
SetTestStateAborted
exit 1
fi
ssh "${install_ip}" "sed -i '/deb-src/s/^# //' /etc/apt/sources.list"
check_exit_status "Enable source repos on ${install_ip}" "exit"
install_from_ppa=true
else
LogErr "DPDK source link not supported: '${DPDK_LINK}'"
SetTestStateAborted
exit 1
fi
if [[ $install_from_ppa == true ]]; then
ssh "${install_ip}" "apt install -y dpdk dpdk-dev"
check_exit_status "Install DPDK from ppa ${DPDK_LINK} on ${install_ip}" "exit"
ssh "${install_ip}" "apt-get source dpdk"
check_exit_status "Get DPDK sources from ppa on ${install_ip}" "exit"
dpdk_version=$(ssh "${install_ip}" "dpkg -s 'dpdk' | grep 'Version' | head -1 | awk '{print \$2}' | awk -F- '{print \$1}'")
dpdk_source="dpdk_${dpdk_version}.orig.tar.xz"
dpdkSrcDir="dpdk-${dpdk_version}"
ssh "${install_ip}" "tar xf $dpdk_source"
check_exit_status "Get DPDK sources from ppa on ${install_ip}" "exit"
ssh "${install_ip}" "mv ${dpdkSrcDir} ${DPDK_DIR}"
fi
LogMsg "dpdk source on ${install_ip} at ${DPDK_DIR}"
LogMsg "MLX_PMD flag enabling on ${install_ip}"
ssh ${install_ip} "cd ${LIS_HOME}/${DPDK_DIR} && make config T=x86_64-native-linuxapp-gcc"
ssh ${install_ip} "sed -ri 's,(MLX._PMD=)n,\1y,' ${LIS_HOME}/${DPDK_DIR}/build/.config"
if type Dpdk_Configure > /dev/null; then
echo "Calling testcase provided Dpdk_Configure(install_ip) on ${install_ip}"
# shellcheck disable=SC2034
ssh ${install_ip} ". constants.sh; . utils.sh; . dpdkUtils.sh; cd ${LIS_HOME}/${DPDK_DIR}; $(typeset -f Dpdk_Configure); DPDK_DIR=${DPDK_DIR} LIS_HOME=${LIS_HOME} Dpdk_Configure ${install_ip}"
fi
ssh ${install_ip} "cd ${LIS_HOME}/${DPDK_DIR} && make -j"
check_exit_status "cd ${LIS_HOME}/${DPDK_DIR} && make -j" "exit"
ssh ${install_ip} "cd ${LIS_HOME}/${DPDK_DIR} && make install"
check_exit_status "cd ${LIS_HOME}/${DPDK_DIR} && make install" "exit"
LogMsg "Finished installing dpdk on ${install_ip}"
}
# Below function(s) intended for use by a testcase provided Run_Testcase() function:
# - Run_Testcase() allows a user to run their own test within a preconfigured DPDK environment
# - when called, it is guaranteed to have constants.sh, utils.sh, and dpdkUtils.sh sourced
# - UtilsInit is called in this environment
# - Run_Testcase() is called on the "sender" VM and should orchestrate the testcase
# across the other VMs
# - see other tests for example
# Create_Csv() creates a csv file for use with DPDK-TESTCASE-DRIVER and
# outputs that name so the user can write parsed performance data into it
# Requires:
# - basic environ i.e. have called UtilsInit
# Effects:
# - creates csv, outputs name, capture to use
function Create_Csv() {
if [ -z "${LIS_HOME}" ]; then
LogErr "ERROR: LIS_HOME must be defined in environment"
SetTestStateAborted
exit 1
fi
local csv_name="${LIS_HOME}/dpdk_test.csv"
touch ${csv_name}
echo ${csv_name}
}
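# Typical use (variable names are illustrative): csv_file=$(Create_Csv); echo "${parsed_results}" >> "${csv_file}"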
# Update_Phase() updates the test "phase"
# The test phase is read by the main testcase loop in DPDK-TESTCASE-DRIVER
# and the testcase provided Alter-Runtime function can read this. This allows
# testcases to control/adjust any aspect of runtime based on the testcase
# running on the VMs. e.g. revoke VF through Azure Infra during DPDK test
# Requires:
# - basic environ i.e. have called UtilsInit
# - 1st argument to be message to write to file
# Effects:
# - Clobbers previous phase with passed phase message
function Update_Phase() {
if [ -z "${1}" ]; then
LogErr "ERROR: Must supply phase message to Update_Phase()"
SetTestStateAborted
exit 1
fi
local msg="${1}"
LogMsg "Updated phase with: ${msg}"
echo "${msg}" > ${PHASE_FILE}
}
# Requires:
# - basic environ i.e. have called UtilsInit
# Effects:
# - Outputs phase message
function Read_Phase() {
cat ${PHASE_FILE}
}
# Create_Vm_Synthetic_Vf_Pair_Mappings() matches the name of VM with its synthetic and
# VF NIC pair.
# Requires:
# - basic environ i.e. have called UtilsInit
# - VM_NAMES to be defined as a list of vm names
# - each vm_name in the list is also a variable that stores its IP
# Effects:
# - sets global variables of the form:
# <vm_name>_iface
# <vm_name>_busaddr
function Create_Vm_Synthetic_Vf_Pair_Mappings() {
if [ -z "${VM_NAMES}" ]; then
LogErr "ERROR: VM_NAMES must be defined for Create_Vm_Synthetic_Vf_Pair_Mappings()"
SetTestStateAborted
exit 1
fi
local name
for name in $VM_NAMES; do
# shellcheck disable=SC2034
local pairs=($(ssh ${!name} "$(typeset -f get_synthetic_vf_pairs); get_synthetic_vf_pairs"))
if [ "${#pairs[@]}" -eq 0 ]; then
LogErr "ERROR: No ${name} VFs present"
SetTestStateFailed
exit 1
fi
# set global if/busaddr pairs
eval ${name}_iface="${pairs[0]}"
eval ${name}_busaddr="${pairs[1]}"
done
}
# Create_Timed_Testpmd_Cmd() creates the testpmd cmd string based on provided args
# Requires:
# - basic environ i.e. have called UtilsInit
# - DPDK_DIR is defined in environment
# - Arguments (in order):
# 1. duration in seconds
# 2. number of cores
# 3. busaddr of VF
# 4. name of corresponding synthetic nic iface
# 5. any of the valid testpmd fwd modes e.g. txonly, mac, rxonly
# 6. (optional) additional testpmd parameters
# Effects:
# - outputs the testpmd command with no redirection or trailing ampersand
function Create_Timed_Testpmd_Cmd() {
if [ -z "${1}" ]; then
LogErr "ERROR: duration must be passed to Create_Timed_Testpmd_Cmd"
SetTestStateAborted
exit 1
fi
local duration="${1}"
cmd="$(Create_Testpmd_Cmd ${2} ${3} ${4} ${5} ${6})"
echo "timeout ${duration} ${cmd}"
}
# Create_Testpmd_Cmd() creates the testpmd cmd string based on provided args
# Requires:
# - basic environ i.e. have called UtilsInit
# - DPDK_DIR is defined in environment
# - Arguments (in order):
# 1. number of cores
# 2. busaddr of VF
# 3. name of corresponding synthetic nic iface
# 4. any of the valid testpmd fwd modes e.g. txonly, mac, rxonly
# 5. (optional) additional testpmd parameters
# Effects:
# - outputs the testpmd command with no redirection or trailing ampersand
function Create_Testpmd_Cmd() {
if [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
LogErr "ERROR: cores, busaddr, iface, and testpmd mode must be passed to Create_Testpmd_Cmd"
SetTestStateAborted
exit 1
fi
if [ -z "${DPDK_DIR}" ]; then
LogErr "ERROR: DPDK_DIR must be defined before calling Create_Testpmd_Cmd()"
SetTestStateAborted
exit 1
fi
local core="${1}"
local busaddr="${2}"
local iface="${3}"
local mode="${4}"
local additional_params="${5}"
# partial strings to concat
local testpmd="${LIS_HOME}/${DPDK_DIR}/build/app/testpmd"
local eal_opts="-l 0-${core} -w ${busaddr} --vdev='net_vdev_netvsc0,iface=${iface}' --"
local testpmd_opts0="--port-topology=chained --nb-cores ${core} --txq ${core} --rxq ${core}"
local testpmd_opts1="--mbcache=512 --txd=4096 --rxd=4096 --forward-mode=${mode} --stats-period 1 --tx-offloads=0x800e ${additional_params}"
echo "${testpmd} ${eal_opts} ${testpmd_opts0} ${testpmd_opts1}"
}
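# Illustrative output of: Create_Testpmd_Cmd 2 "0002:00:02.0" eth1 txonly (busaddr/iface are examples):
#   ${LIS_HOME}/${DPDK_DIR}/build/app/testpmd -l 0-2 -w 0002:00:02.0 --vdev='net_vdev_netvsc0,iface=eth1' -- \
#   --port-topology=chained --nb-cores 2 --txq 2 --rxq 2 --mbcache=512 --txd=4096 --rxd=4096 \
#   --forward-mode=txonly --stats-period 1 --tx-offloads=0x800e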
# Below function(s) intended for use by a testcase provided Dpdk_Configure() function:
# - Dpdk_Configure() lets a testcase configure dpdk before compilation
# - when called, it is guaranteed to have constants.sh, utils.sh, and dpdkUtils.sh
# sourced; it will be called on the target machine in dpdk top level dir,
# and it will be passed target machine's ip
# - UtilsInit is not called in this environment
# Requires:
# - called only from dpdk top level directory
# - type [SRC | DST] and testpmd ip to configure as arguments
# Modifies:
# - local testpmd tx src and destination ips
function Testpmd_Ip_Setup() {
if [ -z "${1}" -o -z "${2}" ]; then
LogErr "ERROR: must provide ip type as SRC or DST and testpmd ip to Testpmd_Ip_Setup()"
SetTestStateAborted
exit 1
fi
local ip_type=${1}
if [ "${ip_type}" != "SRC" -a "${ip_type}" != "DST" ]; then
LogErr "ERROR: ip type invalid use SRC or DST Testpmd_Ip_Setup()"
SetTestStateAborted
exit 1
fi
local ip_for_testpmd=${2}
local ip_arr=($(echo ${ip_for_testpmd} | sed "s/\./ /g"))
local ip_addr="define IP_${ip_type}_ADDR ((${ip_arr[0]}U << 24) | (${ip_arr[1]} << 16) | (${ip_arr[2]} << 8) | ${ip_arr[3]})"
local ip_config_cmd="sed -i 's/define IP_${ip_type}_ADDR.*/${ip_addr}/' app/test-pmd/txonly.c"
LogMsg "${ip_config_cmd}"
eval "${ip_config_cmd}"
}
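# Worked example: for SRC ip 10.0.0.5 the sed above rewrites txonly.c to contain:
#   define IP_SRC_ADDR ((10U << 24) | (0 << 16) | (0 << 8) | 5)
# i.e. the dotted quad packed into a single 32-bit integer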
# Requires:
# - called only from dpdk top level directory
# Modifies:
# - local testpmd txonly to support multiple flows
function Testpmd_Multiple_Tx_Flows_Setup() {
local num_port_code="#define NUM_SRC_PORTS 8"
local port_arr_code="static uint16_t src_ports[NUM_SRC_PORTS] = {200,300,400,500,600,700,800,900};"
local port_code="pkt_udp_hdr.src_port = rte_cpu_to_be_16(src_ports[nb_pkt % NUM_SRC_PORTS]);"
sed -i "54i ${num_port_code}" app/test-pmd/txonly.c
sed -i "55i ${port_arr_code}" app/test-pmd/txonly.c
# Note(v-advlad): we need to add port_code line after the nb_pkt is defined
lookup_line_nb_packet='for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {'
local lines_to_be_replaced=$(grep -nir "${lookup_line_nb_packet}" app/test-pmd/txonly.c| awk '{print $1}' | tr -d ':''')
local line_index=1
for line in $lines_to_be_replaced
do
line_to_be_replaced=$(($line_index + $line))
sed -i "${line_to_be_replaced}i ${port_code}" app/test-pmd/txonly.c
line_index=$(($line_index + 1))
done
# Note(v-advlad): fallback to previous implementation
if [[ -z "${lines_to_be_replaced}" ]]; then
sed -i "234i ${port_arr_code}" app/test-pmd/txonly.c
fi
}
# Requires:
# - called only from dpdk top level directory
# - first argument to is destination IP
# - requires ip forwarding to be turned on in the VM
# Modifies:
# - local testpmd mac mode to forward packets to supplied ip
# Notes:
# - Be aware of subnets
function Testpmd_Macfwd_To_Dest() {
local dpdk_version=$(Get_DPDK_Version "${LIS_HOME}/${DPDK_DIR}")
local dpdk_version_changed_mac_fwd="19.08"
local ptr_code="struct ipv4_hdr *ipv4_hdr;"
local offload_code="ol_flags |= PKT_TX_IP_CKSUM; ol_flags |= PKT_TX_IPV4;"
local dst_addr=$(echo ${1} | sed 'y/\./,/')
local dst_addr_code="ipv4_hdr = rte_pktmbuf_mtod_offset(mb, struct ipv4_hdr *, sizeof(struct ether_hdr)); ipv4_hdr->dst_addr = rte_be_to_cpu_32(IPv4(${dst_addr}));"
LogMsg "DPDK version: ${dpdk_version}. DPDK version changed: ${dpdk_version_changed_mac_fwd}"
if [[ ! $(printf "${dpdk_version_changed_mac_fwd}\n${dpdk_version}" | sort -V | head -n1) == "${dpdk_version}" ]]; then
LogMsg "Using newer forwarding code insertion"
ptr_code="struct rte_ipv4_hdr *rte_ipv4_hdr1;"
dst_addr_code="rte_ipv4_hdr1 = rte_pktmbuf_mtod_offset(mb, struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr)); rte_ipv4_hdr1->dst_addr = rte_be_to_cpu_32(RTE_IPV4(${dst_addr}));"
else
LogMsg "Using legacy forwarding code insertion"
fi
sed -i "53i ${ptr_code}" app/test-pmd/macfwd.c
sed -i "90i ${offload_code}" app/test-pmd/macfwd.c
sed -i "101i ${dst_addr_code}" app/test-pmd/macfwd.c
}
function Get_DPDK_Version() {
version_file_path="${1}/VERSION"
meson_config_path="${1}/meson.build"
dpdk_version=""
if [ -f "${version_file_path}" ]; then
dpdk_version=$(cat "${version_file_path}")
elif [ -f "${meson_config_path}" ]; then
dpdk_version=$(grep -m 1 "version:" $meson_config_path | awk '{print $2}' | tr -d "\`'\,")
fi
echo $dpdk_version
}
function Get_Trx_Rx_Ip_Flags() {
receiver="${1}"
local dpdk_version=$(Get_DPDK_Version "${LIS_HOME}/${DPDK_DIR}")
local dpdk_version_changed_tx_ips="19.05"
trx_rx_ips=""
if [[ ! $(printf "${dpdk_version_changed_tx_ips}\n${dpdk_version}" | sort -V | head -n1) == "${dpdk_version}" ]]; then
local dpdk_ips_cmd="hostname -I"
local sender_dpdk_ips=($(eval "${dpdk_ips_cmd}"))
local receiver_dpdk_ips=($(ssh "${receiver}" "${dpdk_ips_cmd}"))
trx_rx_ips="--tx-ip=${sender_dpdk_ips[1]},${receiver_dpdk_ips[1]}"
fi
echo "${trx_rx_ips}"
} |
export SECRET_KEY='trap'
export SQLALCHEMY_DATABASE_URI='postgresql+psycopg2://njoroge:njoro1234@localhost/njoroge'
export MAIL_USERNAME='[email protected]'
export MAIL_PASSWORD='njoro1234'
# export DATABASE_URL = 'mysql+mysqldb://njoroge:1234@localhost/njoroge.mysql'
python3.6 manage.py server
|
#!/bin/bash
set -euo pipefail # http://redsymbol.net/articles/unofficial-bash-strict-mode/
IFS=$'\n\t'
true=1
false=0
DEVELOPMENT_MAX_CONNECTIONS=2
PRODUCTION_MAX_CONNECTIONS=5
# Export all environment variables needed to run the application to an 'EnvFile' in the project root
function main() {
echo > EnvFile
get_heroku_env_variables
load_jdbc_variable
set_max_connections "$@"
}
function get_heroku_env_variables() {
echo "Getting config variables!"
CONFIG_VARS=$(heroku config -a pazcapstone)
SKIP_FIRST=${false}
for VAR in ${CONFIG_VARS}; do
if [[ ${SKIP_FIRST} -eq ${false} ]]; then
SKIP_FIRST=${true}
else
set_env_variable_in_bash_session ${VAR}
fi
done
}
function set_env_variable_in_bash_session() {
for VAR in ${@}; do
name=$(echo ${VAR} | awk '{print substr($1, 1, length($1)-1)}')
value=$(echo ${VAR} | awk '{print $2}')
prepared_export=${name}'='${value}
echo ${prepared_export} >> EnvFile
done
}
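# heroku config prints lines like "DATABASE_URL: postgres://..."; the awk calls strip the
# trailing ':' from the name so EnvFile receives "DATABASE_URL=postgres://..."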
function load_jdbc_variable() {
echo "Getting JDBC_DATABASE_URL!"
JDBC=$(heroku run echo '$JDBC_DATABASE_URL' -a pazcapstone)
prepared_export=JDBC_DATABASE_URL'='${JDBC}
echo ${prepared_export} >> EnvFile
export ${prepared_export}
}
function set_max_connections() {
max_connections=${DEVELOPMENT_MAX_CONNECTIONS}
# if DYNO variable is set, it means environment is production
if [ -n "${DYNO+1}" ]; then
echo "In production environment, set max database connection to $PRODUCTION_MAX_CONNECTIONS"
max_connections=${PRODUCTION_MAX_CONNECTIONS}
# if DYNO variable is unset, it means environment is local as only heroku dyno has DYNO env variable
else
echo "In local dev environment, set max database connection to $DEVELOPMENT_MAX_CONNECTIONS"
max_connections=${DEVELOPMENT_MAX_CONNECTIONS}
fi;
echo MAX_CONNECTIONS=${max_connections} >> EnvFile
}
main "$@"
|
#!/bin/bash
set -e
for module in hdfs-base hdfs-nn hdfs-dn spark-base spark-master spark-worker zeppelin; do
  (cd "$module" && ./build.sh)
done
|
#!/usr/bin/env bash
set -euo pipefail
if [ $# != 0 ]; then
echo "Usage: $0"
exit 1;
fi
if ! python -c "import packaging.version" &> /dev/null; then
python3 -m pip install packaging
fi
torch_version=$(python3 -c "import torch; print(torch.__version__)")
python_36_plus=$(python3 <<EOF
from packaging.version import parse as V
import sys
if V("{}.{}.{}".format(*sys.version_info[:3])) >= V("3.6"):
print("true")
else:
print("false")
EOF
)
pt_plus(){
python3 <<EOF
import sys
from packaging.version import parse as L
if L('$torch_version') >= L('$1'):
print("true")
else:
print("false")
EOF
}
echo "[INFO] torch_version=${torch_version}"
if ! "${python_36_plus}"; then
echo "[ERROR] python<3.6 is not supported"
exit 1
else
if $(pt_plus 1.5.0); then
pip install torch_optimizer
else
echo "[WARNING] torch_optimizer requires pytorch>=1.5.0"
fi
fi
# Check the pytorch version is not changed from the original version
current_torch_version="$(python3 -c 'import torch; print(torch.__version__)')"
if [ "${torch_version}" != "${current_torch_version}" ]; then
echo "[ERROR] The torch version has been changed. Please report to espnet developers"
exit 1
fi
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-HPMI/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-HPMI/1024+0+512-N-VB-ADJ-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_nouns_verbs_and_adjectives_first_two_thirds_full --eval_function last_element_eval |
#!/usr/bin/bash
if [ $# -eq 1 ]; then
yarn build
(cd public \
&& git init \
&& git add . \
&& git commit -m "$(date)" \
&& git remote add origin $1 \
&& git push -f -u origin master)
else
echo "Usage: $0 git"
fi |
#!/bin/bash
set -e
cd /data
cp -rf /tmp/mc-paper/* .
echo "eula=true" > eula.txt
if [[ ! -e server.properties ]]; then
cp /tmp/server.properties .
fi
if [[ -n "$MOTD" ]]; then
sed -i "/motd\s*=/ c motd=$MOTD" /data/server.properties
fi
if [[ -n "$LEVEL" ]]; then
sed -i "/level-name\s*=/ c level-name=$LEVEL" /data/server.properties
fi
if [[ -n "$OPS" ]]; then
echo $OPS | awk -v RS=, '{print}' >> ops.txt
fi
if [[ -n "$SEED" ]]; then
sed -i "/level-seed\s*=/ c level-seed=$SEED" /data/server.properties
fi
if [[ -n "$ENABLE_RCON" ]]; then
sed -i "/enable-rcon\s*=/ c enable-rcon=$ENABLE_RCON" /data/server.properties
fi
java $JVM_OPTS -jar ServerInstall-paper.jar nogui
|
#! /bin/bash
export TRACE=make.trace
# redirect fd 100 to lock file
# exec 100>timing.lock
# record the pid, and the start time (s.ns), and the command make is running
echo B $$ `date +%s.%N` "[$*]" >> $TRACE
# the actual command make wants to run
bash "$@"
# record the pid and the end time (s.ns)
echo E $$ `date +%s.%N` >> $TRACE
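# The TRACE file then contains begin/end pairs like (values illustrative):
#   B 12345 1617711123.456789012 [cc -c foo.c]
#   E 12345 1617711124.000000001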
|
trap 'decrease' SIGTERM
decrease() {
echo "Decreasing a concurrent app"
curl -X PATCH "$ENDPOINT/runner-scaling/reduce"
exit 0;
}
# Keep the process alive so the SIGTERM trap can fire; sleeping avoids a busy loop pegging a CPU.
while true; do sleep 1; done |
#!/bin/bash
OSTYPE=$(uname -s)
if [ "x$OSTYPE" = "xDarwin" ]; then
PLATFORM=macos
DLLEXT=dylib
else
PLATFORM=linux-amd64
DLLEXT=so
fi
cd ../../
mkdir -p ./mygame/native/$PLATFORM
COMPILER_FLAGS="-isystem ./include -I. -Ofast -flto=full -fopenmp"
./dragonruby-bind --compiler-flags="$COMPILER_FLAGS" --ffi-module=MatoCore --output=./mygame/native/mato-bind.c ./mygame/cext/mato.c
clang $COMPILER_FLAGS -fPIC -shared ./mygame/cext/src/*.c ./mygame/native/mato-bind.c -o ./mygame/native/$PLATFORM/matocore.$DLLEXT
|
echo "Dumping Script API..."
./ScriptCompiler -dumpapi ../Docs/ScriptAPI.dox
if [ $? -ne 0 ]; then exit 1; fi
echo "Converting Doxygen files to Wiki..."
./DocConverter ../Docs ../../wiki Urho3D
if [ "$1" != "-a" ]; then exit 0; fi
echo "Converting Doxygen files to HTML..."
cd .. && doxygen Doxyfile 1>/dev/null
echo "Finish."
|
#!/bin/sh
# Exit on non-zero exit code
set -e
# Motech demo
echo "====================="
echo "Building motech-demo"
echo "====================="
MODULE_DIR=$CONTENT_DIR/modules/motech-demo
# Copy control
mkdir -p $TMP_DIR/motech-demo/DEBIAN
cp $MODULE_DIR/control $TMP_DIR/motech-demo/DEBIAN/control
# Update version
perl -p -i -e "s/\\$\\{version\\}/$MOTECH_VERSION/g" $TMP_DIR/motech-demo/DEBIAN/control
# Copy copyright
mkdir -p $TMP_DIR/motech-demo/usr/share/doc/motech-demo
cp $CONTENT_DIR/motech/usr/share/doc/motech/copyright $TMP_DIR/motech-demo/usr/share/doc/motech-demo/copyright
# Copy changelog
cp $MODULE_DIR/changelog* $TMP_DIR/motech-demo/usr/share/doc/motech-demo
gzip --best $TMP_DIR/motech-demo/usr/share/doc/motech-demo/changelog*
# Copy bundle
mkdir -p $TMP_DIR/motech-demo/$BUNDLE_DIR
cp $ARTIFACT_DIR/motech-demo-bundle*.jar $TMP_DIR/motech-demo/$BUNDLE_DIR
# Copy scripts
cp $MODULE_DIR/../common/post* $TMP_DIR/motech-demo/DEBIAN
# Permissions
find $TMP_DIR/motech-demo -type d | xargs chmod 755 # directories
find $TMP_DIR/motech-demo -type f | xargs chmod 644 # files
chmod 755 $TMP_DIR/motech-demo/DEBIAN/*
# Build
echo "Building package"
PACKAGE_NAME=motech-demo_$MOTECH_VERSION.deb
fakeroot dpkg-deb --build motech-demo
mv motech-demo.deb $BUILD_DIR/$PACKAGE_NAME
# Check for problems
echo "Checking package with lintian"
lintian -i $BUILD_DIR/$PACKAGE_NAME
# Clean up
rm -r $TMP_DIR/motech-demo
echo "Done. Finished building $PACKAGE_NAME"
|
# bash/zsh git prompt support
#
# Copyright (C) 2006,2007 Shawn O. Pearce <[email protected]>
# Distributed under the GNU General Public License, version 2.0.
#
# This script allows you to see repository status in your prompt.
#
# To enable:
#
# 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh).
# 2) Add the following line to your .bashrc/.zshrc:
# source ~/.git-prompt.sh
# 3a) Change your PS1 to call __git_ps1 as
# command-substitution:
# Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
# ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ '
# the optional argument will be used as format string.
# 3b) Alternatively, for a slightly faster prompt, __git_ps1 can
# be used for PROMPT_COMMAND in Bash or for precmd() in Zsh
# with two parameters, <pre> and <post>, which are strings
# you would put in $PS1 before and after the status string
# generated by the git-prompt machinery. e.g.
# Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
# will show username, at-sign, host, colon, cwd, then
# various status string, followed by dollar and SP, as
# your prompt.
# ZSH: precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
# will show username, pipe, then various status string,
# followed by colon, cwd, dollar and SP, as your prompt.
# Optionally, you can supply a third argument with a printf
# format string to fine-tune the output of the branch status
#
# The repository status will be displayed only if you are currently in a
# git repository. The %s token is the placeholder for the shown status.
#
# The prompt status always includes the current branch name.
#
# In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
# unstaged (*) and staged (+) changes will be shown next to the branch
# name. You can configure this per-repository with the
# bash.showDirtyState variable, which defaults to true once
# GIT_PS1_SHOWDIRTYSTATE is enabled.
#
# You can also see if currently something is stashed, by setting
# GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
# then a '$' will be shown next to the branch name.
#
# If you would like to see if there are untracked files, then you can set
# GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there are untracked
# files, then a '%' will be shown next to the branch name. You can
# configure this per-repository with the bash.showUntrackedFiles
# variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
# enabled.
#
# If you would like to see the difference between HEAD and its upstream,
# set GIT_PS1_SHOWUPSTREAM="auto". A "<" indicates you are behind, ">"
# indicates you are ahead, "<>" indicates you have diverged and "="
# indicates that there is no difference. You can further control
# behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
# of values:
#
# verbose show number of commits ahead/behind (+/-) upstream
# name if verbose, then also show the upstream abbrev name
# legacy don't use the '--count' option available in recent
# versions of git-rev-list
# git always compare HEAD to @{upstream}
# svn always compare HEAD to your SVN upstream
#
# You can change the separator between the branch name and the above
# state symbols by setting GIT_PS1_STATESEPARATOR. The default separator
# is SP.
#
# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
# find one, or @{upstream} otherwise. Once you have set
# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
# setting the bash.showUpstream config variable.
#
# If you would like to see more information about the identity of
# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
# to one of these values:
#
# contains relative to newer annotated tag (v1.6.3.2~35)
# branch relative to newer tag or branch (master~4)
# describe relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
# tag relative to any older tag (v1.6.3.1-13-gdd42c2f)
# default exactly matching tag
#
# If you would like a colored hint about the current dirty state, set
# GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
# the colored output of "git status -sb" and are available only when
# using __git_ps1 for PROMPT_COMMAND or precmd.
#
# If you would like __git_ps1 to do nothing in the case when the current
# directory is set up to be ignored by git, then set
# GIT_PS1_HIDE_IF_PWD_IGNORED to a nonempty value. Override this on the
# repository level by setting bash.hideIfPwdIgnored to "false".
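#
# A minimal example configuration, using illustrative values for the
# options documented above (any of them may be omitted):
#
#   GIT_PS1_SHOWDIRTYSTATE=1
#   GIT_PS1_SHOWSTASHSTATE=1
#   GIT_PS1_SHOWUNTRACKEDFILES=1
#   GIT_PS1_SHOWUPSTREAM="verbose name"
#   GIT_PS1_DESCRIBE_STYLE=branch
#   GIT_PS1_STATESEPARATOR='|'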
# check whether printf supports -v
__git_printf_supports_v=
printf -v __git_printf_supports_v -- '%s' yes >/dev/null 2>&1
# stores the divergence from upstream in $p
# used by GIT_PS1_SHOWUPSTREAM
__git_ps1_show_upstream ()
{
local key value
local svn_remote svn_url_pattern count n
local upstream=git legacy="" verbose="" name=""
svn_remote=()
# get some config options from git-config
local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
while read -r key value; do
case "$key" in
bash.showupstream)
GIT_PS1_SHOWUPSTREAM="$value"
if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
p=""
return
fi
;;
svn-remote.*.url)
svn_remote[$((${#svn_remote[@]} + 1))]="$value"
svn_url_pattern="$svn_url_pattern\\|$value"
upstream=svn+git # default upstream is SVN if available, else git
;;
esac
done <<< "$output"
# parse configuration values
for option in ${GIT_PS1_SHOWUPSTREAM}; do
case "$option" in
git|svn) upstream="$option" ;;
verbose) verbose=1 ;;
legacy) legacy=1 ;;
name) name=1 ;;
esac
done
# Find our upstream
case "$upstream" in
git) upstream="@{upstream}" ;;
svn*)
# get the upstream from the "git-svn-id: ..." in a commit message
# (git-svn uses essentially the same procedure internally)
local -a svn_upstream
svn_upstream=($(git log --first-parent -1 \
--grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
if [[ 0 -ne ${#svn_upstream[@]} ]]; then
svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
svn_upstream=${svn_upstream%@*}
local n_stop="${#svn_remote[@]}"
for ((n=1; n <= n_stop; n++)); do
svn_upstream=${svn_upstream#${svn_remote[$n]}}
done
if [[ -z "$svn_upstream" ]]; then
# default branch name for checkouts with no layout:
upstream=${GIT_SVN_ID:-git-svn}
else
upstream=${svn_upstream#/}
fi
elif [[ "svn+git" = "$upstream" ]]; then
upstream="@{upstream}"
fi
;;
esac
# Find how many commits we are ahead/behind our upstream
if [[ -z "$legacy" ]]; then
count="$(git rev-list --count --left-right \
"$upstream"...HEAD 2>/dev/null)"
else
# produce equivalent output to --count for older versions of git
local commits
if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
then
local commit behind=0 ahead=0
for commit in $commits
do
case "$commit" in
"<"*) ((behind++)) ;;
*) ((ahead++)) ;;
esac
done
count="$behind $ahead"
else
count=""
fi
fi
# calculate the result
if [[ -z "$verbose" ]]; then
case "$count" in
"") # no upstream
p="" ;;
"0 0") # equal to upstream
p="=" ;;
"0 "*) # ahead of upstream
p=">" ;;
*" 0") # behind upstream
p="<" ;;
*) # diverged from upstream
p="<>" ;;
esac
else
case "$count" in
"") # no upstream
p="" ;;
"0 0") # equal to upstream
p=" u=" ;;
"0 "*) # ahead of upstream
p=" u+${count#0 }" ;;
*" 0") # behind upstream
p=" u-${count% 0}" ;;
*) # diverged from upstream
p=" u+${count#* }-${count% *}" ;;
esac
if [[ -n "$count" && -n "$name" ]]; then
__git_ps1_upstream_name=$(git rev-parse \
--abbrev-ref "$upstream" 2>/dev/null)
if [ $pcmode = yes ] && [ $ps1_expanded = yes ]; then
p="$p \${__git_ps1_upstream_name}"
else
p="$p ${__git_ps1_upstream_name}"
# not needed anymore; keep user's
# environment clean
unset __git_ps1_upstream_name
fi
fi
fi
}
# Helper function that is meant to be called from __git_ps1. It
# injects color codes into the appropriate gitstring variables used
# to build a gitstring.
__git_ps1_colorize_gitstring ()
{
if [[ -n ${ZSH_VERSION-} ]]; then
local c_red='%F{red}'
local c_green='%F{green}'
local c_lblue='%F{blue}'
local c_clear='%f'
else
# Using \[ and \] around colors is necessary to prevent
# issues with command line editing/browsing/completion!
local c_red='\[\e[31m\]'
local c_green='\[\e[32m\]'
local c_lblue='\[\e[1;34m\]'
local c_clear='\[\e[0m\]'
fi
local bad_color=$c_red
local ok_color=$c_green
local flags_color="$c_lblue"
local branch_color=""
if [ $detached = no ]; then
branch_color="$ok_color"
else
branch_color="$bad_color"
fi
c="$branch_color$c"
z="$c_clear$z"
if [ "$w" = "*" ]; then
w="$bad_color$w"
fi
if [ -n "$i" ]; then
i="$ok_color$i"
fi
if [ -n "$s" ]; then
s="$flags_color$s"
fi
if [ -n "$u" ]; then
u="$bad_color$u"
fi
r="$c_clear$r"
}
# Helper function to read the first line of a file into a variable.
# __git_eread requires 2 arguments, the file path and the name of the
# variable, in that order.
__git_eread ()
{
test -r "$1" && IFS=$'\r\n' read "$2" <"$1"
}
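# Example usage (mirrors the calls later in this file):
#   __git_eread "$g/rebase-merge/head-name" b   # sets $b to the file's first line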
# see if a cherry-pick or revert is in progress, if the user has committed a
# conflict resolution with 'git commit' in the middle of a sequence of picks or
# reverts then CHERRY_PICK_HEAD/REVERT_HEAD will not exist so we have to read
# the todo file.
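# For reference, an illustrative first line of "$g/sequencer/todo" during a
# sequence of picks (the sha1 and subject here are made up):
#   pick 1234abc Some commit subject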
__git_sequencer_status ()
{
local todo
if test -f "$g/CHERRY_PICK_HEAD"
then
r="|CHERRY-PICKING"
return 0;
elif test -f "$g/REVERT_HEAD"
then
r="|REVERTING"
return 0;
elif __git_eread "$g/sequencer/todo" todo
then
case "$todo" in
p[\ \	]|pick[\ \	]*)
r="|CHERRY-PICKING"
return 0
;;
revert[\ \	]*)
r="|REVERTING"
return 0
;;
esac
fi
return 1
}
# __git_ps1 accepts 0 or 1 arguments (i.e., format string)
# when called from PS1 using command substitution
# in this mode it prints text to add to bash PS1 prompt (includes branch name)
#
# __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
# in that case it _sets_ PS1. The arguments are parts of a PS1 string.
# when two arguments are given, the first is prepended and the second appended
# to the state string when assigned to PS1.
# The optional third parameter will be used as printf format string to further
# customize the output of the git-status string.
# In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
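# Illustrative invocations of the two modes described above:
#   PS1='\w$(__git_ps1 " (%s)")\$ '               # command-substitution mode
#   PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "' # PROMPT_COMMAND (pc) mode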
__git_ps1 ()
{
# preserve exit status
local exit=$?
local pcmode=no
local detached=no
local ps1pc_start='\u@\h:\w '
local ps1pc_end='\$ '
local printf_format=' (%s)'
case "$#" in
2|3) pcmode=yes
ps1pc_start="$1"
ps1pc_end="$2"
printf_format="${3:-$printf_format}"
# set PS1 to a plain prompt so that we can
# simply return early if the prompt should not
# be decorated
PS1="$ps1pc_start$ps1pc_end"
;;
0|1) printf_format="${1:-$printf_format}"
;;
*) return $exit
;;
esac
# ps1_expanded: This variable is set to 'yes' if the shell
# subjects the value of PS1 to parameter expansion:
#
# * bash does unless the promptvars option is disabled
# * zsh does not unless the PROMPT_SUBST option is set
# * POSIX shells always do
#
# If the shell would expand the contents of PS1 when drawing
# the prompt, a raw ref name must not be included in PS1.
# This protects the user from arbitrary code execution via
# specially crafted ref names. For example, a ref named
# 'refs/heads/$(IFS=_;cmd=sudo_rm_-rf_/;$cmd)' might cause the
# shell to execute 'sudo rm -rf /' when the prompt is drawn.
#
# Instead, the ref name should be placed in a separate global
# variable (in the __git_ps1_* namespace to avoid colliding
# with the user's environment) and that variable should be
# referenced from PS1. For example:
#
# __git_ps1_foo=$(do_something_to_get_ref_name)
# PS1="...stuff...\${__git_ps1_foo}...stuff..."
#
# If the shell does not expand the contents of PS1, the raw
# ref name must be included in PS1.
#
# The value of this variable is only relevant when in pcmode.
#
# Assume that the shell follows the POSIX specification and
# expands PS1 unless determined otherwise. (This is more
# likely to be correct if the user has a non-bash, non-zsh
# shell and safer than the alternative if the assumption is
# incorrect.)
#
local ps1_expanded=yes
[ -z "${ZSH_VERSION-}" ] || [[ -o PROMPT_SUBST ]] || ps1_expanded=no
[ -z "${BASH_VERSION-}" ] || shopt -q promptvars || ps1_expanded=no
local repo_info rev_parse_exit_code
repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
--is-bare-repository --is-inside-work-tree \
--short HEAD 2>/dev/null)"
rev_parse_exit_code="$?"
if [ -z "$repo_info" ]; then
return $exit
fi
local short_sha=""
if [ "$rev_parse_exit_code" = "0" ]; then
short_sha="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
fi
local inside_worktree="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local bare_repo="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local inside_gitdir="${repo_info##*$'\n'}"
local g="${repo_info%$'\n'*}"
if [ "true" = "$inside_worktree" ] &&
[ -n "${GIT_PS1_HIDE_IF_PWD_IGNORED-}" ] &&
[ "$(git config --bool bash.hideIfPwdIgnored)" != "false" ] &&
git check-ignore -q .
then
return $exit
fi
local r=""
local b=""
local step=""
local total=""
if [ -d "$g/rebase-merge" ]; then
__git_eread "$g/rebase-merge/head-name" b
__git_eread "$g/rebase-merge/msgnum" step
__git_eread "$g/rebase-merge/end" total
r="|REBASE"
else
if [ -d "$g/rebase-apply" ]; then
__git_eread "$g/rebase-apply/next" step
__git_eread "$g/rebase-apply/last" total
if [ -f "$g/rebase-apply/rebasing" ]; then
__git_eread "$g/rebase-apply/head-name" b
r="|REBASE"
elif [ -f "$g/rebase-apply/applying" ]; then
r="|AM"
else
r="|AM/REBASE"
fi
elif [ -f "$g/MERGE_HEAD" ]; then
r="|MERGING"
elif __git_sequencer_status; then
:
elif [ -f "$g/BISECT_LOG" ]; then
r="|BISECTING"
fi
if [ -n "$b" ]; then
:
elif [ -h "$g/HEAD" ]; then
# symlink symbolic ref
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
local head=""
if ! __git_eread "$g/HEAD" head; then
return $exit
fi
# is it a symbolic ref?
b="${head#ref: }"
if [ "$head" = "$b" ]; then
detached=yes
b="$(
case "${GIT_PS1_DESCRIBE_STYLE-}" in
(contains)
git describe --contains HEAD ;;
(branch)
git describe --contains --all HEAD ;;
(tag)
git describe --tags HEAD ;;
(describe)
git describe HEAD ;;
(* | default)
git describe --tags --exact-match HEAD ;;
esac 2>/dev/null)" ||
b="$short_sha..."
b="($b)"
fi
fi
fi
if [ -n "$step" ] && [ -n "$total" ]; then
r="$r $step/$total"
fi
local w=""
local i=""
local s=""
local u=""
local c=""
local p=""
if [ "true" = "$inside_gitdir" ]; then
if [ "true" = "$bare_repo" ]; then
c="BARE:"
else
b="GIT_DIR!"
fi
elif [ "true" = "$inside_worktree" ]; then
if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
[ "$(git config --bool bash.showDirtyState)" != "false" ]
then
git diff --no-ext-diff --quiet || w="*"
git diff --no-ext-diff --cached --quiet || i="+"
if [ -z "$short_sha" ] && [ -z "$i" ]; then
i="#"
fi
fi
if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
git rev-parse --verify --quiet refs/stash >/dev/null
then
s="$"
fi
if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
[ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*' >/dev/null 2>/dev/null
then
u="%${ZSH_VERSION+%}"
fi
if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
__git_ps1_show_upstream
fi
fi
local z="${GIT_PS1_STATESEPARATOR-" "}"
# NO color option unless in PROMPT_COMMAND mode
if [ $pcmode = yes ] && [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
__git_ps1_colorize_gitstring
fi
b=${b##refs/heads/}
if [ $pcmode = yes ] && [ $ps1_expanded = yes ]; then
__git_ps1_branch_name=$b
b="\${__git_ps1_branch_name}"
fi
local f="$w$i$s$u"
local gitstring="$c$b${f:+$z$f}$r$p"
if [ $pcmode = yes ]; then
if [ "${__git_printf_supports_v-}" != yes ]; then
gitstring=$(printf -- "$printf_format" "$gitstring")
else
printf -v gitstring -- "$printf_format" "$gitstring"
fi
PS1="$ps1pc_start$gitstring$ps1pc_end"
else
printf -- "$printf_format" "$gitstring"
fi
return $exit
}
|
#!/bin/bash
#Set up the environment
mkdir wwdc15
cd wwdc15
mkdir tmp_download
cd tmp_download
#Extract IDs
wget https://developer.apple.com/videos/wwdc/2015/
cat index.html | grep "\t<a href=\"?id=" | sed -e 's/<a href=//' -e 's#/a>##' -e 's/>//' -e 's/</"/' > ../download_data
#Download dedicated webpages
rm index.html
cat ../download_data | awk '{ split($0, a, "\""); system( "wget -c \"https://developer.apple.com/videos/wwdc/2015/"a[2]"\" -O \"" a[3] "\"")}'
#Final download
for file in *
do
var=$(cat "$file" | grep "_hd_" | awk '{ split($0, a, "dl=1"); split(a[2], b, "href=\""); $1=b[2]; print substr($1, 0, length($1)-1)}')
wget -c "$var" -O "../$file.mp4"
done
#cleanup
cd ..
rm -rf tmp_download
rm download_data |
#!/bin/sh
# Common Openshift EAP6 scripts
if [ "${SCRIPT_DEBUG}" = "true" ] ; then
set -x
echo "Script debugging is enabled, allowing bash commands and their arguments to be printed as they are executed"
fi
CONFIG_FILE=$JBOSS_HOME/standalone/configuration/standalone-openshift.xml
LOGGING_FILE=$JBOSS_HOME/standalone/configuration/logging.properties
CONFIGURE_SCRIPTS=(
$JBOSS_HOME/bin/launch/backward-compatibility.sh
$JBOSS_HOME/bin/launch/configure_extensions.sh
$JBOSS_HOME/bin/launch/passwd.sh
$JBOSS_HOME/bin/launch/messaging.sh
$JBOSS_HOME/bin/launch/datasource.sh
$JBOSS_HOME/bin/launch/resource-adapter.sh
$JBOSS_HOME/bin/launch/admin.sh
$JBOSS_HOME/bin/launch/ha.sh
$JBOSS_HOME/bin/launch/jgroups.sh
$JBOSS_HOME/bin/launch/https.sh
$JBOSS_HOME/bin/launch/json_logging.sh
$JBOSS_HOME/bin/launch/security-domains.sh
$JBOSS_HOME/bin/launch/jboss_modules_system_pkgs.sh
$JBOSS_HOME/bin/launch/keycloak.sh
$JBOSS_HOME/bin/launch/deploymentScanner.sh
$JBOSS_HOME/bin/launch/ports.sh
$JBOSS_HOME/bin/launch/access_log_valve.sh
$JBOSS_HOME/bin/launch/standalone.sh
/opt/run-java/proxy-options
)
|
#!/usr/bin/env bash
os=`uname`
dist="ubuntu"
arch=`uname -m`
if test -f "/usr/lib/os-release"; then
dist=$(grep ID= /etc/os-release |head -1 |cut -c4-| sed 's/\"//g')
fi
if test -f "/usr/lib/fedora-release"; then
dist="fedora"
fi
if test -f "/usr/lib/centos-release"; then
dist="centos"
fi
echo dist is "$dist"
if [ -z "$DISTR" ] ; then
DISTR=$dist
fi
echo DISTR is "$DISTR"
case "$os" in
Darwin)
IONEAPI_ROOT=~/apps/oneapi
;;
Linux)
IONEAPI_ROOT=/opt/intel/oneapi
;;
esac
if [[ "$os" == "Darwin" ]]; then
# HOMEBREW_NO_AUTO_UPDATE=1 brew cask uninstall oclint || true
# HOMEBREW_NO_INSTALL_CLEANUP=1 HOMEBREW_NO_AUTO_UPDATE=1 brew install gcc "$MPI_IMPL" openblas python3 ||true
HOMEBREW_NO_INSTALL_CLEANUP=1 HOMEBREW_NO_AUTO_UPDATE=1 brew install gcc "$MPI_IMPL" python3 gsed grep automake autoconf ||true
if [[ "$FC" == "ifort" ]] || [[ "$FC" == "ifx" ]] ; then
if [[ -f ~/apps/oneapi/setvars.sh ]]; then
echo ' using intel cache installation '
else
mkdir -p ~/mntdmg ~/apps/oneapi || true
cd ~/Downloads
dir_base="18342"
dir_hpc="18341"
base="m_BaseKit_p_2022.1.0.92_offline"
hpc="m_HPCKit_p_2022.1.0.86_offline"
curl -LJO https://registrationcenter-download.intel.com/akdlm/irc_nas/"$dir_base"/"$base".dmg
curl -LJO https://registrationcenter-download.intel.com/akdlm/irc_nas/"$dir_hpc"/"$hpc".dmg
echo "installing BaseKit"
hdiutil attach "$base".dmg -mountpoint ~/mntdmg -nobrowse
sudo ~/mntdmg/bootstrapper.app/Contents/MacOS/install.sh --cli --eula accept \
--action install --components default --install-dir ~/apps/oneapi
hdiutil detach ~/mntdmg
#
echo "installing HPCKit"
hdiutil attach "$hpc".dmg -mountpoint ~/mntdmg -nobrowse
sudo ~/mntdmg/bootstrapper.app/Contents/MacOS/install.sh --cli --eula accept \
--action install --components default --install-dir ~/apps/oneapi
hdiutil detach ~/mntdmg
ls -lrta ~/apps/oneapi ||true
sudo rm -rf "$IONEAPI_ROOT"/intelpython "$IONEAPI_ROOT"/dal "$IONEAPI_ROOT"/advisor \
"$IONEAPI_ROOT"/ipp "$IONEAPI_ROOT"/conda_channel "$IONEAPI_ROOT"/dnnl \
"$IONEAPI_ROOT"/installer "$IONEAPI_ROOT"/vtune_profiler "$IONEAPI_ROOT"/tbb || true
fi
source "$IONEAPI_ROOT"/setvars.sh || true
export I_MPI_F90="$FC"
ls -lrta ~/apps/oneapi ||true
df -h
rm -f *dmg || true
df -h
"$FC" -V
icc -V
else
#hack to fix Github actions mpif90
gccver=`brew list --versions gcc| head -1 |cut -c 5-`
echo brew gccver is $gccver
ln -sf /usr/local/Cellar/gcc/$gccver/bin/gfortran-* /usr/local/Cellar/gcc/$gccver/bin/gfortran || true
ln -sf /usr/local/Cellar/gcc/$gccver/bin/gfortran-* /usr/local/bin/gfortran || true
# ln -sf /usr/local/bin/$FC /usr/local/bin/gfortran
$FC --version
gfortran --version
fi
#hack to get 3.10 as default
brew install [email protected]
brew link --force --overwrite [email protected]
# if [[ "$MPI_IMPL" == "openmpi" ]]; then
# HOMEBREW_NO_INSTALL_CLEANUP=1 HOMEBREW_NO_AUTO_UPDATE=1 brew install scalapack
# fi
fi
if [[ "$os" == "Linux" ]]; then
if [[ "$DISTR" == "fedora" ]] || [[ "$DISTR" == "centos" ]] ; then
rpminst=dnf
if [[ "$DISTR" == "centos" ]] ; then
rpminst=yum
fi
sudo $rpminst update; sudo $rpminst -y install perl python3-devel time patch openblas-serial64 openmpi-devel cmake gcc-gfortran unzip which make tar bzip2 openssh-clients rsync
# module load mpi
if [[ "$MPI_IMPL" == "openmpi" ]]; then
sudo $rpminst -y install openmpi-devel
else
echo ready only for openmpi
exit 1
fi
export PATH=/usr/lib64/"$MPI_IMPL"/bin:$PATH
export LD_LIBRARY_PATH=/usr/lib64/"$MPI_IMPL"/lib:$LD_LIBRARY_PATH
which mpif90
mpif90 -show
else
if [[ "$MPI_IMPL" == "openmpi" ]]; then
mpi_bin="openmpi-bin" ; mpi_libdev="libopenmpi-dev" scalapack_libdev="libscalapack-openmpi-dev"
fi
if [[ "$MPI_IMPL" == "mpich" ]]; then
mpi_bin="mpich" ; mpi_libdev="libmpich-dev" scalapack_libdev="libscalapack-mpich-dev"
fi
if [[ "$MPI_IMPL" == "intel" || "$FC" == "ifort" || "$FC" == "ifx" ]]; then
export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1
tries=0 ; until [ "$tries" -ge 10 ] ; do \
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
&& sudo -E apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
&& rm -f GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB || true \
&& echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list \
&& sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" \
&& sudo apt-get update && break ;\
tries=$((tries+1)) ; echo attempt no. $tries ; sleep 30 ; done
mpi_bin=" " ; mpi_libdev="intel-oneapi-mpi-devel" scalapack_libdev="intel-oneapi-mkl"
fi
sudo apt-get update
sudo apt-get -y install software-properties-common
sudo add-apt-repository universe && sudo apt-get update
# sudo apt-get -y install gfortran python3-dev python-dev cmake "$mpi_libdev" "$mpi_bin" "$scalapack_libdev" make perl libopenblas-dev python3 rsync
sudo apt-get -y install gfortran python3-dev python-dev cmake "$mpi_libdev" "$mpi_bin" make perl python3 rsync
if [[ "$FC" == "gfortran-11" ]] || [[ "$CC" == "gcc-11" ]]; then
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get -y install gcc-11 gfortran-11 g++-11
fi
if [[ "$FC" == "ifort" ]] || [[ "$FC" == "ifx" ]]; then
sudo apt-get -y install intel-oneapi-ifort intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic intel-oneapi-mkl
if [[ "$?" != 0 ]]; then
echo "apt-get install failed: exit code " "${?}"
exit 1
fi
sudo apt-get -y install intel-oneapi-mpi-devel
fi
if [[ "$FC" == "flang" ]]; then
if [[ "USE_AOMP" == "Y" ]]; then
aomp_major=13
aomp_minor=0-6
wget https://github.com/ROCm-Developer-Tools/aomp/releases/download/rel_"$aomp_major"."$aomp_minor"/aomp_Ubuntu2004_"$aomp_major"."$aomp_minor"_amd64.deb
sudo dpkg -i aomp_Ubuntu2004_"$aomp_major"."$aomp_minor"_amd64.deb
export PATH=/usr/lib/aomp_"$aomp_major"."$aomp_minor"/bin/:$PATH
export LD_LIBRARY_PATH=/usr/lib/aomp_"$aomp_major"."$aomp_minor"/lib:$LD_LIBRARY_PATH
ls -lrt /usr/lib | grep aomp ||true
else
aocc_version=3.2.0
aocc_dir=aocc-compiler-${aocc_version}
curl -LJO https://developer.amd.com/wordpress/media/files/${aocc_dir}.tar
tar xf ${aocc_dir}.tar
./${aocc_dir}/install.sh
source setenv_AOCC.sh
pwd
fi
flang -v
which flang
fi
if [[ "$FC" == "amdflang" ]]; then
sudo apt-get install -y wget gnupg2 coreutils dialog tzdata
rocm_version=4.5.2
wget -q -O - https://repo.radeon.com/rocm/rocm.gpg.key | sudo apt-key add -
echo 'deb [arch=amd64] https://repo.radeon.com/rocm/apt/'$rocm_version'/ ubuntu main' | sudo tee /etc/apt/sources.list.d/rocm.list
sudo apt-get update -y && sudo apt-get -y install rocm-llvm openmp-extras
export PATH=/opt/rocm-"$rocm_version"/bin:$PATH
export LD_LIBRARY_PATH=/opt/rocm-"$rocm_version"/lib:/opt/rocm-"$rocm_version"/llvm/lib:$LD_LIBRARY_PATH
amdflang -v
amdclang -v
fi
if [[ "$FC" == "nvfortran" ]]; then
sudo apt-get -y install lmod g++ libtinfo5 libncursesw5 lua-posix lua-filesystem lua-lpeg lua-luaossl
nv_major=22
nv_minor=1
nverdot="$nv_major"."$nv_minor"
nverdash="$nv_major"-"$nv_minor"
arch_dpkg=`dpkg --print-architecture`
nv_p1=nvhpc-"$nverdash"_"$nverdot"_"$arch_dpkg".deb
nv_p2=nvhpc-20"$nv_major"_"$nverdot"_"$arch_dpkg".deb
wget https://developer.download.nvidia.com/hpc-sdk/"$nverdot"/"$nv_p1"
wget https://developer.download.nvidia.com/hpc-sdk/"$nverdot"/"$nv_p2"
sudo dpkg -i "$nv_p1" "$nv_p2"
export PATH=/opt/nvidia/hpc_sdk/Linux_"$arch"/"$nverdot"/compilers/bin:$PATH
export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_"$arch"/"$nverdot"/compilers/lib:$LD_LIBRARY_PATH
sudo /opt/nvidia/hpc_sdk/Linux_"$arch"/"$nverdot"/compilers/bin/makelocalrc -x
# source /etc/profile.d/lmod.sh
# module use /opt/nvidia/hpc_sdk/modulefiles
# module load nvhpc
export FC=nvfortran
# if [ -z "$BUILD_MPICH" ] ; then
##use bundled openmpi
# export PATH=/opt/nvidia/hpc_sdk/Linux_"$arch"/"$nverdot"/comm_libs/mpi/bin:$PATH
# export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_"$arch"/"$nverdot"/comm_libs/mpi/lib:$LD_LIBRARY_PATH
# fi
export CC=gcc
env | grep FC || true
nvfortran -v
nvfortran
which nvfortran
fi
fi
fi
|
export MOVIE_API_KEY='f80d5dfbac424cbb9ebffcc23c378ffd'
export SECRET_KEY='e0f24eb2648140bcb944bf65069249ea'
python3 manage.py server |
#!/usr/bin/env bash
kafka-console-consumer.sh --zookeeper localhost:2181 --topic test-topic-name --from-beginning
|
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0+
#
# Periodically scan a directory tree to prevent files from being reaped
# by systemd and friends on long runs.
#
# Usage: kvm-remote-noreap.sh pathname
#
# Copyright (C) 2021 Facebook, Inc.
#
# Authors: Paul E. McKenney <[email protected]>
pathname="$1"
if test "$pathname" = ""
then
echo Usage: kvm-remote-noreap.sh pathname
exit 1
fi
if ! test -d "$pathname"
then
echo Usage: kvm-remote-noreap.sh pathname
echo " pathname must be a directory."
exit 2
fi
while test -d "$pathname"
do
find "$pathname" -type f -exec touch -c {} \; > /dev/null 2>&1
sleep 30
done
|
#!/bin/bash
# First, find suitable JDK
version=$("java" -version 2>&1 | awk -F '"' '/version/ {print $2}')
jdk_home="no"
if [[ "$version" > "1.8" ]]; then
jdk_home=${JAVA_HOME}
echo "default JDK version is OK, JDK home is $jdk_home"
else
jdk_path=/opt
echo "begin to find suitable JDK...."
for path in `find $jdk_path -name jmap`
do
_java=${path%/*}/java
version=$("$_java" -version 2>&1 | awk -F '"' '{print $2}')
if [[ "$version" > "1.8" ]]; then
jdk_home=${_java%/bin*}
echo "find out suitable JDK, JDK home is $jdk_home"
break
fi
done
fi
if [ "$jdk_home" == "no" ] ;then
echo "no suitable JDK was found, which is required jdk1.8, exit"
exit 0
fi
JAVA_HOME=$jdk_home
CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
echo "-------------------------java info-------------------------"
java -version 2>&1
echo "-------------------------pwd-------------------------"
echo $(pwd)
# Second, should I watch?
working_directory=$(pwd)
proc_watcher="no"
if [ "$1" == "--no-watch" ]; then
proc_watcher="no"
shift
fi
# Third, choose profile
sia_task_scheduler_config="sia-task-scheduler.yml"
echo "using workspace $working_directory"
echo "proc_watch: $proc_watcher"
javaOpts="-server -Xms128m -Xmx256m -Xss256k -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:+CMSIncrementalPacing -XX:CMSIncrementalDutyCycleMin=0 -XX:CMSIncrementalDutyCycle=10 -XX:+UseParNewGC -XX:+UseCMSCompactAtFullCollection -XX:-CMSParallelRemarkEnabled -XX:CMSFullGCsBeforeCompaction=0 -XX:CMSInitiatingOccupancyFraction=70 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=."
java $javaOpts -XX:OnOutOfMemoryError='kill -9 %p' -Dspring.config.location=../config/$sia_task_scheduler_config -jar $working_directory/$2 &
# Fourth, add crontab process watcher
if [ "$proc_watcher" == "yes" ]; then
sleep 1
# add crontab
cronfile=$(pwd)/$1".cron.run"
crontab -l | grep -v "$1" 1>$cronfile 2>/dev/null
echo "*/1 * * * * sh $working_directory/task_proc_watcher.sh \"$1\" \"$working_directory\" \"sh run4scheduler.sh --no-watch $1 $2 \" >/dev/null 2>&1" >> $cronfile
crontab $cronfile
rm $cronfile
exit 0
fi
|
#!/bin/bash
echo "Bygger flex-bucket-uploader for docker compose utvikling"
./gradlew shadowJar
docker build -t flex-bucket-uploader:latest .
|
#!/bin/bash
RETCODE=$(fw_exists pypy)
[ ! "$RETCODE" == 0 ] || { return 0; }
fw_get https://bitbucket.org/pypy/pypy/downloads/pypy-2.3.1-linux64.tar.bz2 -O pypy-2.3.1-linux64.tar.bz2
fw_untar pypy-2.3.1-linux64.tar.bz2
ln -sf pypy-2.3.1-linux64 pypy
if [ ! -f "get-pip.py" ]; then
fw_get https://bootstrap.pypa.io/get-pip.py -O get-pip.py
fi
./pypy/bin/pypy get-pip.py
./pypy/bin/pip install virtualenv
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-ST/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-ST/1024+0+512-SS-N-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_sentences_remove_all_but_nouns_first_two_thirds_sixth --eval_function penultimate_sixth_eval |
checkRequirements() {
if ! which git > /dev/null; then
echo "git is required to install BATS"
exit 1
fi
if [[ $EUID == 0 ]]; then export SUDO=""; else export SUDO="sudo"; fi
}
installBats() {
cd /tmp
git clone https://github.com/bats-core/bats-core.git
cd /tmp/bats-core
$SUDO ./install.sh /usr/local
}
checkInstall() {
if ! which bats > /dev/null; then
echo "BATS has failed to install."
exit 1
fi
echo
echo "BATS-Core installed"
echo
}
runInstallBats() {
checkRequirements
installBats
checkInstall
}
runInstallBats |
export PATH=/dellfsqd3/MGI_QINGDAO/USER/lishuangshuang/software/miniconda3/bin:$PATH
export PATH=/dellfsqd3/MGI_QINGDAO/USER/lishuangshuang/software/miniconda3/envs/scRNA_snakemake/bin:$PATH
export LD_LIBRARY_PATH=/dellfsqd3/MGI_QINGDAO/USER/lishuangshuang/software/miniconda3/envs/scRNA_snakemake/lib:$LD_LIBRARY_PATH
|
# Define equivalent of TOML config in environment
# [build]
# jobs = $XBPS_MAKEJOBS
export CARGO_BUILD_JOBS="$XBPS_MAKEJOBS"
if [ "$CROSS_BUILD" ]; then
# Define equivalent of TOML config in environment
# [target.${RUST_TARGET}]
# linker = ${CC}
_XBPS_CROSS_RUST_TARGET_ENV="${XBPS_CROSS_RUST_TARGET^^}"
_XBPS_CROSS_RUST_TARGET_ENV="${_XBPS_CROSS_RUST_TARGET_ENV//-/_}"
export CARGO_TARGET_${_XBPS_CROSS_RUST_TARGET_ENV}_LINKER="$CC"
unset _XBPS_CROSS_RUST_TARGET_ENV
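# e.g. for a hypothetical RUST_TARGET=x86_64-unknown-linux-gnu, the export
# above becomes CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="$CC"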
# Define equivalent of TOML config in environment
# [build]
# target = ${RUST_TARGET}
export CARGO_BUILD_TARGET="$RUST_TARGET"
else
unset CARGO_BUILD_TARGET
fi
# For cross-compiling rust -sys crates
export PKG_CONFIG_ALLOW_CROSS=1
# libgit2-sys
export LIBGIT2_SYS_USE_PKG_CONFIG=1
# gettext-rs
export GETTEXT_BIN_DIR=/usr/bin
export GETTEXT_LIB_DIR="${XBPS_CROSS_BASE}/usr/lib/gettext"
export GETTEXT_INCLUDE_DIR="${XBPS_CROSS_BASE}/usr/include"
# libssh2-sys
export LIBSSH2_SYS_USE_PKG_CONFIG=1
# sodium-sys
export SODIUM_LIB_DIR="${XBPS_CROSS_BASE}/usr/lib"
export SODIUM_INC_DIR="${XBPS_CROSS_BASE}/usr/include"
|
#!/bin/sh
# Copyright (c) Citrix Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
set -eu
grep -q 'AutoScaleMode\.\(Font\|None\)' "$1" && echo "$1 has incorrect AutoScaleMode!"
|
#!/bin/bash -x
# Copyright 2020, SURFsara.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cowsay "Deploy static website"
mo website-ingress.yaml_template > website-ingress.yaml
kubectl apply -f nginx-pvc.yaml
kubectl apply -f nginx-cm.yaml
kubectl apply -f nginx-dep.yaml
kubectl apply -f nginx-svc.yaml
export POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath='{ .items[0].metadata.name}')
echo "Waiting for nginx to become ready."
kubectl wait --for=condition=ready pod/$POD_NAME
for f in static/*
do
echo $f
kubectl cp $f $POD_NAME:/usr/share/nginx/html/
done
kubectl apply -f website-ingress.yaml
|
#!/bin/bash
set -eu
PROJECT_ROOT="$(realpath "$(dirname "$0")/../")"
cd "${PROJECT_ROOT}"
if [ ! -e /.dockerenv ]; then
source ./.env
fi
source ./configsets.sh
echo "Uploading configsets to Apache Zookeeper"
# shellcheck disable=SC2153
for CONFIGSET in "${CONFIGSETS[@]}";
do
echo "> Uploading ${CONFIGSET}"
bash "${SOLR_INSTALLATION_PATH}/server/scripts/cloud-scripts/zkcli.sh" \
-z "${ZOOKEEPER_CONNECTION_STRING}" \
-cmd upconfig \
-confdir "${PROJECT_ROOT}/configsets/${CONFIGSET}/conf" \
-confname "${CONFIGSET}" > /dev/null 2>&1
done
|
#!/usr/bin/env bash
#
# this is a barebones test (if the site starts with the default settings)
#
# 1. lint the source
# 2. start site with default settings (e.g. no config)
# 3. sleep for two seconds to let it spin up
# 4. kill the child process
#
set -e
set -u
if [[ ! -f cosi-site.js ]]; then
if [[ -f ../cosi-site.js ]]; then
cd ..
else
echo "Unable to find cosi-site.js in . or .."
fi
fi
[[ -x test/test_lint.sh ]] && test/test_lint.sh
node_args="--trace-deprecation --trace-sync-io --throw-deprecation"
node $node_args cosi-site.js --log_dir="test" &
cs_pid=$!
sleep 2
kill $cs_pid
# remove the log if we've reached here without incident
# (considered to be a successful test, don't need to
# keep appending to the test log)
set +e
[[ -f test/cosi-site.log ]] && rm test/cosi-site.log
#
# the basic source is 'commit'able
#
## END
|
#! /bin/bash
# SPDX-License-Identifier: Apache-2.0
##
# Copyright (C) 2020 Jihoon Lee <[email protected]>
#
# @file prepare_openblas.sh
# @date 08 December 2020
# @brief This file is a helper tool to build android
# @author Jihoon lee <[email protected]>
#
# usage: ./prepare_openblas.sh target
set -e
TARGET=$1
TAR_PREFIX=openblas
TAR_NAME=${TAR_PREFIX}-0.2.20.tar.gz
URL="https://github.com/nnstreamer/nnstreamer-android-resource/raw/master/external/${TAR_NAME}"
echo "PREPARING OPENBLAS at ${TARGET}"
[ ! -d ${TARGET} ] && mkdir -p ${TARGET}
pushd ${TARGET}
function _download_cblas {
[ -f $TAR_NAME ] && echo "${TAR_NAME} exists, skip downloading" && return 0
echo "[OPENBLAS] downloading ${TAR_NAME}\n"
if ! wget -q ${URL} ; then
echo "[OPENBLAS] Download failed, please check url\n"
exit $?
fi
echo "[OPENBLAS] Finish downloading openblas\n"
}
function _untar_cblas {
echo "[OPENBLAS] untar openblas\n"
tar -zxvf ${TAR_NAME} -C ${TARGET}
rm -f ${TAR_NAME}
}
[ ! -d "${TAR_PREFIX}" ] && _download_cblas && _untar_cblas
popd
|
#! /bin/bash
#### Variables ####
NAMESPACE="default"
OUTFILE="k8sviz.out"
TYPE="dot"
KUBECONFIG=~/.kube/config
SHFLAGS_DIR="$(dirname ${BASH_SOURCE})/lib/"
SHFLAGS_PATH="${SHFLAGS_DIR}shflags"
SHFLAGS_URL="https://raw.githubusercontent.com/kward/shflags/master/shflags"
VERSION_URL="https://raw.githubusercontent.com/mkimuram/k8sviz/master/version.txt"
VERSION=$(curl -L -s ${VERSION_URL})
CONTAINER_IMG=mkimuram/k8sviz:${VERSION}
if [ ! -f ${SHFLAGS_PATH} ];then
echo "${SHFLAGS_PATH} not found. Downloading." >&2
mkdir -p ${SHFLAGS_DIR}
if [ $? -ne 0 ];then
cat << EOF >&2
Failed to create ${SHFLAGS_DIR} directory.
Move this script to the directory where you have write permission.
EOF
exit 1
fi
curl -L -f -o ${SHFLAGS_PATH} ${SHFLAGS_URL}
if [ $? -ne 0 ];then
cat << EOF >&2
Failed to download shflags.
You can manually download it from ${SHFLAGS_URL}
and copy it to ${SHFLAGS_DIR} to fix it.
EOF
exit 1
fi
fi
. ${SHFLAGS_PATH}
DEFINE_string 'namespace' "${NAMESPACE}" 'The namespace to visualize.' 'n'
DEFINE_string 'outfile' "${OUTFILE}" 'The filename to output.' 'o'
DEFINE_string 'type' "${TYPE}" 'The type of output.' 't'
DEFINE_string 'kubeconfig' "${KUBECONFIG}" 'Path to kubeconfig file.' 'k'
DEFINE_string 'image' "${CONTAINER_IMG}" 'Image name of the container.' 'i'
# Parse Options
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
#### Main ####
# Split OUTFILE to the directory and the filename to be used with container
DIR=$(dirname ${FLAGS_outfile})
ABSDIR=$(cd ${DIR}; pwd -P)
FILENAME=$(basename ${FLAGS_outfile})
# Make KUBECONFIG to absolute path
KUBEDIR=$(dirname ${FLAGS_kubeconfig})
ABSKUBEDIR=$(cd ${KUBEDIR}; pwd -P)
KUBEFILE=$(basename ${FLAGS_kubeconfig})
KUBECONFIG="${ABSKUBEDIR}/${KUBEFILE}"
# Check if KUBECONFIG file exists
if [ ! -f "${KUBECONFIG}" ];then
echo "KUBECONFIG file wasn't found in ${KUBECONFIG}." >&2
echo "You need to specify the right path with --kubeconfig option." >&2
exit 1
fi
docker run --network host \
--user $(id -u):$(id -g) \
-v ${ABSDIR}:/work \
-v ${KUBECONFIG}:/config:ro \
-it --rm ${FLAGS_image} \
/k8sviz -kubeconfig /config \
-n ${FLAGS_namespace} -t ${FLAGS_type} -o /work/${FILENAME}
|
#!/bin/bash
set -e
cmd="$@"
# This entrypoint is used to play nicely with the current cookiecutter configuration.
# Since docker-compose relies heavily on environment variables itself for configuration, we'd have to define multiple
# environment variables just to support cookiecutter out of the box. That makes no sense, so this little entrypoint
# does all this for us.
# the official postgres image uses 'postgres' as default user if not set explicitly.
if [ -z "$POSTGRES_USER" ]; then
export POSTGRES_USER=postgres
fi
# If no DB is set, use POSTGRES_USER by default
if [ -z "$POSTGRES_DB" ]; then
export POSTGRES_DB=$POSTGRES_USER
fi
# Need to update the DATABASE_URL if using DOCKER
export DATABASE_URL=postgres://$POSTGRES_USER:$POSTGRES_PASSWORD@postgres:5432/$POSTGRES_DB
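# For example (hypothetical values): POSTGRES_USER=myuser POSTGRES_PASSWORD=secret
# with POSTGRES_DB unset yields
#   DATABASE_URL=postgres://myuser:secret@postgres:5432/myuser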
function postgres_ready(){
python << END
import sys
import psycopg2
try:
conn = psycopg2.connect(dbname="$POSTGRES_DB", user="$POSTGRES_USER", password="$POSTGRES_PASSWORD", host="postgres")
except psycopg2.OperationalError:
sys.exit(-1)
sys.exit(0)
END
}
until postgres_ready; do
>&2 echo "Postgres is unavailable - sleeping"
sleep 1
done
>&2 echo "Postgres is up - continuing..."
exec $cmd |
#!/usr/bin/env bats
load helpers
function teardown() {
cleanup_test
}
# 1. test running with the default apparmor profile loaded.
# test that we can run with the default apparmor profile which will not block touching a file in `.`
@test "load default apparmor profile and run a container with it" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
if [[ "$enabled" -eq 0 ]]; then
skip "skip this test since apparmor is not enabled."
fi
start_ocid
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname1": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor1.json
run ocic pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
}
# 2. test running with a specific apparmor profile loaded as the ocid default apparmor profile.
# test that we can run with a specific apparmor profile, set as the ocid default, which will block touching a file in `.`.
@test "load a specific apparmor profile as default apparmor and run a container with it" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
if [[ "$enabled" -eq 0 ]]; then
skip "skip this test since apparmor is not enabled."
fi
load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
start_ocid "" "$APPARMOR_TEST_PROFILE_NAME"
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname2": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor2.json
run ocic pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Permission denied" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
}
# 3. test running with a specific apparmor profile loaded, but not as the ocid default apparmor profile.
# test that we can run with a specific apparmor profile which will block touching a file in `.`
@test "load default apparmor profile and run a container with another apparmor profile" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
if [[ "$enabled" -eq 0 ]]; then
skip "skip this test since apparmor is not enabled."
fi
load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
start_ocid
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname3": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor3.json
run ocic pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Permission denied" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
}
# 4. test running with a wrong apparmor profile name.
# test that running a ctr with a wrong apparmor profile name will fail.
@test "run a container with wrong apparmor profile name" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
if [[ "$enabled" -eq 0 ]]; then
skip "skip this test since apparmor is not enabled."
fi
start_ocid
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname4": "not-exists"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor4.json
run ocic pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Creating container failed" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
}
# 5. test running with the default apparmor profile unloaded.
# test that a container requesting runtime/default still runs after the default apparmor profile has been unloaded.
@test "run a container after unloading default apparmor profile" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
if [[ "$enabled" -eq 0 ]]; then
skip "skip this test since apparmor is not enabled."
fi
start_ocid
remove_apparmor_profile "$FAKE_OCID_DEFAULT_PROFILE_PATH"
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname5": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor5.json
run ocic pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
}
|
#!/usr/bin/env bash
. ./set-env.sh
docker build \
-t $1 \
-f ${PWD}/../../src/main/docker/DockerFile \
${PWD}/../../build/libs/; |
#!/bin/sh
#
# Copyright (c) 2007 Shawn Pearce
#
test_description='test git fast-import utility'
. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh ;# test-lib chdir's into trash
# Print $1 bytes from stdin to stdout.
#
# This could be written as "head -c $1", but IRIX "head" does not
# support the -c option.
head_c () {
perl -e '
my $len = $ARGV[1];
while ($len > 0) {
my $s;
my $nread = sysread(STDIN, $s, $len);
die "cannot read: $!" unless defined($nread);
print $s;
$len -= $nread;
}
' - "$1"
}
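# Example (illustrative): printf "hello" | head_c 3   # writes "hel" to stdout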
verify_packs () {
for p in .git/objects/pack/*.pack
do
git verify-pack "$@" "$p" || return
done
}
file2_data='file2
second line of EOF'
file3_data='EOF
in 3rd file
END'
file4_data=abcd
file4_len=4
file5_data='an inline file.
we should see it later.'
file6_data='#!/bin/sh
echo "$@"'
###
### series A
###
test_expect_success 'empty stream succeeds' '
git fast-import </dev/null
'
test_expect_success 'A: create pack from stdin' '
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :2
data <<EOF
$file2_data
EOF
blob
mark :3
data <<END
$file3_data
END
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/master
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initial
COMMIT
M 644 :2 file2
M 644 :3 file3
M 755 :4 file4
tag series-A
from :5
data <<EOF
An annotated tag without a tagger
EOF
tag series-A-blob
from :3
data <<EOF
An annotated tag that annotates a blob.
EOF
INPUT_END
git fast-import --export-marks=marks.out <input &&
git whatchanged master
'
test_expect_success 'A: verify pack' '
verify_packs
'
test_expect_success 'A: verify commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
initial
EOF
git cat-file commit master | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tree' '
cat >expect <<-EOF &&
100644 blob file2
100644 blob file3
100755 blob file4
EOF
git cat-file -p master^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file2' '
echo "$file2_data" >expect &&
git cat-file blob master:file2 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file3' '
echo "$file3_data" >expect &&
git cat-file blob master:file3 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file4' '
printf "$file4_data" >expect &&
git cat-file blob master:file4 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tag/series-A' '
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master)
type commit
tag series-A
An annotated tag without a tagger
EOF
git cat-file tag tags/series-A >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tag/series-A-blob' '
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master:file3)
type blob
tag series-A-blob
An annotated tag that annotates a blob.
EOF
git cat-file tag tags/series-A-blob >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify marks output' '
cat >expect <<-EOF &&
:2 `git rev-parse --verify master:file2`
:3 `git rev-parse --verify master:file3`
:4 `git rev-parse --verify master:file4`
:5 `git rev-parse --verify master^0`
EOF
test_cmp expect marks.out
'
test_expect_success 'A: verify marks import' '
git fast-import \
--import-marks=marks.out \
--export-marks=marks.new \
</dev/null &&
test_cmp expect marks.new
'
test_expect_success 'A: tag blob by sha1' '
test_tick &&
new_blob=$(echo testing | git hash-object --stdin) &&
cat >input <<-INPUT_END &&
tag series-A-blob-2
from $(git rev-parse refs/heads/master:file3)
data <<EOF
Tag blob by sha1.
EOF
blob
mark :6
data <<EOF
testing
EOF
commit refs/heads/new_blob
committer <> 0 +0000
data 0
M 644 :6 new_blob
#pretend we got sha1 from fast-import
ls "new_blob"
tag series-A-blob-3
from $new_blob
data <<EOF
Tag new_blob.
EOF
INPUT_END
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master:file3)
type blob
tag series-A-blob-2
Tag blob by sha1.
object $new_blob
type blob
tag series-A-blob-3
Tag new_blob.
EOF
git fast-import <input &&
git cat-file tag tags/series-A-blob-2 >actual &&
git cat-file tag tags/series-A-blob-3 >>actual &&
test_cmp expect actual
'
test_expect_success 'A: verify marks import does not crash' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/verify--import-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
recreate from :5
COMMIT
from :5
M 755 :2 copy-of-file2
INPUT_END
git fast-import --import-marks=marks.out <input &&
git whatchanged verify--import-marks
'
test_expect_success 'A: verify pack' '
verify_packs
'
test_expect_success 'A: verify diff' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 A copy-of-file2
EOF
git diff-tree -M -r master verify--import-marks >actual &&
compare_diff_raw expect actual &&
test `git rev-parse --verify master:file2` \
= `git rev-parse --verify verify--import-marks:copy-of-file2`
'
test_expect_success 'A: export marks with large values' '
test_tick &&
mt=$(git hash-object --stdin < /dev/null) &&
>input.blob &&
>marks.exp &&
>tree.exp &&
cat >input.commit <<-EOF &&
commit refs/heads/verify--dump-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
test the sparse array dumping routines with exponentially growing marks
COMMIT
EOF
i=0 l=4 m=6 n=7 &&
while test "$i" -lt 27
do
cat >>input.blob <<-EOF &&
blob
mark :$l
data 0
blob
mark :$m
data 0
blob
mark :$n
data 0
EOF
echo "M 100644 :$l l$i" >>input.commit &&
echo "M 100644 :$m m$i" >>input.commit &&
echo "M 100644 :$n n$i" >>input.commit &&
echo ":$l $mt" >>marks.exp &&
echo ":$m $mt" >>marks.exp &&
echo ":$n $mt" >>marks.exp &&
printf "100644 blob $mt\tl$i\n" >>tree.exp &&
printf "100644 blob $mt\tm$i\n" >>tree.exp &&
printf "100644 blob $mt\tn$i\n" >>tree.exp &&
l=$(($l + $l)) &&
m=$(($m + $m)) &&
n=$(($l + $n)) &&
i=$((1 + $i)) || return 1
done &&
sort tree.exp > tree.exp_s &&
cat input.blob input.commit | git fast-import --export-marks=marks.large &&
git ls-tree refs/heads/verify--dump-marks >tree.out &&
test_cmp tree.exp_s tree.out &&
test_cmp marks.exp marks.large
'
###
### series B
###
test_expect_success 'B: fail on invalid blob sha1' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/master
M 755 0000000000000000000000000000000000000001 zero1
INPUT_END
test_when_finished "rm -f .git/objects/pack_* .git/objects/index_*" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: accept branch name "TEMP_TAG"' '
cat >input <<-INPUT_END &&
commit TEMP_TAG
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
tag base
COMMIT
from refs/heads/master
INPUT_END
test_when_finished "rm -f .git/TEMP_TAG
git gc
git prune" &&
git fast-import <input &&
test -f .git/TEMP_TAG &&
test `git rev-parse master` = `git rev-parse TEMP_TAG^`
'
test_expect_success 'B: accept empty committer' '
cat >input <<-INPUT_END &&
commit refs/heads/empty-committer-1
committer <> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/empty-committer-1
git gc
git prune" &&
git fast-import <input &&
out=$(git fsck) &&
echo "$out" &&
test -z "$out"
'
test_expect_success 'B: accept and fixup committer with no name' '
cat >input <<-INPUT_END &&
commit refs/heads/empty-committer-2
committer <[email protected]> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/empty-committer-2
git gc
git prune" &&
git fast-import <input &&
out=$(git fsck) &&
echo "$out" &&
test -z "$out"
'
test_expect_success 'B: fail on invalid committer (1)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name email> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (2)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <e<mail> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (3)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <email>> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (4)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <email $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (5)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name<email> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
###
### series C
###
test_expect_success 'C: incremental import create pack from stdin' '
newf=`echo hi newf | git hash-object -w --stdin` &&
oldf=`git rev-parse --verify master:file2` &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second
COMMIT
from refs/heads/master
M 644 $oldf file2/oldf
M 755 $newf file2/newf
D file3
INPUT_END
git fast-import <input &&
git whatchanged branch
'
test_expect_success 'C: verify pack' '
verify_packs
'
test_expect_success 'C: validate reuse existing blob' '
test $newf = `git rev-parse --verify branch:file2/newf` &&
test $oldf = `git rev-parse --verify branch:file2/oldf`
'
test_expect_success 'C: verify commit' '
cat >expect <<-EOF &&
parent `git rev-parse --verify master^0`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second
EOF
git cat-file commit branch | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'C: validate rename result' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 f1fb5da718392694d0076d677d6d0e364c79b0bc A file2/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100 file2 file2/oldf
:100644 000000 0d92e9f3374ae2947c23aa477cbc68ce598135f1 0000000000000000000000000000000000000000 D file3
EOF
git diff-tree -M -r master branch >actual &&
compare_diff_raw expect actual
'
###
### series D
###
test_expect_success 'D: inline data in commit' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline newdir/interesting
data <<EOF
$file5_data
EOF
M 755 inline newdir/exec.sh
data <<EOF
$file6_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged branch
'
test_expect_success 'D: verify pack' '
verify_packs
'
test_expect_success 'D: validate new files added' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 e74b7d465e52746be2b4bae983670711e6e66657 A newdir/exec.sh
:000000 100644 0000000000000000000000000000000000000000 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 A newdir/interesting
EOF
git diff-tree -M -r branch^ branch >actual &&
compare_diff_raw expect actual
'
test_expect_success 'D: verify file5' '
echo "$file5_data" >expect &&
git cat-file blob branch:newdir/interesting >actual &&
test_cmp expect actual
'
test_expect_success 'D: verify file6' '
echo "$file6_data" >expect &&
git cat-file blob branch:newdir/exec.sh >actual &&
test_cmp expect actual
'
###
### series E
###
test_expect_success 'E: rfc2822 date, --date-format=raw' '
cat >input <<-INPUT_END &&
commit refs/heads/branch
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> Tue Feb 6 11:22:18 2007 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> Tue Feb 6 12:35:02 2007 -0500
data <<COMMIT
RFC 2822 type date
COMMIT
from refs/heads/branch^0
INPUT_END
test_must_fail git fast-import --date-format=raw <input
'
test_expect_success 'E: rfc2822 date, --date-format=rfc2822' '
git fast-import --date-format=rfc2822 <input
'
test_expect_success 'E: verify pack' '
verify_packs
'
test_expect_success 'E: verify commit' '
cat >expect <<-EOF &&
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 1170778938 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1170783302 -0500
RFC 2822 type date
EOF
git cat-file commit branch | sed 1,2d >actual &&
test_cmp expect actual
'
###
### series F
###
test_expect_success 'F: non-fast-forward update skips' '
old_branch=`git rev-parse --verify branch^0` &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
reset refs/heads/other
from refs/heads/branch
INPUT_END
test_must_fail git fast-import <input &&
# branch must remain unaffected
test $old_branch = `git rev-parse --verify branch^0`
'
test_expect_success 'F: verify pack' '
verify_packs
'
test_expect_success 'F: verify other commit' '
cat >expect <<-EOF &&
tree `git rev-parse branch~1^{tree}`
parent `git rev-parse branch~1`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
losing things already?
EOF
git cat-file commit other >actual &&
test_cmp expect actual
'
###
### series G
###
test_expect_success 'G: non-fast-forward update forced' '
old_branch=`git rev-parse --verify branch^0` &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
INPUT_END
git fast-import --force <input
'
test_expect_success 'G: verify pack' '
verify_packs
'
test_expect_success 'G: branch changed, but logged' '
test $old_branch != `git rev-parse --verify branch^0` &&
test $old_branch = `git rev-parse --verify branch@{1}`
'
###
### series H
###
test_expect_success 'H: deleteall, add 1' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/H
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline i-will-die
data <<EOF
this file will never exist.
EOF
deleteall
M 644 inline h/e/l/lo
data <<EOF
$file5_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged H
'
test_expect_success 'H: verify pack' '
verify_packs
'
test_expect_success 'H: validate old files removed, new files added' '
cat >expect <<-EOF &&
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file2/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file2/oldf
:100755 000000 85df50785d62d3b05ab03d9cbf7e4a0b49449730 0000000000000000000000000000000000000000 D file4
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100 newdir/interesting h/e/l/lo
:100755 000000 e74b7d465e52746be2b4bae983670711e6e66657 0000000000000000000000000000000000000000 D newdir/exec.sh
EOF
git diff-tree -M -r H^ H >actual &&
compare_diff_raw expect actual
'
test_expect_success 'H: verify file' '
echo "$file5_data" >expect &&
git cat-file blob H:h/e/l/lo >actual &&
test_cmp expect actual
'
###
### series I
###
test_expect_success 'I: export-pack-edges' '
cat >input <<-INPUT_END &&
commit refs/heads/export-boundary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
we have a border. it is only 40 characters wide.
COMMIT
from refs/heads/branch
INPUT_END
git fast-import --export-pack-edges=edges.list <input
'
test_expect_success 'I: verify edge list' '
cat >expect <<-EOF &&
.git/objects/pack/pack-.pack: `git rev-parse --verify export-boundary`
EOF
sed -e s/pack-.*pack/pack-.pack/ edges.list >actual &&
test_cmp expect actual
'
###
### series J
###
test_expect_success 'J: reset existing branch creates empty commit' '
cat >input <<-INPUT_END &&
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create J
COMMIT
from refs/heads/branch
reset refs/heads/J
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initialize J
COMMIT
INPUT_END
git fast-import <input
'
test_expect_success 'J: branch has 1 commit, empty tree' '
test 1 = `git rev-list J | wc -l` &&
test 0 = `git ls-tree J | wc -l`
'
test_expect_success 'J: tag must fail on empty branch' '
cat >input <<-INPUT_END &&
reset refs/heads/J2
tag wrong_tag
from refs/heads/J2
data <<EOF
Tag branch that was reset.
EOF
INPUT_END
test_must_fail git fast-import <input
'
###
### series K
###
test_expect_success 'K: reinit branch with from' '
cat >input <<-INPUT_END &&
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create K
COMMIT
from refs/heads/branch
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
redo K
COMMIT
from refs/heads/branch^1
INPUT_END
git fast-import <input
'
test_expect_success 'K: verify K^1 = branch^1' '
test `git rev-parse --verify branch^1` \
= `git rev-parse --verify K^1`
'
###
### series L
###
test_expect_success 'L: verify internal tree sorting' '
cat >input <<-INPUT_END &&
blob
mark :1
data <<EOF
some data
EOF
blob
mark :2
data <<EOF
other data
EOF
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create L
COMMIT
M 644 :1 b.
M 644 :1 b/other
M 644 :1 ba
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
update L
COMMIT
M 644 :2 b.
M 644 :2 b/other
M 644 :2 ba
INPUT_END
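# tree entries sort byte-wise by name, with a directory comparing as if
# its name ended in "/": "b." (0x2e) < "b/" (0x2f) < "ba" (0x61), which
# is exactly the order the diff below must show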
cat >expect <<-EXPECT_END &&
:100644 100644 4268632... 55d3a52... M b.
:040000 040000 0ae5cac... 443c768... M b
:100644 100644 4268632... 55d3a52... M ba
EXPECT_END
git fast-import <input &&
git diff-tree --abbrev --raw L^ L >output &&
test_cmp expect output
'
test_expect_success 'L: nested tree copy does not corrupt deltas' '
cat >input <<-INPUT_END &&
blob
mark :1
data <<EOF
the data
EOF
commit refs/heads/L2
committer C O Mitter <[email protected]> 1112912473 -0700
data <<COMMIT
init L2
COMMIT
M 644 :1 a/b/c
M 644 :1 a/b/d
M 644 :1 a/e/f
commit refs/heads/L2
committer C O Mitter <[email protected]> 1112912473 -0700
data <<COMMIT
update L2
COMMIT
C a g
C a/e g/b
M 644 :1 g/b/h
INPUT_END
cat >expect <<-\EOF &&
g/b/f
g/b/h
EOF
test_when_finished "git update-ref -d refs/heads/L2" &&
git fast-import <input &&
git ls-tree L2 g/b/ >tmp &&
cut -f 2 <tmp >actual &&
test_cmp expect actual &&
git fsck `git rev-parse L2`
'
###
### series M
###
test_expect_success 'M: rename file in same subdirectory' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/M1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf file2/n.e.w.f
EOF
git fast-import <input &&
git diff-tree -M -r M1^ M1 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename file to new subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf i/am/new/to/you
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf i/am/new/to/you
EOF
git fast-import <input &&
git diff-tree -M -r M2^ M2 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename subdirectory to new subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/M2^0
R i other/sub
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 i/am/new/to/you other/sub/am/new/to/you
EOF
git fast-import <input &&
git diff-tree -M -r M3^ M3 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename root to subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
rename root
COMMIT
from refs/heads/M2^0
R "" sub
INPUT_END
cat >expect <<-EOF &&
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100 file2/oldf sub/file2/oldf
:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 R100 file4 sub/file4
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 i/am/new/to/you sub/i/am/new/to/you
:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 R100 newdir/exec.sh sub/newdir/exec.sh
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100 newdir/interesting sub/newdir/interesting
EOF
git fast-import <input &&
git diff-tree -M -r M4^ M4 >actual &&
cat actual &&
compare_diff_raw expect actual
'
###
### series N
###
test_expect_success 'N: copy file in same subdirectory' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/N1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file copy
COMMIT
from refs/heads/branch^0
C file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file2/n.e.w.f
EOF
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N1^ N1 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy then modify subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
clean directory copy
COMMIT
from refs/heads/branch^0
C file2 file3
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
cat >expect <<-EOF &&
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N2^^ N2 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy dirty subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/N3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
git fast-import <input &&
test `git rev-parse N2^{tree}` = `git rev-parse N3^{tree}`
'
test_expect_success 'N: copy directory by id' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual
'
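# The PIPE tests drive fast-import interactively: responses written to
# --cat-blob-fd flow through a fifo back into the generating subshell,
# so the frontend can read the answer to "ls" or "cat-blob" before
# emitting its next command.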
test_expect_success PIPE 'N: read and copy directory' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
git update-ref -d refs/heads/N4 &&
rm -f backflow &&
mkfifo backflow &&
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash, part 2
COMMIT
from refs/heads/branch^0
ls "file2"
EOF
read mode type tree filename &&
echo "M 040000 $tree file3"
) |
git fast-import --cat-blob-fd=3 3>backflow &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual
'
test_expect_success PIPE 'N: empty directory reads as missing' '
cat <<-\EOF >expect &&
OBJNAME
:000000 100644 OBJNAME OBJNAME A unrelated
EOF
echo "missing src" >expect.response &&
git update-ref -d refs/heads/read-empty &&
rm -f backflow &&
mkfifo backflow &&
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/read-empty
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
read "empty" (missing) directory
COMMIT
M 100644 inline src/greeting
data <<BLOB
hello
BLOB
C src/greeting dst1/non-greeting
C src/greeting unrelated
# leave behind "empty" src directory
D src/greeting
ls "src"
EOF
read -r line &&
printf "%s\n" "$line" >response &&
cat <<-\EOF
D dst1
D dst2
EOF
) |
git fast-import --cat-blob-fd=3 3>backflow &&
test_cmp expect.response response &&
git rev-list read-empty |
git diff-tree -r --root --stdin |
sed "s/$_x40/OBJNAME/g" >actual &&
test_cmp expect actual
'
test_expect_success 'N: copy root directory by tree hash' '
cat >expect <<-\EOF &&
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file3/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file3/oldf
EOF
root=$(git rev-parse refs/heads/branch^0^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy root directory by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $root ""
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy root by path' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf oldroot/file2/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf oldroot/file2/oldf
:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 C100 file4 oldroot/file4
:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 C100 newdir/exec.sh oldroot/newdir/exec.sh
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting oldroot/newdir/interesting
EOF
cat >input <<-INPUT_END &&
commit refs/heads/N-copy-root-path
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy root directory by (empty) path
COMMIT
from refs/heads/branch^0
C "" oldroot
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r branch N-copy-root-path >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: delete directory by copying' '
cat >expect <<-\EOF &&
OBJID
:100644 000000 OBJID OBJID D foo/bar/qux
OBJID
:000000 100644 OBJID OBJID A foo/bar/baz
:000000 100644 OBJID OBJID A foo/bar/qux
EOF
empty_tree=$(git mktree </dev/null) &&
cat >input <<-INPUT_END &&
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
collect data to be deleted
COMMIT
deleteall
M 100644 inline foo/bar/baz
data <<DATA_END
hello
DATA_END
C "foo/bar/baz" "foo/bar/qux"
C "foo/bar/baz" "foo/bar/quux/1"
C "foo/bar/baz" "foo/bar/quuux"
M 040000 $empty_tree foo/bar/quux
M 040000 $empty_tree foo/bar/quuux
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete subdirectory
COMMIT
M 040000 $empty_tree foo/bar/qux
INPUT_END
git fast-import <input &&
git rev-list N-delete |
git diff-tree -r --stdin --root --always |
sed -e "s/$_x40/OBJID/g" >actual &&
test_cmp expect actual
'
test_expect_success 'N: modify copied tree' '
cat >expect <<-\EOF &&
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N5^^ N5 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: reject foo/ syntax' '
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5B
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3/
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in copy source' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5C
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
C file2/ file3
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in rename source' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5D
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
rename with invalid syntax
COMMIT
from refs/heads/branch^0
R file2/ file3
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in ls argument' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5E
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
ls "file2/"
INPUT_END
'
test_expect_success 'N: copy to root by id and modify' '
echo "hello, world" >expect.foo &&
echo hello >expect.bar &&
git fast-import <<-SETUP_END &&
commit refs/heads/N7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N7:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N8
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 644 inline foo/foo
data <<EOF
hello, world
EOF
INPUT_END
git show N8:foo/foo >actual.foo &&
git show N8:foo/bar >actual.bar &&
test_cmp expect.foo actual.foo &&
test_cmp expect.bar actual.bar
'
test_expect_success 'N: extract subtree' '
branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
extract subtree branch:newdir
COMMIT
M 040000 $branch ""
C "newdir" ""
INPUT_END
git fast-import <input &&
git diff --exit-code branch:newdir N9
'
test_expect_success 'N: modify subtree, extract it, and modify again' '
echo hello >expect.baz &&
echo hello, world >expect.qux &&
git fast-import <<-SETUP_END &&
commit refs/heads/N10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar/baz
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N10:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 100644 inline foo/bar/qux
data <<EOF
hello, world
EOF
R "foo" ""
C "bar/qux" "bar/quux"
INPUT_END
git show N11:bar/baz >actual.baz &&
git show N11:bar/qux >actual.qux &&
git show N11:bar/quux >actual.quux &&
test_cmp expect.baz actual.baz &&
test_cmp expect.qux actual.qux &&
test_cmp expect.qux actual.quux
'
###
### series O
###
test_expect_success 'O: comments are all skipped' '
cat >input <<-INPUT_END &&
#we will
commit refs/heads/O1
# -- ignore all of this text
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
# $GIT_COMMITTER_NAME has inserted this here for his benefit.
data <<COMMIT
dirty directory copy
COMMIT
# do not forget the blank line in the import!
#
# yes, we started from our usual base of branch^0.
# i like branch^0.
from refs/heads/branch^0
# and we need to reuse file2/file5 from N3 above.
M 644 inline file2/file5
# otherwise the tree will be different
data <<EOF
$file5_data
EOF
# do not forget to copy file2 to file3
C file2 file3
#
# or to delete file5 from file2.
D file2/file5
# are we done yet?
INPUT_END
git fast-import <input &&
test `git rev-parse N3` = `git rev-parse O1`
'
test_expect_success 'O: blank lines not necessary after data commands' '
cat >input <<-INPUT_END &&
commit refs/heads/O2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
git fast-import <input &&
test `git rev-parse N3` = `git rev-parse O2`
'
test_expect_success 'O: repack before next test' '
git repack -a -d
'
test_expect_success 'O: blank lines not necessary after other commands' '
cat >input <<-INPUT_END &&
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
checkpoint
commit refs/heads/O3
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
checkpoint
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
reset refs/tags/O3-2nd
from :5
reset refs/tags/O3-3rd
from :5
INPUT_END
cat >expect <<-INPUT_END &&
string
of
empty
commits
INPUT_END
git fast-import <input &&
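# the repack test above left one pack; the two checkpoints plus the
# final flush add three more: 4 packs, each a .pack/.idx pair, so
# 8 files in total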
test 8 = `find .git/objects/pack -type f | wc -l` &&
test `git rev-parse refs/tags/O3-2nd` = `git rev-parse O3^` &&
git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual &&
test_cmp expect actual
'
test_expect_success 'O: progress outputs as requested by input' '
cat >input <<-INPUT_END &&
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
progress Two commits down, 2 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
progress Three commits down, 1 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
progress done!
INPUT_END
git fast-import <input >actual &&
grep "progress " <input >expect &&
test_cmp expect actual
'
###
### series P (gitlinks)
###
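# A gitlink is a tree entry with mode 160000 whose object id names a
# commit from the submodule history; it may be given as a mark or as a
# verbatim SHA-1, but never as inline data or a blob mark (the last two
# tests in this series must fail).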
test_expect_success 'P: superproject & submodule mix' '
cat >input <<-INPUT_END &&
blob
mark :1
data 10
test file
reset refs/heads/sub
commit refs/heads/sub
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 12
sub_initial
M 100644 :1 file
blob
mark :3
data <<DATAEND
[submodule "sub"]
path = sub
url = "`pwd`/sub"
DATAEND
commit refs/heads/subuse1
mark :4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :3 .gitmodules
M 160000 :2 sub
blob
mark :5
data 20
test file
more data
commit refs/heads/sub
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 11
sub_second
from :2
M 100644 :5 file
commit refs/heads/subuse1
mark :7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :4
M 160000 :6 sub
INPUT_END
git fast-import <input &&
git checkout subuse1 &&
rm -rf sub &&
mkdir sub &&
(
cd sub &&
git init &&
git fetch --update-head-ok .. refs/heads/sub:refs/heads/master &&
git checkout master
) &&
git submodule init &&
git submodule update
'
test_expect_success 'P: verbatim SHA gitlinks' '
SUBLAST=$(git rev-parse --verify sub) &&
SUBPREV=$(git rev-parse --verify sub^) &&
cat >input <<-INPUT_END &&
blob
mark :1
data <<DATAEND
[submodule "sub"]
path = sub
url = "`pwd`/sub"
DATAEND
commit refs/heads/subuse2
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :1 .gitmodules
M 160000 $SUBPREV sub
commit refs/heads/subuse2
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :2
M 160000 $SUBLAST sub
INPUT_END
git branch -D sub &&
git gc &&
git prune &&
git fast-import <input &&
test $(git rev-parse --verify subuse2) = $(git rev-parse --verify subuse1)
'
test_expect_success 'P: fail on inline gitlink' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/subuse3
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 inline sub
data <<DATA
$SUBPREV
DATA
INPUT_END
test_must_fail git fast-import <input
'
test_expect_success 'P: fail on blob mark in gitlink' '
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :1
data <<DATA
$SUBPREV
DATA
commit refs/heads/subuse3
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 :1 sub
INPUT_END
test_must_fail git fast-import <input
'
###
### series Q (notes)
###
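# A note is a blob attached to another object via a tree under
# refs/notes/*; each entry is named after the annotated commit, so
# "N <dataref> <commit-ish>" adds or replaces one note entry.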
test_expect_success 'Q: commit notes' '
note1_data="The first note for the first commit" &&
note2_data="The first note for the second commit" &&
note3_data="The first note for the third commit" &&
note1b_data="The second note for the first commit" &&
note1c_data="The third note for the first commit" &&
note2b_data="The second note for the second commit" &&
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :2
data <<EOF
$file2_data
EOF
commit refs/heads/notes-test
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
first (:3)
COMMIT
M 644 :2 file2
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/notes-test
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second (:5)
COMMIT
M 644 :4 file4
commit refs/heads/notes-test
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third (:6)
COMMIT
M 644 inline file5
data <<EOF
$file5_data
EOF
M 755 inline file6
data <<EOF
$file6_data
EOF
blob
mark :7
data <<EOF
$note1_data
EOF
blob
mark :8
data <<EOF
$note2_data
EOF
commit refs/notes/foobar
mark :9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:9)
COMMIT
N :7 :3
N :8 :5
N inline :6
data <<EOF
$note3_data
EOF
commit refs/notes/foobar
mark :10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:10)
COMMIT
N inline :3
data <<EOF
$note1b_data
EOF
commit refs/notes/foobar2
mark :11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:11)
COMMIT
N inline :3
data <<EOF
$note1c_data
EOF
commit refs/notes/foobar
mark :12
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:12)
COMMIT
deleteall
N inline :5
data <<EOF
$note2b_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged notes-test
'
test_expect_success 'Q: verify pack' '
verify_packs
'
test_expect_success 'Q: verify first commit' '
commit1=$(git rev-parse notes-test~2) &&
commit2=$(git rev-parse notes-test^) &&
commit3=$(git rev-parse notes-test) &&
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
first (:3)
EOF
git cat-file commit notes-test~2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second commit' '
cat >expect <<-EOF &&
parent $commit1
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second (:5)
EOF
git cat-file commit notes-test^ | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third commit' '
cat >expect <<-EOF &&
parent $commit2
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
third (:6)
EOF
git cat-file commit notes-test | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first notes commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:9)
EOF
git cat-file commit refs/notes/foobar~2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
sort <expect.unsorted >expect &&
git cat-file -p refs/notes/foobar~2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for first commit' '
echo "$note1_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for second commit' '
echo "$note2_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for third commit' '
echo "$note3_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit3 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second notes commit' '
cat >expect <<-EOF &&
parent `git rev-parse --verify refs/notes/foobar~2`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:10)
EOF
git cat-file commit refs/notes/foobar^ | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
sort <expect.unsorted >expect &&
git cat-file -p refs/notes/foobar^^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second note for first commit' '
echo "$note1b_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for second commit' '
echo "$note2_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for third commit' '
echo "$note3_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit3 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third notes commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:11)
EOF
git cat-file commit refs/notes/foobar2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
EOF
sort <expect.unsorted >expect &&
git cat-file -p refs/notes/foobar2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third note for first commit' '
echo "$note1c_data" >expect &&
git cat-file blob refs/notes/foobar2:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify fourth notes commit' '
cat >expect <<-EOF &&
parent `git rev-parse --verify refs/notes/foobar^`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:12)
EOF
git cat-file commit refs/notes/foobar | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify fourth notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit2
EOF
sort <expect.unsorted >expect &&
git cat-file -p refs/notes/foobar^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second note for second commit' '
echo "$note2b_data" >expect &&
git cat-file blob refs/notes/foobar:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: deny note on empty branch' '
cat >input <<-EOF &&
reset refs/heads/Q0
commit refs/heads/note-Q0
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Note for an empty branch.
COMMIT
N inline refs/heads/Q0
data <<NOTE
some note
NOTE
EOF
test_must_fail git fast-import <input
'
###
### series R (feature and option)
###
test_expect_success 'R: abort on unsupported feature' '
cat >input <<-EOF &&
feature no-such-feature-exists
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: supported feature is accepted' '
cat >input <<-EOF &&
feature date-format=now
EOF
git fast-import <input
'
test_expect_success 'R: abort on receiving feature after data command' '
cat >input <<-EOF &&
blob
data 3
hi
feature date-format=now
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: only one import-marks feature allowed per stream' '
>git.marks &&
>git2.marks &&
cat >input <<-EOF &&
feature import-marks=git.marks
feature import-marks=git2.marks
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: export-marks feature forbidden by default' '
echo "feature export-marks=git.marks" >input &&
test_must_fail git fast-import <input
'
test_expect_success 'R: export-marks feature results in a marks file being created' '
cat >input <<-EOF &&
feature export-marks=git.marks
blob
mark :1
data 3
hi
EOF
git fast-import --allow-unsafe-features <input &&
grep :1 git.marks
'
test_expect_success 'R: export-marks options can be overridden by commandline options' '
cat >input <<-\EOF &&
feature export-marks=feature-sub/git.marks
blob
mark :1
data 3
hi
EOF
git fast-import --allow-unsafe-features \
--export-marks=cmdline-sub/other.marks <input &&
grep :1 cmdline-sub/other.marks &&
test_path_is_missing feature-sub
'
test_expect_success 'R: catch typo in marks file name' '
test_must_fail git fast-import --import-marks=nonexistent.marks </dev/null &&
echo "feature import-marks=nonexistent.marks" |
test_must_fail git fast-import --allow-unsafe-features
'
test_expect_success 'R: import and output marks can be the same file' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
cat >expect <<-EOF &&
:1 $blob
:2 $blob
EOF
git fast-import --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :2
data 3
hi
EOF
test_cmp expect io.marks
'
test_expect_success 'R: --import-marks=foo --export-marks=foo to create foo fails' '
rm -f io.marks &&
test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF
blob
mark :1
data 3
hi
EOF
'
test_expect_success 'R: --import-marks-if-exists' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
echo ":1 $blob" >expect &&
git fast-import --import-marks-if-exists=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
test_cmp expect io.marks
'
test_expect_success 'R: feature import-marks-if-exists' '
rm -f io.marks &&
>expect &&
git fast-import --export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=not_io.marks
EOF
test_cmp expect io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
echo ":1 $blob" >io.marks &&
echo ":1 $blob" >expect &&
echo ":2 $blob" >>expect &&
git fast-import --export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=io.marks
blob
mark :2
data 3
hi
EOF
test_cmp expect io.marks &&
echo ":3 $blob" >>expect &&
git fast-import --import-marks=io.marks \
--export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=not_io.marks
blob
mark :3
data 3
hi
EOF
test_cmp expect io.marks &&
>expect &&
git fast-import --import-marks-if-exists=not_io.marks \
--export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=io.marks
EOF
test_cmp expect io.marks
'
test_expect_success 'R: import to output marks works without any content' '
cat >input <<-EOF &&
feature import-marks=marks.out
feature export-marks=marks.new
EOF
git fast-import --allow-unsafe-features <input &&
test_cmp marks.out marks.new
'
test_expect_success 'R: import marks prefers commandline marks file over the stream' '
cat >input <<-EOF &&
feature import-marks=nonexistent.marks
feature export-marks=marks.new
EOF
git fast-import --import-marks=marks.out --allow-unsafe-features <input &&
test_cmp marks.out marks.new
'
test_expect_success 'R: multiple --import-marks= should be honoured' '
cat >input <<-EOF &&
feature import-marks=nonexistent.marks
feature export-marks=combined.marks
EOF
head -n2 marks.out > one.marks &&
tail -n +3 marks.out > two.marks &&
git fast-import --import-marks=one.marks --import-marks=two.marks \
--allow-unsafe-features <input &&
test_cmp marks.out combined.marks
'
test_expect_success 'R: feature relative-marks should be honoured' '
cat >input <<-EOF &&
feature relative-marks
feature import-marks=relative.in
feature export-marks=relative.out
EOF
mkdir -p .git/info/fast-import/ &&
cp marks.new .git/info/fast-import/relative.in &&
git fast-import --allow-unsafe-features <input &&
test_cmp marks.new .git/info/fast-import/relative.out
'
test_expect_success 'R: feature no-relative-marks should be honoured' '
cat >input <<-EOF &&
feature relative-marks
feature import-marks=relative.in
feature no-relative-marks
feature export-marks=non-relative.out
EOF
git fast-import --allow-unsafe-features <input &&
test_cmp marks.new non-relative.out
'
test_expect_success 'R: feature ls supported' '
echo "feature ls" |
git fast-import
'
test_expect_success 'R: feature cat-blob supported' '
echo "feature cat-blob" |
git fast-import
'
test_expect_success 'R: cat-blob-fd must be a nonnegative integer' '
test_must_fail git fast-import --cat-blob-fd=-1 </dev/null
'
test_expect_success !MINGW 'R: print old blob' '
blob=$(echo "yes it can" | git hash-object -w --stdin) &&
cat >expect <<-EOF &&
${blob} blob 11
yes it can
EOF
echo "cat-blob $blob" |
git fast-import --cat-blob-fd=6 6>actual &&
test_cmp expect actual
'
test_expect_success !MINGW 'R: in-stream cat-blob-fd not respected' '
echo hello >greeting &&
blob=$(git hash-object -w greeting) &&
cat >expect <<-EOF &&
${blob} blob 6
hello
EOF
git fast-import --cat-blob-fd=3 3>actual.3 >actual.1 <<-EOF &&
cat-blob $blob
EOF
test_cmp expect actual.3 &&
test_must_be_empty actual.1 &&
git fast-import 3>actual.3 >actual.1 <<-EOF &&
option cat-blob-fd=3
cat-blob $blob
EOF
test_must_be_empty actual.3 &&
test_cmp expect actual.1
'
test_expect_success !MINGW 'R: print mark for new blob' '
echo "effluentish" | git hash-object --stdin >expect &&
git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
blob
mark :1
data <<BLOB_END
effluentish
BLOB_END
get-mark :1
EOF
test_cmp expect actual
'
test_expect_success !MINGW 'R: print new blob' '
blob=$(echo "yep yep yep" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 12
yep yep yep
EOF
git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
blob
mark :1
data <<BLOB_END
yep yep yep
BLOB_END
cat-blob :1
EOF
test_cmp expect actual
'
test_expect_success !MINGW 'R: print new blob by sha1' '
blob=$(echo "a new blob named by sha1" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 25
a new blob named by sha1
EOF
git fast-import --cat-blob-fd=6 6>actual <<-EOF &&
blob
data <<BLOB_END
a new blob named by sha1
BLOB_END
cat-blob $blob
EOF
test_cmp expect actual
'
test_expect_success 'setup: big file' '
(
echo "the quick brown fox jumps over the lazy dog" >big &&
for i in 1 2 3
do
cat big big big big >bigger &&
cat bigger bigger bigger bigger >big ||
exit
done
)
'
test_expect_success 'R: print two blobs to stdout' '
blob1=$(git hash-object big) &&
blob1_len=$(wc -c <big) &&
blob2=$(echo hello | git hash-object --stdin) &&
{
echo ${blob1} blob $blob1_len &&
cat big &&
cat <<-EOF
${blob2} blob 6
hello
EOF
} >expect &&
{
cat <<-\END_PART1 &&
blob
mark :1
data <<data_end
END_PART1
cat big &&
cat <<-\EOF
data_end
blob
mark :2
data <<data_end
hello
data_end
cat-blob :1
cat-blob :2
EOF
} |
git fast-import >actual &&
test_cmp expect actual
'
test_expect_success PIPE 'R: copy using cat-file' '
expect_id=$(git hash-object big) &&
expect_len=$(wc -c <big) &&
echo $expect_id blob $expect_len >expect.response &&
rm -f blobs &&
cat >frontend <<-\FRONTEND_END &&
#!/bin/sh
FRONTEND_END
mkfifo blobs &&
(
export GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GIT_COMMITTER_DATE &&
cat <<-\EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
EOF
cat big &&
cat <<-\EOF &&
BLOB
cat-blob :1
EOF
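# cat-blob answers on the fd given by --cat-blob-fd with
# "<sha1> SP blob SP <size> LF", then the raw contents and a final LF;
# head_c reads exactly <size> bytes of it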
read blob_id type size <&3 &&
echo "$blob_id $type $size" >response &&
head_c $size >blob <&3 &&
read newline <&3 &&
cat <<-EOF &&
commit refs/heads/copied
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy big file as file3
COMMIT
M 644 inline file3
data <<BLOB
EOF
cat blob &&
echo BLOB
) 3<blobs |
git fast-import --cat-blob-fd=3 3>blobs &&
git show copied:file3 >actual &&
test_cmp expect.response response &&
test_cmp big actual
'
test_expect_success PIPE 'R: print blob mid-commit' '
rm -f blobs &&
echo "A blob from _before_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
A blob from _before_ the commit.
BLOB
commit refs/heads/temporary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
cat-blob :1
EOF
read blob_id type size <&3 &&
head_c $size >actual <&3 &&
read newline <&3 &&
echo
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
test_expect_success PIPE 'R: print staged blob within commit' '
rm -f blobs &&
echo "A blob from _within_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
commit refs/heads/within
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
M 644 inline within
data <<BLOB
A blob from _within_ the commit.
BLOB
EOF
to_get=$(
echo "A blob from _within_ the commit." |
git hash-object --stdin
) &&
echo "cat-blob $to_get" &&
read blob_id type size <&3 &&
head_c $size >actual <&3 &&
read newline <&3 &&
echo deleteall
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
test_expect_success 'R: quiet option results in no stats being output' '
cat >input <<-EOF &&
option git quiet
blob
data 3
hi
EOF
git fast-import 2>output <input &&
test_must_be_empty output
'
test_expect_success 'R: feature done means terminating "done" is mandatory' '
echo feature done | test_must_fail git fast-import &&
test_must_fail git fast-import --done </dev/null
'
test_expect_success 'R: terminating "done" with trailing gibberish is ok' '
git fast-import <<-\EOF &&
feature done
done
trailing gibberish
EOF
git fast-import <<-\EOF
done
more trailing gibberish
EOF
'
test_expect_success 'R: terminating "done" within commit' '
cat >expect <<-\EOF &&
OBJID
:000000 100644 OBJID OBJID A hello.c
:000000 100644 OBJID OBJID A hello2.c
EOF
git fast-import <<-EOF &&
commit refs/heads/done-ends
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<EOT
Commit terminated by "done" command
EOT
M 100644 inline hello.c
data <<EOT
Hello, world.
EOT
C hello.c hello2.c
done
EOF
git rev-list done-ends |
git diff-tree -r --stdin --root --always |
sed -e "s/$_x40/OBJID/g" >actual &&
test_cmp expect actual
'
test_expect_success 'R: die on unknown option' '
cat >input <<-EOF &&
option git non-existing-option
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: unknown commandline options are rejected' '
test_must_fail git fast-import --non-existing-option < /dev/null
'
test_expect_success 'R: die on invalid option argument' '
echo "option git active-branches=-5" |
test_must_fail git fast-import &&
echo "option git depth=" |
test_must_fail git fast-import &&
test_must_fail git fast-import --depth="5 elephants" </dev/null
'
test_expect_success 'R: ignore non-git options' '
cat >input <<-EOF &&
option non-existing-vcs non-existing-option
EOF
git fast-import <input
'
##
## R: very large blobs
##
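# --big-file-threshold=1 sends every blob down the streaming path: the
# data is written into the pack whole, with no delta attempt, and
# identical blobs still collapse to a single object (checked below).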
test_expect_success 'R: blob bigger than threshold' '
blobsize=$((2*1024*1024 + 53)) &&
test-genrandom bar $blobsize >expect &&
cat >input <<-INPUT_END &&
commit refs/heads/big-file
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
R - big file
COMMIT
M 644 inline big1
data $blobsize
INPUT_END
cat expect >>input &&
cat >>input <<-INPUT_END &&
M 644 inline big2
data $blobsize
INPUT_END
cat expect >>input &&
echo >>input &&
test_create_repo R &&
git --git-dir=R/.git fast-import --big-file-threshold=1 <input
'
test_expect_success 'R: verify created pack' '
(
cd R &&
verify_packs -v > ../verify
)
'
test_expect_success 'R: verify written objects' '
git --git-dir=R/.git cat-file blob big-file:big1 >actual &&
test_cmp_bin expect actual &&
a=$(git --git-dir=R/.git rev-parse big-file:big1) &&
b=$(git --git-dir=R/.git rev-parse big-file:big2) &&
test $a = $b
'
test_expect_success 'R: blob appears only once' '
n=$(grep $a verify | wc -l) &&
test 1 = $n
'
###
### series S
###
#
# Make sure missing spaces and EOLs after mark references
# cause errors.
#
# Setup:
#
# 1--2--4
# \ /
# -3-
#
# commit marks: 301, 302, 303, 304
# blob mark: 403
# note mark: 202
#
# When a space is missing somewhere other than at the end of
# the line, the error message is:
#
# Missing space after ..
#
# or when extra characters come after the mark at the end
# of the line:
#
# Garbage after ..
#
# or when the dataref is neither "inline " nor a known SHA1,
#
# Invalid dataref ..
#
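#
# For example, with mark :403 defined, "M 100644 :403 hello.c" is
# accepted, "M 100644 :403x hello.c" dies with a missing space after
# the mark, and "from :301x" dies with garbage after the mark, since
# there the mark should end the line.
#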
test_expect_success 'S: initialize for S tests' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/S
mark :301
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 1
COMMIT
M 100644 inline hello.c
data <<BLOB
blob 1
BLOB
commit refs/heads/S
mark :302
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 2
COMMIT
from :301
M 100644 inline hello.c
data <<BLOB
blob 2
BLOB
blob
mark :403
data <<BLOB
blob 3
BLOB
blob
mark :202
data <<BLOB
note 2
BLOB
INPUT_END
git fast-import --export-marks=marks <input
'
#
# filemodify, three datarefs
#
test_expect_success 'S: filemodify with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 :403x hello.c
EOF
cat err &&
test_i18ngrep "space after mark" err
'
# inline is misspelled; fast-import thinks it is some unknown dataref
test_expect_success 'S: filemodify with garbage after inline must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 inlineX hello.c
data <<BLOB
inline
BLOB
EOF
cat err &&
test_i18ngrep "nvalid dataref" err
'
test_expect_success 'S: filemodify with garbage after sha1 must fail' '
sha1=$(grep :403 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 ${sha1}x hello.c
EOF
cat err &&
test_i18ngrep "space after SHA1" err
'
#
# notemodify, three ways to say dataref
#
test_expect_success 'S: notemodify with garbage after mark dataref must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref markref
COMMIT
N :202x :302
EOF
cat err &&
test_i18ngrep "space after mark" err
'
test_expect_success 'S: notemodify with garbage after inline dataref must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref inline
COMMIT
N inlineX :302
data <<BLOB
note blob
BLOB
EOF
cat err &&
test_i18ngrep "nvalid dataref" err
'
test_expect_success 'S: notemodify with garbage after sha1 dataref must fail' '
sha1=$(grep :202 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref sha1
COMMIT
N ${sha1}x :302
EOF
cat err &&
test_i18ngrep "space after SHA1" err
'
#
# notemodify, mark in commit-ish
#
test_expect_success 'S: notemodify with garbage after mark commit-ish must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/Snotes
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note commit-ish
COMMIT
N :202 :302x
EOF
cat err &&
test_i18ngrep "after mark" err
'
#
# from
#
test_expect_success 'S: from with garbage after mark must fail' '
test_must_fail \
git fast-import --import-marks=marks --export-marks=marks <<-EOF 2>err &&
commit refs/heads/S2
mark :303
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 3
COMMIT
from :301x
M 100644 :403 hello.c
EOF
# create the commit anyway; the merge test needs it
git fast-import --import-marks=marks --export-marks=marks <<-EOF &&
commit refs/heads/S2
mark :303
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 3
COMMIT
from :301
M 100644 :403 hello.c
EOF
# now evaluate the error
cat err &&
test_i18ngrep "after mark" err
'
#
# merge
#
test_expect_success 'S: merge with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
mark :304
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
merge 4
COMMIT
from :302
merge :303x
M 100644 :403 hello.c
EOF
cat err &&
test_i18ngrep "after mark" err
'
#
# tag, from markref
#
test_expect_success 'S: tag with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
tag refs/tags/Stag
from :302x
tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<TAG
tag S
TAG
EOF
cat err &&
test_i18ngrep "after mark" err
'
#
# cat-blob markref
#
test_expect_success 'S: cat-blob with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
cat-blob :403x
EOF
cat err &&
test_i18ngrep "after mark" err
'
#
# ls markref
#
test_expect_success 'S: ls with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls :302x hello.c
EOF
cat err &&
test_i18ngrep "space after mark" err
'
test_expect_success 'S: ls with garbage after sha1 must fail' '
sha1=$(grep :302 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls ${sha1}x hello.c
EOF
cat err &&
test_i18ngrep "space after tree-ish" err
'
###
### series T (ls)
###
# Setup is carried over from series S.
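# An "ls" request is answered with
# "<mode> SP <type> SP <dataref> HT <path> LF" (on stdout when no
# --cat-blob-fd is given); the root tree has an empty path, so the sed
# expression below uses a Z to protect the otherwise-trailing
# whitespace in the heredoc.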
test_expect_success 'T: ls root tree' '
sed -e "s/Z\$//" >expect <<-EOF &&
040000 tree $(git rev-parse S^{tree}) Z
EOF
sha1=$(git rev-parse --verify S) &&
git fast-import --import-marks=marks <<-EOF >actual &&
ls $sha1 ""
EOF
test_cmp expect actual
'
test_expect_success 'T: delete branch' '
git branch to-delete &&
git fast-import <<-EOF &&
reset refs/heads/to-delete
from 0000000000000000000000000000000000000000
EOF
test_must_fail git rev-parse --verify refs/heads/to-delete
'
test_expect_success 'T: empty reset doesnt delete branch' '
git branch not-to-delete &&
git fast-import <<-EOF &&
reset refs/heads/not-to-delete
EOF
git show-ref &&
git rev-parse --verify refs/heads/not-to-delete
'
###
### series U (filedelete)
###
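# "D <path>" deletes a single file or a whole directory; deleting the
# empty path clears the root tree, as the last pair of tests checks.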
test_expect_success 'U: initialize for U tests' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
test setup
COMMIT
M 100644 inline hello.c
data <<BLOB
blob 1
BLOB
M 100644 inline good/night.txt
data <<BLOB
sleep well
BLOB
M 100644 inline good/bye.txt
data <<BLOB
au revoir
BLOB
INPUT_END
git fast-import <input
'
test_expect_success 'U: filedelete file succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete good/night.txt
COMMIT
from refs/heads/U^0
D good/night.txt
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate file delete result' '
cat >expect <<-EOF &&
:100644 000000 2907ebb4bf85d91bf0716bb3bd8a68ef48d6da76 0000000000000000000000000000000000000000 D good/night.txt
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
test_expect_success 'U: filedelete directory succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete good dir
COMMIT
from refs/heads/U^0
D good
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate directory delete result' '
cat >expect <<-EOF &&
:100644 000000 69cb75792f55123d8389c156b0b41c2ff00ed507 0000000000000000000000000000000000000000 D good/bye.txt
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
test_expect_success 'U: filedelete root succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
must succeed
COMMIT
from refs/heads/U^0
D ""
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate root delete result' '
cat >expect <<-EOF &&
:100644 000000 c18147dc648481eeb65dc5e66628429a64843327 0000000000000000000000000000000000000000 D hello.c
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
test_done
|
#!/bin/bash
python ../adoc_utils/adoc-link-check.py .
|
#!/usr/bin/env bash
set -e
# Tar up the helm chart
TMPDIR="tmp"
mkdir -p $TMPDIR
# CI can't run the docker pull/save below. TODO: refactor the script to make this step optional.
# docker pull cfplatformeng/spacebears:latest
# docker save cfplatformeng/spacebears:latest -o ../example-chart/images/spacebears.latest.tgz
# docker save gcr.io/kubernetes-helm/tiller:v2.8.2 -o ../example-chart/images/tiller.2.8.2.tgz
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
tar -cvzf ./${TMPDIR}/helm_chart_src.tgz -C "${DIR}/../" example-chart
# Add it as a blob in the bosh release
bosh add-blob ./${TMPDIR}/helm_chart_src.tgz helm_chart_src.tgz
bosh create-release --name=example-chart --force
bosh upload-release --name=example-chart
|
#!/bin/sh
ng build
npx electron .
|
#!/usr/bin/env bats
@test "creates the initial index and checksums file" {
  # use the bats "run" helper: it captures the exit code in $status,
  # whereas a bare failing command would abort the test before a
  # later "[ $? = 0 ]" check could ever run
  run bin/org init
  [ "$status" -eq 0 ]
}
|
#!/bin/bash
dieharder -d 0 -g 404 -S 1847783859
|
#!/bin/bash
# This script will build the project.
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo -e "Build Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]"
./gradlew build
elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then
echo -e 'Build Branch with Snapshot => Branch ['$TRAVIS_BRANCH']'
./gradlew -Prelease.travisci=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" -Prelease.scope=patch build snapshot
elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then
echo -e 'Build Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']'
case "$TRAVIS_TAG" in
*-rc\.*)
./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" candidate
;;
*)
./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" final
;;
esac
else
echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']'
./gradlew build
fi
|
#!/bin/bash
# Final assembly of the project directory: gluing the parts together.
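# Usage (inferred from the paths below): $1 is the projects root and
# $2 the project name; the config file must live at $1/$2/config.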
source $1/$2/config
# COMPRESS_USAGE
# COMPRESS_NAMES
# COMPRESS_LINEBREAKS
# TARGET_UBUNTU_DESKTOP
# TARGET_WEB
# CO_DEBUG
# CO_RELEASE
UTILS_PATH=$(dirname "$0")/utils
############################
# Here we walk the children directory tree and modify the files in it.
# Build the folder paths (variables root, list and all) and the file paths (all_js, all_css and all_js_css).
root=$1/$2
declare -a list # directories to process, not including the root
declare -a all_js
declare -a all_css
stack=($root)
#echo "$(ls $root)"
if [ -f $root/gameplay.js ]
then
all_js=(${all_js[@]} $root/gameplay.js)
else
echo "Не найден файл $root/gameplay.js"
exit 1
fi
if [ -f $root/style.css ]
then
all_css=(${all_css[@]} $root/style.css)
else
echo "Не найден файл $root/style.css"
exit 1
fi
while [ ! ${#stack[@]} -eq 0 ]
do
element=${stack[@]:0:1}/children
stack=("${stack[@]:1}")
if [ -d $element ]
then
for i in $(ls -1 $element)
do
if [ -d $element/$i ]
then
if [ ! -f $element/$i/style.css ]
then
echo "Не найден файл $element/$i/style.css.js - директория $element/$i пропускается"
continue 1
fi
if [ -f $element/$i/gameplay.js ]
then
all_js=(${all_js[@]} $element/$i/gameplay.js)
else
echo "Не найден файл $element/$i/gameplay.js - директория $element/$i пропускается"
continue 1
#exit 1
fi
all_css=(${all_css[@]} $element/$i/style.css)
stack=(${stack[@]} $element/$i)
list=(${list[@]} $element/$i)
fi
done
fi
done
all=($root ${list[@]})
all_js_css=(${all_js[@]} ${all_css[@]})
############################
# We expect to find a style.css file and a res directory in the traliva and traliva_kit directories
declare -a usage_addon_keys
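# The sed below prefixes resource paths with the subdirectory, e.g. #RES#/icon.png -> #RES#/_traliva/icon.png (illustrative path)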
sed -e "s/#RES#/#RES#\/_traliva/g" $root/traliva/style.css > $root/style.css_tmp
mv $root/traliva/res $root/res/_traliva
rm -rf $root/traliva
for i in ${all[@]}
do
#echo "** $i"
if [ ! -e $i/style.css_tmp ]; then touch $i/style.css_tmp; fi
if [ -d $i/traliva_kit ]
then
sed -e "s/#RES#/#RES#\/_traliva_kit/g" $i/traliva_kit/style.css >> $i/style.css_tmp
mv $i/traliva_kit/res $i/res/_traliva_kit
rm -rf $i/traliva_kit
fi
cat $i/style.css >> $i/style.css_tmp
#cp $i/gameplay.js $i/gameplay.js_tmp
done
#if [ "$CO_DEBUG" = true ]
#then
# usage_addon_keys=(${usage_addon_keys[@]} traliva_debug)
# #usage_addon_keys=(${usage_addon_keys[@]} traliva_aaa)
#fi
#if [ "$COMPRESS_USAGE" = true ]
#then
# #
# #$UTILS_PATH/sugar/usage.sh "\"${all_js_css[@]}\"" "\"${usage_addon_keys[@]}\""
#fi
## -- syntactic sugar: enumerations
## ==
#if [ "$CO_RELEASE" = true ]
#then
# #if [ "$COMPRESS_NAMES" = true ]
# #then
# #fi
# if [ "$COMPRESS_LINEBREAKS" = true ]
# then
# for i in $all
# do
# cat $i/style.css_tmp | $UTILS_PATH/css.py > $i/style.css_tmp2 && mv $i/style.css_tmp2 $i/style.css_tmp
# cat $i/gameplay.js_tmp | $UTILS_PATH/js.py > $i/gameplay.js_tmp2 && mv $i/gameplay.js_tmp2 $i/gameplay.js_tmp
# done
# fi
#fi
for i in ${all[@]}
do
mv $i/style.css_tmp $i/style.css
#mv $i/gameplay.js_tmp $i/gameplay.js
done
# ###############
flags=0
if [ "$COMPRESS_NAMES" = true ]; then flags=$(($flags|0x2)); echo "FLAG names: compress_names"; fi
if [ "$COMPRESS_LINEBREAKS" = true ]; then flags=$(($flags|0x4)); echo "FLAG linebreaks: compress_linebreaks"; fi
if [ "$CO_RELEASE" = true ]; then flags=$(($flags|0x8)); echo "FLAG release: release"; fi
if [ "$CO_DEBUG" = true ]
then
echo " #u#traliva_kit_debug## #u#debug##" >> ${all_js[0]}
fi
echo "flags: $flags"
$UTILS_PATH/js.py 1 $flags ${#all_js[@]} ${all_js[@]} ${#all_css[@]} ${all_css[@]} "$root"
|
#!/bin/bash
cd /home/appuser
git clone -b monolith https://github.com/express42/reddit.git
cd reddit
bundle install
puma -d
|
alias vi="vim"
alias bc="bc -l" # load mathlib with bc (scale to 20)
alias zhistless="export HISTFILE=/dev/null; echo 'won't save zsh command history'"
|
# Create volumes
docker volume create --name=adventure_bundle
docker volume create --name=adventure_pg
docker volume create --name=adventure_data
# Create base image
docker-compose build
# Start the containers
docker-compose up -d
# Create the database
docker-compose exec adventure bundle exec rake db:setup
|
#!/bin/sh
# set -e
set -x
rm -rf .build
rm -rf dist
mkdir .build
cp -r ./app ./.build/app
cp -r ./lib ./.build/lib
cp ./index.js ./index.ls ./package.json ./.build
cp -r ./node_modules ./.build/node_modules
rm -rf ./.build/node_modules/electron-packager
./node_modules/.bin/electron-packager ./.build 'Oulipo' --platform=darwin --arch=x64 --version=0.28.1 --out=dist
cp ./Info.plist ./dist/Oulipo.app/Info.plist
./node_modules/.bin/electron-packager ./.build 'Oulipo' --platform=win32 --arch=x64 --version=0.28.1 --out=dist
rm -rf .build
|
#!/bin/bash
remote=
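# Any argument of the form host:path/ (note the trailing slash) is taken as the rsync remote; anything else aborts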
for arg; do
[[ "$arg" =~ :.*/$ ]] && remote=$arg && continue
case "$arg" in
*) exit 1;;
esac
done
[ "$remote" ] || exit 1
self=$(readlink -e "$0") || exit 1
self=$(dirname "${self}") || exit 1
rsync --inplace --delete --out-format="%t %o %f ... %n" --filter=". ${self}/rs-filter" -Phac "$remote" "${self}/../"
|
#!/usr/bin/env bash
set -eu -o pipefail
# TODO: require "--force" in order to create directories or overwrite existing files
IMAGES_DIR="./static/images"
if [[ $# -ne 1 ]]; then
echo "$0 -- copies images exported from Miro to the proper location in this repo"
echo -e "\nUsage:"
echo " $0 (path-to-source-images)"
echo -e "\nSource for images and instructions:\n https://miro.com/app/board/o9J_lIfcKZY=/?moveToWidget=3074457358161105960&cot=14\n"
exit 1
fi
if [[ ! -d $IMAGES_DIR ]]; then
echo "$0: Error: unable to find the destination directory (i.e. \"$IMAGES_DIR)\")."
echo "hint: this script must be executed from the root of this project."
exit 2
fi
SRC_DIR="$1"
MIRO_BOARD_NAME="imgs-for-docs"
FILENAME_PREFIX="${MIRO_BOARD_NAME} - "
# use the null character as delimiter to avoid problems with spaces in filenames
find $SRC_DIR -name "${MIRO_BOARD_NAME}*" -print0 | while read -d $'\0' src_path
do
dest_path=$(basename $src_path)
dest_path=$(echo $dest_path | sed "s|^$FILENAME_PREFIX||")
dest_path=$(echo $dest_path | tr '_' '/')
dest_path="${IMAGES_DIR}/${dest_path}"
dest_dirs=$(dirname ${dest_path})
if [[ ! -d "${dest_dirs}" ]]; then
( set -x; mkdir -p "${dest_dirs}" )
fi
( set -x; mv "$src_path" "$dest_path" )
done
|
#!/usr/bin/env bash
cd $(cd $(dirname $0); pwd)
rm -f master* *.png > /dev/null 2>&1
curl -L -s "https://github.com/arvida/emoji-cheat-sheet.com/archive/master.zip" > master.zip
unzip master.zip
rm master.zip
mv emoji-cheat-sheet.com-master/public/graphics/emojis/*.png .
rm -rf emoji-cheat-sheet.com-master
|
#!/bin/bash
# Set an option to exit immediately if any error appears
set -o errexit
# Main function that describes the behavior of the
# script.
# By making it a function we can place our methods
# below and have the main execution described in a
# concise way via function invocations.
main() {
setup_dependencies
update_docker_configuration
echo "#########################################################"
echo "SUCCESS: Done! Finished setting up Travis machine. "
echo "#########################################################"
}
# Prepare the dependencies that the machine needs.
# Here I'm just updating the apt references and then
# installing both python and python-pip. This allows
# us to make use of `pip` to fetch the latest `docker-compose`
# later.
# We also upgrade `docker-ce` so that we can get the
# latest docker version which allows us to perform
# image squashing as well as multi-stage builds.
setup_dependencies() {
echo "#########################################################"
echo "INFO: Setting up dependencies."
echo "#########################################################"
sudo apt update -y
sudo apt install realpath python python-pip -y
sudo apt install --only-upgrade docker-ce -y
sudo pip install docker-compose || true
docker info
docker-compose --version
git config --global user.name "MISP-dockerized-bot"
git clone --recurse-submodules https://github.com/8ear/MISP-dockerized-documentation.git ~/misp-docs
}
# Tweak the daemon configuration so that we
# can make use of experimental features (like image
# squashing) as well as have a bigger amount of
# concurrent downloads and uploads.
update_docker_configuration() {
echo "#########################################################"
echo "INFO: Updating docker configuration"
echo "#########################################################"
echo '{
"experimental": true,
"storage-driver": "overlay2",
"max-concurrent-downloads": 50,
"max-concurrent-uploads": 50
}' | sudo tee /etc/docker/daemon.json
sudo service docker restart
}
main
|
source /tmp/lib.sh
loglevel=$(sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" | grep loglevel | cut -d' ' -f2)
if [ "$loglevel" == 'INFO' -o "$loglevel" == 'VERBOSE' ]; then exit $PASS; fi
exit $FAIL
|
python setup.py clean
read -p "Enter commit message: " desc
export BRANCH=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p')
read -p "Did you update README.me to include version $BRANCH?"
git commit -m "$desc"
cmd="sed 's/VERSION/$BRANCH/g'"
cat entangle/__version_tmpl__.py | eval $cmd >entangle/__version__.py
git add entangle/__version__.py
git commit -m "Updated version"
git push origin $BRANCH
|
#! /bin/bash
cp ${test_dir}/logfile_access_log.1 logfile_changed.0
chmod u+w logfile_changed.0
run_test ${lnav_test} -n \
-c ":rebuild" \
-c ":shexec head -1 ${test_dir}/logfile_access_log.0 > logfile_changed.0" \
-c ":rebuild" \
logfile_changed.0
check_error_output "line buffer cache flush" <<EOF
EOF
check_output "line buffer cache flush is not working" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
EOF
run_test ./drive_line_buffer "${top_srcdir}/src/line_buffer.hh"
check_output "Line buffer output doesn't match input?" < \
"${top_srcdir}/src/line_buffer.hh"
run_test ./drive_line_buffer < ${top_srcdir}/src/line_buffer.hh
check_output "Line buffer output doesn't match input from pipe?" < \
"${top_srcdir}/src/line_buffer.hh"
cat > lb.dat <<EOF
1
2
3
4
5
EOF
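# grep -b prefixes each match with its byte offset, so LINE_OFF becomes 6 here (the offset of the "4" line)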
LINE_OFF=`grep -b '4' lb.dat | cut -f 1 -d :`
run_test ./drive_line_buffer -o $LINE_OFF lb.dat
check_output "Seeking in the line buffer doesn't work?" <<EOF
4
5
EOF
cat "${top_srcdir}/src/"*.hh "${top_srcdir}/src/"*.cc > lb-2.dat
grep -b '$' lb-2.dat | cut -f 1 -d : > lb.index
line_count=`wc -l lb-2.dat`
run_test ./drive_line_buffer -i lb.index -n 10 lb-2.dat
check_output "Random reads don't match input?" <<EOF
All done
EOF
|
#!/usr/bin/env bash
#############################################################################################################
# This script walks the commit history of the repositories under ../test/repos. For each repository it will #
# attempt to extract all the files which give rise to a conflict in a 3-way merge. #
#############################################################################################################
# Useful for testing
function earlyexit {
# get rid of changes so the next checkout doesn't complain
git merge --abort
git reset -q --hard
git clean -fdxq
git checkout master
exit 1
}
trap earlyexit SIGINT SIGTERM
cd ../test/repos
for D in ./*; do
if [ -d "$D" ]; then
cd "$D"
echo "Mining $D"
# We store the current status so we can restore cleanly once we are done
old_branch=$(git symbolic-ref --short HEAD)
# We list all commits which are identified as merges (>1 parent)
for commit in `git rev-list --merges HEAD`
do
# find the parents of the merge commit
parents=$(git log -1 --format=%P $commit)
fst=${parents%% *}
rest=${parents#* }
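# e.g. parents="1a2b 3c4d" -> fst="1a2b" (first parent), rest="3c4d" (illustrative hashes)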
# check out the first parent
git checkout -q $fst
# merge with the rest of them
git merge --no-commit $rest >/dev/null 2>&1
# check for conflicting files
if git ls-files --unmerged | grep -q '^'; then
echo "Found conflict in $D - $commit"
unmerged=$(git ls-files --unmerged | cut -d' ' -f2,3 | paste -d'\n' -s)
# We parse the list of git objects representing the conflicting files. This representation is fixed-width,
# with a 40-character objId and an identifier which represents the role. We restrict to merges between 2 parents.
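# e.g. an objID line (after the cut above) looks like "5be4a414b32cf4204f889469942986d3d783da84 1<TAB>src/core.clj" (illustrative):
# obj = the 40-char hash, role = "1", extension = "clj"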
while read -r objID; do
obj=${objID:0:40}
role=${objID:41:1}
extension=${objID: -3}
if [ "$extension" == "clj" ]; then
# The role represents which version of the git object we are dealing with.
# 1 -> Is the common parent for both branches (The origin)
# 2 -> Version on branch A
# 3 -> Version on branch B
if [ "$role" -eq "1" ]; then
# N.B. obj appear ordered by role. Role 1 will always be the first.
targetDir="../../conflicts2/$D-$commit-$obj"
mkdir -p "$targetDir"
fname="O.clj"
fi
if [ "$role" -eq "2" ]; then
fname="A.clj"
fi
if [ "$role" -eq "3" ]; then
fname="B.clj"
fi
# Output the git object as a file according to our identification
git cat-file -p $obj > "$targetDir/$fname"
fi
done <<< "$unmerged"
# Abort the merge to restore initial state
git merge --abort
fi
# get rid of changes so the next checkout doesn't complain
git reset -q --hard
git clean -fdxq
done
git checkout -q $old_branch
cd ..
fi
done
|
#!/bin/bash
LOCATION=eastus
RESOURCE_GROUP_NAME=[YOUR-RESOURCE-GROUP-NAME]
DEPLOYMENT_NAME=Deployment-$(date +"%Y-%m-%d_%H%M%S")
az group create \
--location $LOCATION \
--name "$RESOURCE_GROUP_NAME"
az deployment group create \
--resource-group "$RESOURCE_GROUP_NAME" \
--name "$DEPLOYMENT_NAME" \
--template-file "./main.bicep" \
--confirm-with-what-if
FUNCTION_NAME=$(az deployment group show --name "$DEPLOYMENT_NAME" --resource-group "$RESOURCE_GROUP_NAME" --query properties.outputs.functionName.value -o tsv)
cd ./src || exit
func azure functionapp publish "$FUNCTION_NAME"
|
#!/bin/sh
#
# builder_defaults.sh
#
# part of pfSense (https://www.pfsense.org)
# Copyright (c) 2004-2016 Electric Sheep Fencing, LLC
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################
# Product builder configuration file #
# Please don't modify this file, you #
# can put your settings and options #
# in build.conf, which is sourced at the #
# beginning of this file #
###########################################
if [ -z "${BUILDER_ROOT}" ]; then
echo ">>> ERROR: BUILDER_ROOT must be defined by script that includes builder_defaults.sh"
exit 1
fi
if [ ! -d "${BUILDER_ROOT}" ]; then
echo ">>> ERROR: BUILDER_ROOT is invalid"
exit 1
fi
export BUILDER_TOOLS=${BUILDER_TOOLS:-"${BUILDER_ROOT}/tools"}
export BUILDER_SCRIPTS=${BUILDER_SCRIPTS:-"${BUILDER_ROOT}/build/scripts"}
if [ ! -d "${BUILDER_TOOLS}" ]; then
echo ">>> ERROR: BUILDER_TOOLS is invalid"
exit 1
fi
BUILD_CONF="${BUILDER_ROOT}/build.conf"
# Ensure file exists
if [ -f ${BUILD_CONF} ]; then
. ${BUILD_CONF}
fi
# Make sure pkg will not be interactive
export ASSUME_ALWAYS_YES=true
# Architecture, supported ARCH values are:
# Tier 1: i386, AMD64, and PC98
# Tier 2: ARM, PowerPC, ia64, Sparc64 and sun4v
# Tier 3: MIPS and S/390
# Tier 4: None at the moment
# Source: http://www.freebsd.org/doc/en/articles/committers-guide/archs.html
export TARGET=${TARGET:-"`uname -m`"}
export TARGET_ARCH=${TARGET_ARCH:-${TARGET}}
# Set TARGET_ARCH_CONF_DIR
if [ "$TARGET_ARCH" = "" ]; then
export TARGET_ARCH=`uname -p`
fi
# Directory to be used for writing temporary information
export SCRATCHDIR=${SCRATCHDIR:-"${BUILDER_ROOT}/tmp"}
if [ ! -d ${SCRATCHDIR} ]; then
mkdir -p ${SCRATCHDIR}
fi
# Product details
export PRODUCT_NAME=${PRODUCT_NAME:-"nonSense"}
export PRODUCT_NAME_SUFFIX=${PRODUCT_NAME_SUFFIX:-"-CE"}
export PRODUCT_URL=${PRODUCT_URL:-""}
export PRODUCT_SRC=${PRODUCT_SRC:-"${BUILDER_ROOT}/src"}
export PRODUCT_EMAIL=${PRODUCT_EMAIL:-"[email protected]"}
export XML_ROOTOBJ=${XML_ROOTOBJ:-$(echo "${PRODUCT_NAME}" | tr '[[:upper:]]' '[[:lower:]]')}
if [ "${PRODUCT_NAME}" = "pfSense" -a "${BUILD_AUTHORIZED_BY_ELECTRIC_SHEEP_FENCING}" != "yes" ]; then
>>ERROR:">
echo ">>> ERROR: According to the following license, only Electric Sheep Fencing can build genuine pfSense® software"
echo ""
cat ${BUILDER_ROOT}/LICENSE
exit 1
fi
if [ -z "${PRODUCT_VERSION}" ]; then
if [ ! -f ${PRODUCT_SRC}/etc/version ]; then
echo ">>> ERROR: PRODUCT_VERSION is not defined and ${PRODUCT_SRC}/etc/version was not found"
print_error_pfS
fi
export PRODUCT_VERSION=$(head -n 1 ${PRODUCT_SRC}/etc/version)
fi
export PRODUCT_REVISION=${PRODUCT_REVISION:-""}
# Product repository tag to build
_cur_git_repo_branch_or_tag=$(git -C ${BUILDER_ROOT} rev-parse --abbrev-ref HEAD)
if [ "${_cur_git_repo_branch_or_tag}" = "HEAD" ]; then
# We are on a tag, lets find out its name
export GIT_REPO_BRANCH_OR_TAG=$(git -C ${BUILDER_ROOT} describe --tags)
else
export GIT_REPO_BRANCH_OR_TAG="${_cur_git_repo_branch_or_tag}"
fi
# Use vX_Y instead of RELENG_X_Y for poudriere to make it shorter
# Replace . by _ to make tag names look correct
POUDRIERE_BRANCH=$(echo "${GIT_REPO_BRANCH_OR_TAG}" | sed 's,RELENG_,v,; s,\.,_,g')
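# e.g. RELENG_2_3 -> v2_3; a dotted tag like 2.3.4 becomes 2_3_4 (illustrative)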
GIT_REPO_BASE=$(git -C ${BUILDER_ROOT} config --get remote.origin.url | sed -e 's,/[^/]*$,,')
# This is used for using svn for retrieving src
export FREEBSD_REPO_BASE=${FREEBSD_REPO_BASE:-"${GIT_REPO_BASE}/freebsd-src.git"}
export FREEBSD_BRANCH=${FREEBSD_BRANCH:-"devel-11"}
export FREEBSD_SRC_DIR=${FREEBSD_SRC_DIR:-"${SCRATCHDIR}/FreeBSD-src"}
export BUILD_KERNELS=${BUILD_KERNELS:-"${PRODUCT_NAME}"}
# XXX: Poudriere doesn't like ssh short form
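# e.g. [email protected]:repos/freebsd-src.git -> ssh://[email protected]/repos/freebsd-src.git (hypothetical URL)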
case "${FREEBSD_REPO_BASE}" in
git@*)
export FREEBSD_REPO_BASE_POUDRIERE="ssh://$(echo ${FREEBSD_REPO_BASE} | sed 's,:,/,')"
;;
*)
export FREEBSD_REPO_BASE_POUDRIERE="${FREEBSD_REPO_BASE}"
;;
esac
# Leave this alone.
export SRCCONF=${SRCCONF:-"${FREEBSD_SRC_DIR}/release/conf/${PRODUCT_NAME}_src.conf"}
export SRC_ENV_CONF=${SRC_ENV_CONF:-"${FREEBSD_SRC_DIR}/release/conf/${PRODUCT_NAME}_src-env.conf"}
export __MAKE_CONF=${__MAKE_CONF:-"${FREEBSD_SRC_DIR}/release/conf/${PRODUCT_NAME}_make.conf"}
# Extra tools to be added to ITOOLS
export LOCAL_ITOOLS=${LOCAL_ITOOLS:-"uuencode uudecode ex"}
# Path to kernel files being built
export KERNEL_BUILD_PATH=${KERNEL_BUILD_PATH:-"${SCRATCHDIR}/kernels"}
# Do not touch builder /usr/obj
export MAKEOBJDIRPREFIX=${MAKEOBJDIRPREFIX:-"${SCRATCHDIR}/obj"}
export MODULES_OVERRIDE=${MODULES_OVERRIDE:-"i2c ipmi ndis ipfw ipdivert dummynet fdescfs opensolaris zfs glxsb if_stf coretemp amdtemp aesni sfxge hwpmc vmm nmdm ix ixv"}
# Area that the final image will appear in
export IMAGES_FINAL_DIR=${IMAGES_FINAL_DIR:-"${SCRATCHDIR}/${PRODUCT_NAME}/"}
export BUILDER_LOGS=${BUILDER_LOGS:-"${BUILDER_ROOT}/logs"}
if [ ! -d ${BUILDER_LOGS} ]; then
mkdir -p ${BUILDER_LOGS}
fi
# This is where files will be staged
export INSTALLER_CHROOT_DIR=${INSTALLER_CHROOT_DIR:-"${SCRATCHDIR}/installer-dir"}
# This is where files will be staged
export STAGE_CHROOT_DIR=${STAGE_CHROOT_DIR:-"${SCRATCHDIR}/stage-dir"}
# Directory that will clone to in order to create
# iso staging area.
export FINAL_CHROOT_DIR=${FINAL_CHROOT_DIR:-"${SCRATCHDIR}/final-dir"}
# OVF/vmdk parms
# Name of ovf file included inside OVA archive
export OVFTEMPLATE=${OVFTEMPLATE:-"${BUILDER_TOOLS}/templates/ovf/${PRODUCT_NAME}.ovf"}
# / partition to be used by mkimg
export OVFUFS=${OVFUFS:-"${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-disk1.ufs"}
# Raw disk to be converted to vmdk
export OVFRAW=${OVFRAW:-"${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-disk1.raw"}
# On disk name of VMDK file included in OVA
export OVFVMDK=${OVFVMDK:-"${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-disk1.vmdk"}
# 8 gigabyte on disk VMDK size
export VMDK_DISK_CAPACITY_IN_GB=${VMDK_DISK_CAPACITY_IN_GB:-"8"}
# swap partition size (freebsd-swap)
export OVA_SWAP_PART_SIZE_IN_GB=${OVA_SWAP_PART_SIZE_IN_GB:-"0"}
# Temporary place to save files
export OVA_TMP=${OVA_TMP:-"${SCRATCHDIR}/ova_tmp"}
# end of OVF
# Number of code images on media (1 or 2)
export NANO_IMAGES=2
# 0 -> Leave second image all zeroes so it compresses better.
# 1 -> Initialize second image with a copy of the first
export NANO_INIT_IMG2=1
export NANO_NEWFS="-b 4096 -f 512 -i 8192 -O1"
export FLASH_SIZE=${FLASH_SIZE:-"2g"}
# Size of code file system in 512 bytes sectors
# If zero, size will be as large as possible.
export NANO_CODESIZE=0
# Size of data file system in 512 bytes sectors
# If zero: no partition configured.
# If negative: max size possible
export NANO_DATASIZE=0
# Size of Product /conf partition # 102400 = 50 megabytes.
export NANO_CONFSIZE=102400
# packet is OK for 90% of embedded
export NANO_BOOT0CFG="-o packet -s 1 -m 3"
# NOTE: Date string is used for creating file names of images
# The file is used for sharing the same value with build_snapshots.sh
export DATESTRINGFILE=${DATESTRINGFILE:-"$SCRATCHDIR/version.snapshots"}
if [ -z "${DATESTRING}" ]; then
if [ -f "${DATESTRINGFILE}" -a -n "${_USE_OLD_DATESTRING}" ]; then
export DATESTRING=$(cat $DATESTRINGFILE)
else
export DATESTRING=$(date "+%Y%m%d-%H%M")
fi
fi
echo "$DATESTRING" > $DATESTRINGFILE
# NOTE: Date string is placed on the final image etc folder to help detect new updates
# The file is used for sharing the same value with build_snapshots.sh
export BUILTDATESTRINGFILE=${BUILTDATESTRINGFILE:-"$SCRATCHDIR/version.buildtime"}
if [ -z "${BUILTDATESTRING}" ]; then
if [ -f "${BUILTDATESTRINGFILE}" -a -n "${_USE_OLD_DATESTRING}" ]; then
export BUILTDATESTRING=$(cat $BUILTDATESTRINGFILE)
else
export BUILTDATESTRING=$(date "+%a %b %d %T %Z %Y")
fi
fi
echo "$BUILTDATESTRING" > $BUILTDATESTRINGFILE
STAGING_HOSTNAME=${STAGING_HOSTNAME:-"release-staging.netgate.com"}
# Poudriere
export ZFS_TANK=${ZFS_TANK:-"zroot"}
export ZFS_ROOT=${ZFS_ROOT:-"/poudriere"}
export POUDRIERE_BULK=${POUDRIERE_BULK:-"${BUILDER_TOOLS}/conf/pfPorts/poudriere_bulk"}
export POUDRIERE_PORTS_GIT_URL=${POUDRIERE_PORTS_GIT_URL:-"${GIT_REPO_BASE}/freebsd-ports.git"}
export POUDRIERE_PORTS_GIT_BRANCH=${POUDRIERE_PORTS_GIT_BRANCH:-"devel"}
# Use vX_Y instead of RELENG_X_Y for poudriere to make it shorter
POUDRIERE_PORTS_BRANCH=$(echo "${POUDRIERE_PORTS_GIT_BRANCH}" | sed 's,RELENG_,v,')
export POUDRIERE_PORTS_NAME=${POUDRIERE_PORTS_NAME:-"${PRODUCT_NAME}_${POUDRIERE_PORTS_BRANCH}"}
# XXX: Poudriere doesn't like ssh short form
case "${POUDRIERE_PORTS_GIT_URL}" in
git@*)
POUDRIERE_PORTS_GIT_URL="ssh://$(echo ${POUDRIERE_PORTS_GIT_URL} | sed 's,:,/,')"
;;
esac
unset _IS_RELEASE
unset CORE_PKG_DATESTRING
export TIMESTAMP_SUFFIX="-${DATESTRING}"
# pkg doesn't like - as version separator, use . instead
export PKG_DATESTRING=$(echo "${DATESTRING}" | sed 's,-,.,g')
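# e.g. DATESTRING 20160521-1934 -> PKG_DATESTRING 20160521.1934 (illustrative date)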
case "${PRODUCT_VERSION##*-}" in
RELEASE)
export _IS_RELEASE=yes
unset TIMESTAMP_SUFFIX
;;
ALPHA|DEVELOPMENT)
export CORE_PKG_DATESTRING=".a.${PKG_DATESTRING}"
;;
BETA*)
export CORE_PKG_DATESTRING=".b.${PKG_DATESTRING}"
;;
RC*)
export CORE_PKG_DATESTRING=".r.${PKG_DATESTRING}"
;;
*)
echo ">>> ERROR: Invalid PRODUCT_VERSION format ${PRODUCT_VERSION}"
exit 1
esac
# Host to rsync pkg repos from poudriere
export PKG_RSYNC_HOSTNAME=${PKG_RSYNC_HOSTNAME:-${STAGING_HOSTNAME}}
export PKG_RSYNC_USERNAME=${PKG_RSYNC_USERNAME:-"wwwsync"}
export PKG_RSYNC_SSH_PORT=${PKG_RSYNC_SSH_PORT:-"22"}
export PKG_RSYNC_DESTDIR=${PKG_RSYNC_DESTDIR:-"/staging/ce/packages"}
export PKG_RSYNC_LOGS=${PKG_RSYNC_LOGS:-"/staging/ce/packages/logs/${POUDRIERE_BRANCH}/${TARGET}"}
# Final packages server
if [ -n "${_IS_RELEASE}" ]; then
export PKG_FINAL_RSYNC_HOSTNAME=${PKG_FINAL_RSYNC_HOSTNAME:-"pkg.pfsense.org"}
export PKG_FINAL_RSYNC_DESTDIR=${PKG_FINAL_RSYNC_DESTDIR:-"/usr/local/www/pkg"}
else
export PKG_FINAL_RSYNC_HOSTNAME=${PKG_FINAL_RSYNC_HOSTNAME:-"beta.pfsense.org"}
export PKG_FINAL_RSYNC_DESTDIR=${PKG_FINAL_RSYNC_DESTDIR:-"/usr/local/www/beta/packages"}
fi
export PKG_FINAL_RSYNC_USERNAME=${PKG_FINAL_RSYNC_USERNAME:-"wwwsync"}
export PKG_FINAL_RSYNC_SSH_PORT=${PKG_FINAL_RSYNC_SSH_PORT:-"22"}
export SKIP_FINAL_RSYNC=${SKIP_FINAL_RSYNC:-}
# pkg repo variables
export USE_PKG_REPO_STAGING="1"
export PKG_REPO_SERVER_DEVEL=${PKG_REPO_SERVER_DEVEL:-"pkg+https://beta.pfsense.org/packages"}
export PKG_REPO_SERVER_RELEASE=${PKG_REPO_SERVER_RELEASE:-"pkg+https://beta.pfsense.org/packages"}
export PKG_REPO_SERVER_STAGING=${PKG_REPO_SERVER_STAGING:-"pkg+http://${STAGING_HOSTNAME}/ce/packages"}
if [ -n "${_IS_RELEASE}" ]; then
export PKG_REPO_BRANCH_RELEASE=${PKG_REPO_BRANCH_RELEASE:-${POUDRIERE_BRANCH}}
export PKG_REPO_BRANCH_DEVEL=${PKG_REPO_BRANCH_DEVEL:-${POUDRIERE_BRANCH}}
export PKG_REPO_BRANCH_STAGING=${PKG_REPO_BRANCH_STAGING:-${PKG_REPO_BRANCH_RELEASE}}
else
export PKG_REPO_BRANCH_RELEASE=${PKG_REPO_BRANCH_RELEASE:-${POUDRIERE_BRANCH}}
export PKG_REPO_BRANCH_DEVEL=${PKG_REPO_BRANCH_DEVEL:-${POUDRIERE_BRANCH}}
export PKG_REPO_BRANCH_STAGING=${PKG_REPO_BRANCH_STAGING:-${PKG_REPO_BRANCH_DEVEL}}
fi
if [ -n "${_IS_RELEASE}" ]; then
export PKG_REPO_SIGN_KEY=${PKG_REPO_SIGN_KEY:-"release${PRODUCT_NAME_SUFFIX}"}
else
export PKG_REPO_SIGN_KEY=${PKG_REPO_SIGN_KEY:-"beta${PRODUCT_NAME_SUFFIX}"}
fi
# Command used to sign pkg repo
export PKG_REPO_SIGNING_COMMAND=${PKG_REPO_SIGNING_COMMAND:-"ssh [email protected] sudo ./sign.sh ${PKG_REPO_SIGN_KEY}"}
export DO_NOT_SIGN_PKG_REPO=${DO_NOT_SIGN_PKG_REPO:-}
# Define base package version, based on date for snaps
export CORE_PKG_VERSION="${PRODUCT_VERSION%%-*}${CORE_PKG_DATESTRING}${PRODUCT_REVISION:+_}${PRODUCT_REVISION}"
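# e.g. PRODUCT_VERSION=2.3.4-BETA1 with CORE_PKG_DATESTRING=.b.20160521.1934 yields 2.3.4.b.20160521.1934 (illustrative)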
export CORE_PKG_PATH=${CORE_PKG_PATH:-"${SCRATCHDIR}/${PRODUCT_NAME}_${POUDRIERE_BRANCH}_${TARGET_ARCH}-core"}
export CORE_PKG_REAL_PATH="${CORE_PKG_PATH}/.real_${DATESTRING}"
export CORE_PKG_ALL_PATH="${CORE_PKG_PATH}/All"
export PKG_REPO_BASE=${PKG_REPO_BASE:-"${FREEBSD_SRC_DIR}/release/pkg_repos"}
export PKG_REPO_DEFAULT=${PKG_REPO_DEFAULT:-"${PKG_REPO_BASE}/${PRODUCT_NAME}-repo.conf"}
export PKG_REPO_PATH=${PKG_REPO_PATH:-"/usr/local/etc/pkg/repos/${PRODUCT_NAME}.conf"}
export PRODUCT_SHARE_DIR=${PRODUCT_SHARE_DIR:-"/usr/local/share/${PRODUCT_NAME}"}
# Package overlay. This gives people a chance to build product
# installable image that already contains certain extra packages.
#
# Needs to contain comma separated package names. Of course
# package names must be valid. Using non existent
# package name would yield an error.
#
#export custom_package_list=""
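#export custom_package_list="pkg-a,pkg-b" # hypothetical package names, shown for illustration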
# General builder output filenames
export ISOPATH=${ISOPATH:-"${IMAGES_FINAL_DIR}/installer/${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-${TARGET}${TIMESTAMP_SUFFIX}.iso"}
export MEMSTICKPATH=${MEMSTICKPATH:-"${IMAGES_FINAL_DIR}/installer/${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-memstick-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-${TARGET}${TIMESTAMP_SUFFIX}.img"}
export MEMSTICKSERIALPATH=${MEMSTICKSERIALPATH:-"${IMAGES_FINAL_DIR}/installer/${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-memstick-serial-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-${TARGET}${TIMESTAMP_SUFFIX}.img"}
export MEMSTICKADIPATH=${MEMSTICKADIPATH:-"${IMAGES_FINAL_DIR}/installer/${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-memstick-ADI-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-${TARGET}${TIMESTAMP_SUFFIX}.img"}
export OVAPATH=${OVAPATH:-"${IMAGES_FINAL_DIR}/virtualization/${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-${TARGET}${TIMESTAMP_SUFFIX}.ova"}
export MEMSTICK_VARIANTS=${MEMSTICK_VARIANTS:-}
export VARIANTIMAGES=""
export VARIANTUPDATES=""
# nanobsd templates
export NANOBSD_IMG_TEMPLATE=${NANOBSD_IMG_TEMPLATE:-"${PRODUCT_NAME}${PRODUCT_NAME_SUFFIX}-${PRODUCT_VERSION}${PRODUCT_REVISION:+-p}${PRODUCT_REVISION}-%%SIZE%%-${TARGET}-%%TYPE%%${TIMESTAMP_SUFFIX}.img"}
# Rsync data to send snapshots
export RSYNCUSER=${RSYNCUSER:-"snapshots"}
export RSYNCPATH=${RSYNCPATH:-"/usr/local/www/snapshots/${TARGET}/${PRODUCT_NAME}_${GIT_REPO_BRANCH_OR_TAG}"}
export RSYNCKBYTELIMIT=${RSYNCKBYTELIMIT:-"248000"}
export SNAPSHOTSLOGFILE=${SNAPSHOTSLOGFILE:-"${SCRATCHDIR}/snapshots-build.log"}
export SNAPSHOTSLASTUPDATE=${SNAPSHOTSLASTUPDATE:-"${SCRATCHDIR}/snapshots-lastupdate.log"}
if [ -n "${POUDRIERE_SNAPSHOTS}" ]; then
export SNAPSHOTS_RSYNCIP=${PKG_RSYNC_HOSTNAME}
export SNAPSHOTS_RSYNCUSER=${PKG_RSYNC_USERNAME}
else
export SNAPSHOTS_RSYNCIP=${RSYNCIP}
export SNAPSHOTS_RSYNCUSER=${RSYNCUSER}
fi
if [ "${PRODUCT_NAME}" = "pfSense" ]; then
export VENDOR_NAME=${VENDOR_NAME:-"Electric Sheep Fencing, LLC"}
export OVF_INFO=${OVF_INFO:-"pfSense is a free, open source customized distribution of FreeBSD tailored for use as a firewall and router. In addition to being a powerful, flexible firewalling and routing platform, it includes a long list of related features and a package system allowing further expandability without adding bloat and potential security vulnerabilities to the base distribution. pfSense is a popular project with more than 1 million downloads since its inception, and proven in countless installations ranging from small home networks protecting a PC and an Xbox to large corporations, universities and other organizations protecting thousands of network devices."}
else
export VENDOR_NAME=${VENDOR_NAME:-"nonSense"}
export OVF_INFO=${OVF_INFO:-"none"}
fi
|
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Build the Python PIP installation package for TensorFlow
# and run the Python unit tests from the source code on the installation
#
# Usage:
# test_installation.sh [--virtualenv] [--gpu]
#
# If the flag --virtualenv is set, the script will use "python" as the Python
# binary path. Otherwise, it will use tools/python_bin_path.sh to determine
# the Python binary path.
#
# The --gpu flag informs the script that this is a GPU build, so that the
# appropriate test blacklists can be applied accordingly.
#
# When executing the Python unit tests, the script obeys the shell
# variables: PY_TEST_WHITELIST, PY_TEST_BLACKLIST, PY_TEST_GPU_BLACKLIST,
#
# To select only a subset of the Python tests to run, set the environment
# variable PY_TEST_WHITELIST, e.g.,
# PY_TEST_WHITELIST="tensorflow/python/kernel_tests/shape_ops_test.py"
# Separate the tests with a colon (:). Leave this environment variable empty
# to disable the whitelist.
#
# You can also ignore a set of the tests by using the environment variable
# PY_TEST_BLACKLIST. For example, you can include in PY_TEST_BLACKLIST the
# tests that depend on Python modules in TensorFlow source that are not
# exported publicly.
#
# In addition, you can put a blacklist for GPU-only builds in the environment
# variable PY_TEST_GPU_BLACKLIST.
#
# TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
# script to perform bazel clean prior to main build and test steps.
#
# TF_BUILD_USE_GPU, if set to 1, limits the number of concurrent tests to
# the number stored in TF_GPU_COUNT and assigns each test to a different GPU.
#
# TF_GPU_COUNT, Set the number of GPUs in the system. We run only this many
# concurrent tests when running GPU tests.
#
# TF_BUILD_EXTRA_EXCLUSIVE_INSTALL_TESTS, add to the default list of
# Python unit tests to run in exclusive mode (i.e., not concurrently with
# other tests), separated with colons
#
# TF_BUILD_FILTER_INSTALL_TESTS_BY_TAG: If set to a non-empty string
# (e.g., "local"), will filter the Python install-tests by that string as
# bazel tags. Multiple filter tags can be used. Both the inclusive filtering
# mode and the exclusive filtering mode can be used. For example:
#
# TF_BUILD_FILTER_INSTALL_TESTS_BY_TAG="local,-manual"
#
# will let the script run the Python unit tests that have the tag "local"
# and do not have the tag "manual". The "-" marks the exclusive filtering
# mode. The inclusive mode is the default. Use commas to separate the tags.
#
# If the environmental variable NO_TEST_ON_INSTALL is set to any non-empty
# value, the script will exit after the pip install step.
# =============================================================================
# Test blacklist: General
#
# tensorflow/python/framework/ops_test.py
# depends on "test_ops", which is defined in a C++ file wrapped as
# a .py file through the Bazel rule "tf_gen_ops_wrapper_py".
# tensorflow/util/protobuf/compare_test.py:
# depends on compare_test_pb2 defined outside Python
# tensorflow/python/framework/device_test.py:
# depends on CheckValid() and ToString(), both defined externally
# tensorflow/python/framework/file_system_test.py:
# depends on having the .so which is not shipped in the pip package.
# tensorflow/contrib/quantization/*:
# These depend on an .so mechanism that's not shipped in the pip package.
# tensorflow/python/platform/default/*_test.py:
# These are obsolete and replaced by corresponding files in python/platform.
# They will be removed in the future.
PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:"\
"tensorflow/python/framework/ops_test.py:"\
"tensorflow/python/util/protobuf/compare_test.py:"\
"tensorflow/python/framework/device_test.py:"\
"tensorflow/python/framework/file_system_test.py:"\
"tensorflow/contrib/quantization/python/dequantize_op_test.py:"\
"tensorflow/contrib/quantization/python/quantized_conv_ops_test.py:"\
"tensorflow/contrib/quantization/tools/quantize_graph_test.py:"\
"tensorflow/contrib/session_bundle/exporter_test.py:"\
"tensorflow/contrib/session_bundle/session_bundle_test.py:"\
"tensorflow/python/platform/default/_resource_loader_test.py:"\
"tensorflow/python/platform/default/flags_test.py:"\
"tensorflow/python/platform/default/logging_test.py:"\
"tensorflow/contrib/learn/nonlinear_test.py:"
# Test blacklist: GPU-only
PY_TEST_GPU_BLACKLIST="${PY_TEST_GPU_BLACKLIST}:"\
"tensorflow/python/client/session_test.py:"\
"tensorflow/python/framework/function_test.py:"\
"tensorflow/contrib/tensor_forest/python/kernel_tests/scatter_add_ndim_op_test.py"
# Tests that should be run in the exclusive mode (i.e., not parallel with
# other tests)
PY_TEST_EXCLUSIVE_LIST=""
# Append custom list of exclusive tests
if [[ ! -z "${TF_BUILD_EXTRA_EXCLUSIVE_INSTALL_TESTS}" ]]; then
PY_TEST_EXCLUSIVE_LIST="${PY_TEST_EXCLUSIVE_LIST}:"\
"${TF_BUILD_EXTRA_EXCLUSIVE_INSTALL_TESTS}"
fi
# =============================================================================
echo "PY_TEST_WHITELIST: ${PY_TEST_WHITELIST}"
echo "PY_TEST_BLACKLIST: ${PY_TEST_BLACKLIST}"
echo "PY_TEST_GPU_BLACKLIST: ${PY_TEST_GPU_BLACKLIST}"
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/builds_common.sh"
TF_GPU_COUNT=${TF_GPU_COUNT:-8}
# Process input arguments
IS_VIRTUALENV=0
IS_GPU=0
while true; do
if [[ "$1" == "--virtualenv" ]]; then
IS_VIRTUALENV=1
elif [[ "$1" == "--gpu" ]]; then
IS_GPU=1
fi
shift
if [[ -z "$1" ]]; then
break
fi
done
# Obtain the path to Python binary
if [[ ${IS_VIRTUALENV} == "1" ]]; then
PYTHON_BIN_PATH="$(which python)"
else
source tools/python_bin_path.sh
# Assume: PYTHON_BIN_PATH is exported by the script above
fi
# Obtain the path to head/ghead binary (for log file printing)
HEAD_BIN="ghead"
if [[ -z $(which "${HEAD_BIN}") ]]; then
# This is not Mac (which uses coreutils/ghead), use head.
HEAD_BIN="head"
if [[ -z $(which "${HEAD_BIN}") ]]; then
die "Unable to obtain path to head or ghead"
fi
fi
if [[ -z "${PYTHON_BIN_PATH}" ]]; then
die "PYTHON_BIN_PATH was not provided. If this is not virtualenv, "\
"did you run configure?"
fi
# Append GPU-only test blacklist
if [[ ${IS_GPU} == "1" ]]; then
PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:${PY_TEST_GPU_BLACKLIST}"
fi
# Determine the major and minor versions of Python being used (e.g., 2.7)
# This info will be useful for determining the directory of the local pip
# installation of Python
PY_MAJOR_MINOR_VER=$(${PYTHON_BIN_PATH} -V 2>&1 | awk '{print $NF}' | cut -d. -f-2)
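# e.g. "Python 2.7.12" -> "2.7"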
echo "Python binary path to be used in PIP install-test: ${PYTHON_BIN_PATH} "\
"(Major.Minor version: ${PY_MAJOR_MINOR_VER})"
# Avoid permission issues outside container
umask 000
# Directory from which the unit-test files will be run
PY_TEST_DIR_REL="pip_test/tests"
PY_TEST_DIR=$(realpath ${PY_TEST_DIR_REL}) # Get absolute path
rm -rf ${PY_TEST_DIR} && mkdir -p ${PY_TEST_DIR}
# Create test log directory
PY_TEST_LOG_DIR_REL=${PY_TEST_DIR_REL}/logs
PY_TEST_LOG_DIR=$(realpath ${PY_TEST_LOG_DIR_REL}) # Absolute path
mkdir ${PY_TEST_LOG_DIR}
# Copy source files that are required by the tests but are not included in the
# PIP package
# Look for local Python library directory
# pushd/popd avoids importing TensorFlow from the source directory.
pushd /tmp > /dev/null
TF_INSTALL_PATH=$(dirname \
$("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; print(tf.__file__)"))
popd > /dev/null
if [[ -z ${TF_INSTALL_PATH} ]]; then
die "Failed to find path where TensorFlow is installed."
else
echo "Found TensorFlow install path: ${TF_INSTALL_PATH}"
fi
echo "Copying some source directories required by Python unit tests but "\
"not included in install to TensorFlow install path: ${TF_INSTALL_PATH}"
# Files for tensorflow.python.tools
rm -rf ${TF_INSTALL_PATH}/python/tools
cp -r tensorflow/python/tools \
${TF_INSTALL_PATH}/python/tools
touch ${TF_INSTALL_PATH}/python/tools/__init__.py # Make module visible
# Files for tensorflow.examples
rm -rf ${TF_INSTALL_PATH}/examples/image_retraining
mkdir -p ${TF_INSTALL_PATH}/examples/image_retraining
cp -r tensorflow/examples/image_retraining/retrain.py \
${TF_INSTALL_PATH}/examples/image_retraining/retrain.py
touch ${TF_INSTALL_PATH}/examples/__init__.py
touch ${TF_INSTALL_PATH}/examples/image_retraining/__init__.py
echo "Copying additional files required by tests to working directory "\
"for test: ${PY_TEST_DIR}"
# Image files required by some tests, e.g., images_ops_test.py
mkdir -p ${PY_TEST_DIR}/tensorflow/core/lib
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
cp -r tensorflow/core/lib/jpeg ${PY_TEST_DIR}/tensorflow/core/lib
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/gif
cp -r tensorflow/core/lib/gif ${PY_TEST_DIR}/tensorflow/core/lib
# Copy test data from tensorflow/contrib/ffmpeg
mkdir -p ${PY_TEST_DIR}/tensorflow/contrib/ffmpeg
rm -rf ${PY_TEST_DIR}/tensorflow/contrib/ffmpeg/testdata
cp -r tensorflow/contrib/ffmpeg/testdata ${PY_TEST_DIR}
# Run tests
DIR0=$(pwd)
ALL_PY_TESTS_0=$(find tensorflow/{contrib,examples,models,python,tensorboard} \
-type f \( -name "*_test.py" -o -name "test_*.py" \) | sort)
# Subroutine for filtering test file names by a bazel tag.
filter_tests_by_bazel_tag() {
# Usage: filter_tests_by_bazel_tag (--inclusive | --exclusive)
# <BAZEL_TAG> <INPUT_TESTS>
#
# E.g., filter_tests_by_bazel_tag --inclusive "local"
# "dir1/test1.py dir2/test2.py"
#
# Use the flag --inclusive so that only the tests that have the tag will be
# included in the returned string.
# Use the flag --exclusive so that the returned string will consist of only
# the tests that do not have the tag.
# INPUT_TESTS are the names of the input Python unit test files, separated by
# spaces.
#
# The output string (through stdout) is: OUTPUT_TESTS | DISCARDED_TESTS
# That is: a list of tests that passed the filter, followed by " | ",
# followed by a list of tests that are discarded
FILTER_MODE=$1
TAG=$2
INPUT_TESTS=$3
# Input sanity checks
if [[ "${FILTER_MODE}" != "--inclusive" ]] &&
[[ "${FILTER_MODE}" != "--exclusive" ]]; then
echo "ERROR: Unrecognized filter mode: ${FILTER_MODE}"
exit 1
fi
if [[ -z "${TAG}" ]]; then
echo "ERROR: Bazal tag is not supplied"
exit 1
fi
if [[ -z "${INPUT_TESTS}" ]]; then
echo "ERROR: INPUT_TESTS is not supplied"
exit 1
fi
# Check bazel on path
if [[ -z $(which bazel) ]]; then
echo "ERROR: bazel is not on path"
exit 1
fi
# Get all bazel targets that have the specified tag
BAZEL_TARGETS=\
$(bazel query "kind(py_test, attr(tags, "${TAG}", //tensorflow/...))" | sort)
TARGET_ALIASES=":"
for TARGET in ${BAZEL_TARGETS}; do
# Transform, e.g., //tensorflow/python/kernel_tests:xent_op_test -->
# python-xent_op_test
# to be compared with the transformed strings from the Python unit test
# file names.
TARGET_1=$(echo "${TARGET}" | sed "s/:/ /g")
TARGET_PATH_1=$(echo "${TARGET_1}" | sed "s/\/\// /g" | sed "s/\// /g" \
| awk '{print $2}')
TARGET_BASE_NAME=$(echo "${TARGET_1}" | awk '{print $NF}')
TARGET_ALIAS="${TARGET_PATH_1}-${TARGET_BASE_NAME}"
TARGET_ALIASES="${TARGET_ALIASES}${TARGET_ALIAS}:"
done
TARGET_ALIASES="${TARGET_ALIASES}:"
# Filter the list of tests obtained from listing files with the bazel query
# results.
TESTS_PASSED_FILTER=""
TESTS_BLOCKED_BY_FILTER=""
for PY_TEST in ${INPUT_TESTS}; do
# Transform, e.g., tensorflow/python/kernel_tests/xent_op_test.py -->
# python-xent_op_test
PY_TEST_PATH_1=$(echo "${PY_TEST}" | sed "s/\// /g" | awk '{print $2}')
PY_TEST_BASE_NAME=$(echo "${PY_TEST}" | sed "s/\// /g" \
| awk '{print $NF}' | sed "s/\.py//g")
PY_TEST_ALIAS="${PY_TEST_PATH_1}-${PY_TEST_BASE_NAME}"
TO_INCLUDE=0
if [[ "${TARGET_ALIASES}" == *"${PY_TEST_ALIAS}"* ]] && \
[[ "${FILTER_MODE}" == "--inclusive" ]]; then
TO_INCLUDE=1
elif [[ "${TARGET_ALIASES}" != *"${PY_TEST_ALIAS}"* ]] && \
[[ "${FILTER_MODE}" == "--exclusive" ]]; then
TO_INCLUDE=1
fi
if [[ ${TO_INCLUDE} == 1 ]]; then
TESTS_PASSED_FILTER="${TESTS_PASSED_FILTER} ${PY_TEST}"
else
TESTS_BLOCKED_BY_FILTER="${TESTS_BLOCKED_BY_FILTER} ${PY_TEST}"
fi
done
echo "${TESTS_PASSED_FILTER} | ${TESTS_BLOCKED_BY_FILTER}"
}
if [[ ${TF_BUILD_FILTER_INSTALL_TESTS_BY_TAG} != "" ]]; then
# Iteratively apply the filter tags
TAGS=(${TF_BUILD_FILTER_INSTALL_TESTS_BY_TAG//,/ })
for TAG in ${TAGS[@]}; do
if [[ ${TAG} == "-"* ]]; then
MODE="--exclusive"
TAG_1=$(echo ${TAG} | sed 's/-//')
else
MODE="--inclusive"
TAG_1=${TAG}
fi
FILTER_OUTPUT=$(filter_tests_by_bazel_tag ${MODE} \
"${TAG_1}" "${ALL_PY_TESTS_0}")
ALL_PY_TESTS_0=$(echo "${FILTER_OUTPUT}" | cut -d \| -f 1)
DISCARDED_TESTS=$(echo "${FILTER_OUTPUT}" | cut -d \| -f 2)
N_DISCARDED=$(echo "${DISCARDED_TESTS}" | wc -w)
echo ""
echo "Skipping ${N_DISCARDED} test(s) due to filter tag \"${TAG}\":"
echo "${DISCARDED_TESTS}"
echo ""
done
fi
# Move the exclusive tests to the back of the list
EXCLUSIVE_LIST="$(echo "${PY_TEST_EXCLUSIVE_LIST}" | sed -e 's/:/ /g')"
ALL_PY_TESTS=""
for TEST in ${ALL_PY_TESTS_0}; do
if [[ ! ${PY_TEST_EXCLUSIVE_LIST} == *"${TEST}"* ]]; then
ALL_PY_TESTS="${ALL_PY_TESTS} ${TEST}"
fi
done
# Number of parallel (non-exclusive) tests
N_PAR_TESTS=$(echo ${ALL_PY_TESTS} | wc -w)
echo "Number of non-exclusive tests: ${N_PAR_TESTS}"
for TEST in ${EXCLUSIVE_LIST}; do
ALL_PY_TESTS="${ALL_PY_TESTS} ${TEST}"
done
PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
if [[ ${PY_TEST_COUNT} -eq 0 ]]; then
die "ERROR: Cannot find any tensorflow Python unit tests to run on install"
fi
# Iterate through all the Python unit test files using the installation
TEST_COUNTER=0
PASS_COUNTER=0
FAIL_COUNTER=0
SKIP_COUNTER=0
FAILED_TESTS=""
FAILED_TEST_LOGS=""
if [[ "${TF_BUILD_USE_GPU}" == "1" ]]; then
N_JOBS=$TF_GPU_COUNT
else
N_JOBS=$(grep -c ^processor /proc/cpuinfo)
if [[ -z ${N_JOBS} ]]; then
# Try the Mac way of getting number of CPUs
N_JOBS=$(sysctl -n hw.ncpu)
fi
# If still cannot determine the number of CPUs, pick 8.
if [[ -z ${N_JOBS} ]]; then
N_JOBS=8
echo "Cannot determine the number of processors"
echo "Using default concurrent job counter ${N_JOBS}"
fi
fi
echo "Running Python tests-on-install with ${N_JOBS} concurrent jobs..."
ALL_PY_TESTS=(${ALL_PY_TESTS})
while true; do
TEST_LOGS=""
TEST_INDICES=""
TEST_FILE_PATHS=""
TEST_BASENAMES=""
ITER_COUNTER=0
while true; do
# Break if the end is reached
if [[ "${TEST_COUNTER}" -ge "${PY_TEST_COUNT}" ]]; then
break;
fi
# for TEST_FILE_PATH in ${ALL_PY_TESTS}; do
TEST_FILE_PATH=${ALL_PY_TESTS[TEST_COUNTER]}
((TEST_COUNTER++))
((ITER_COUNTER++))
# If PY_TEST_WHITELIST is not empty, only the white-listed tests will be run
if [[ ! -z ${PY_TEST_WHITELIST} ]] && \
[[ ! ${PY_TEST_WHITELIST} == *"${TEST_FILE_PATH}"* ]]; then
((SKIP_COUNTER++))
echo "Non-whitelisted test SKIPPED: ${TEST_FILE_PATH}"
continue
fi
# If the test is in the black list, skip it
if [[ ${PY_TEST_BLACKLIST} == *"${TEST_FILE_PATH}"* ]]; then
((SKIP_COUNTER++))
echo "Blacklisted test SKIPPED: ${TEST_FILE_PATH}"
continue
fi
TEST_INDICES="${TEST_INDICES} ${TEST_COUNTER}"
TEST_FILE_PATHS="${TEST_FILE_PATHS} ${TEST_FILE_PATH}"
# Copy to a separate directory to guard against the possibility of picking
# up modules in the source directory
cp ${TEST_FILE_PATH} ${PY_TEST_DIR}/
TEST_BASENAME=$(basename "${TEST_FILE_PATH}")
TEST_BASENAMES="${TEST_BASENAMES} ${TEST_BASENAME}"
# Relative path of the test log. Use long path in case there are duplicate
# file names in the Python tests
TEST_LOG_REL="${PY_TEST_LOG_DIR_REL}/${TEST_FILE_PATH}.log"
mkdir -p $(dirname ${TEST_LOG_REL}) # Create directory for log
TEST_LOG=$(realpath ${TEST_LOG_REL}) # Absolute path
TEST_LOGS="${TEST_LOGS} ${TEST_LOG}"
# Launch test asynchronously
if [[ "${TF_BUILD_USE_GPU}" == "1" ]]; then
"${SCRIPT_DIR}/../gpu_build/parallel_gpu_execute.sh" \
"${SCRIPT_DIR}/py_test_delegate.sh" \
"${PYTHON_BIN_PATH}" "${PY_TEST_DIR}/${TEST_BASENAME}" "${TEST_LOG}" &
else
"${SCRIPT_DIR}/py_test_delegate.sh" \
"${PYTHON_BIN_PATH}" "${PY_TEST_DIR}/${TEST_BASENAME}" "${TEST_LOG}" &
fi
if [[ "${TEST_COUNTER}" -ge "${N_PAR_TESTS}" ]]; then
# Run in exclusive mode
if [[ "${TEST_COUNTER}" -gt "${N_PAR_TESTS}" ]]; then
echo "Run test exclusively: ${PY_TEST_DIR}/${TEST_BASENAME}"
fi
break
fi
if [[ "${ITER_COUNTER}" -ge "${N_JOBS}" ]] ||
[[ "${TEST_COUNTER}" -ge "${PY_TEST_COUNT}" ]]; then
break
fi
done
# Wait for all processes to complete
wait
TEST_LOGS=(${TEST_LOGS})
TEST_FILE_PATHS=(${TEST_FILE_PATHS})
TEST_BASENAMES=(${TEST_BASENAMES})
K=0
for TEST_INDEX in ${TEST_INDICES}; do
TEST_FILE_PATH=${TEST_FILE_PATHS[K]}
TEST_RESULT=$(tail -1 "${TEST_LOGS[K]}" | awk '{print $1}')
ELAPSED_TIME=$(tail -1 "${TEST_LOGS[K]}" | cut -d' ' -f2-)
PROG_STR="(${TEST_INDEX} / ${PY_TEST_COUNT})"
# Check the pass/failure status of each test from its log output
if [[ ${TEST_RESULT} -eq 0 ]]; then
((PASS_COUNTER++))
echo "${PROG_STR} Python test-on-install PASSED (${ELAPSED_TIME}): ${TEST_FILE_PATH}"
else
((FAIL_COUNTER++))
FAILED_TESTS="${FAILED_TESTS} ${TEST_FILE_PATH}"
FAILED_TEST_LOGS="${FAILED_TEST_LOGS} ${TEST_LOGS[K]}"
echo "${PROG_STR} Python test-on-install FAILED (${ELAPSED_TIME}): ${TEST_FILE_PATH}"
echo " Log @: ${TEST_LOGS[K]}"
echo "============== BEGINS failure log content =============="
"${HEAD_BIN}" --lines=-1 "${TEST_LOGS[K]}"
echo "============== ENDS failure log content =============="
echo ""
fi
cd ${DIR0}
# Clean up files for this test
rm -f ${TEST_BASENAMES[K]}
((K++))
done
# Stop if the end is reached
if [[ "${TEST_COUNTER}" -ge "${PY_TEST_COUNT}" ]]; then
break;
fi
done
# Clean up files copied for Python unit tests:
rm -rf ${TF_INSTALL_PATH}/python/tools
rm -rf ${TF_INSTALL_PATH}/examples/image_retraining
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
rm -rf ${PY_TEST_DIR}/testdata
echo ""
echo "${PY_TEST_COUNT} Python test(s):" \
"${PASS_COUNTER} passed;" \
"${FAIL_COUNTER} failed; " \
"${SKIP_COUNTER} skipped"
echo "Test logs directory: ${PY_TEST_LOG_DIR_REL}"
if [[ ${FAIL_COUNTER} -eq 0 ]]; then
echo ""
echo "Python test-on-install SUCCEEDED"
exit 0
else
echo "FAILED test(s):"
FAILED_TEST_LOGS=($FAILED_TEST_LOGS)
FAIL_COUNTER=0
for TEST_NAME in ${FAILED_TESTS}; do
echo " ${TEST_NAME} (Log @: ${FAILED_TEST_LOGS[${FAIL_COUNTER}]})"
((FAIL_COUNTER++))
done
echo ""
echo "Python test-on-install FAILED"
exit 1
fi
|
#!/bin/bash
gcloud config set compute/region us-east4
gcloud config set compute/zone us-east4-a
|
#!/bin/bash
git clone https://github.com/julienlopez/ETL
git clone https://github.com/Microsoft/GSL
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get update -qq
sudo apt-get install -y cmake libboost-all-dev gcc-7 g++-7 qt5-default qt5-qmake lcov
sudo update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-7 90
|
#!/bin/bash
toilet -f wideterm --gay Starting tests... -S
cd ansible
echo -e "[defaults]\nremote_tmp = $HOME/.ansible/tmp" > ansible.cfg
echo -e "inventory = inventory" >> ansible.cfg
mkdir inventory
echo -e "[lxdhosts]\nubuntu-adi-test-lxdserver ansible_connection=lxd" > ./inventory/development_inventory
LXD_BRIDGE_IP=$(ip addr show dev lxdbr0 scope global | grep inet | grep -v inet6 | awk 'BEGIN {FS=" "}{print $2}' | cut -f1 -d"/")
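# e.g. "inet 10.0.3.1/24 ..." yields 10.0.3.1 (illustrative bridge address)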
# Update hosts file to resolve the hostname of the LXD container.
sudo cp /etc/resolv.conf /tmp/.resolv.conf.backup-$(date +%s)
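# perl -0 slurps the whole file, so only the first "nameserver " entry is prefixed with the LXD bridge IP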
sudo perl -0 -pi -e "s/nameserver /nameserver $LXD_BRIDGE_IP\nnameserver /" /etc/resolv.conf
echo -e "\n$(sudo lxc list ubuntu-adi-test-lxdserver -c4 | grep eth0 | cut -d' ' -f2)\tubuntu-adi-test-lxdserver" | sudo tee -a /etc/hosts
# Execute tests.
source ./hacking/env-setup
#sudo ansible --version
#sudo ./inventory/lxd.nex --list
#cat /etc/hosts
#sudo lxc list ubuntu-adi-test-lxdserver:
sudo rm ./inventory/lxd.js
sudo chmod +x ./inventory/lxd.js
sudo chmod +x ./inventory/lxd.nex
sudo ansible --version
sudo ansible-playbook ../ping.yml
# Recover state of resolv and hosts file
sudo perl -pi -e "s/nameserver $LXD_BRIDGE_IP\n//" /etc/resolv.conf
sudo cp /etc/hosts /tmp/hosts-$(date +%s)
sudo grep -v ubuntu-adi-test-lxdserver /etc/hosts | sudo tee /etc/hosts
|
#!/bin/sh
cmd=/usr/local/mysql-8.0/bin/mysqld_safe
conf=/data1/mysqldata/3306/my.cnf
${cmd} --defaults-file=${conf} --user=mysql &
|
#!/bin/sh
set -e
set -x
pod install
if [ "$1" = 'xcframework' ]
then
# archive the framework for iOS, macOS, Catalyst and the Simulator
xcodebuild archive -workspace MatrixSDK.xcworkspace -scheme MatrixSDK-iOS -destination "generic/platform=iOS" -archivePath build/MatrixSDK-iOS SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES IPHONEOS_DEPLOYMENT_TARGET=11.0 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS=NO
xcodebuild archive -workspace MatrixSDK.xcworkspace -scheme MatrixSDK-iOS -destination "generic/platform=iOS Simulator" -archivePath build/MatrixSDK-iOSSimulator SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES IPHONEOS_DEPLOYMENT_TARGET=11.0 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS=NO
xcodebuild archive -workspace MatrixSDK.xcworkspace -scheme MatrixSDK-macOS -destination "generic/platform=macOS" -archivePath build/MatrixSDK-macOS SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES MACOSX_DEPLOYMENT_TARGET=10.10 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS=NO
xcodebuild archive -workspace MatrixSDK.xcworkspace -scheme MatrixSDK-iOS -destination "generic/platform=macOS,variant=Mac Catalyst" -archivePath ./build/MatrixSDK-MacCatalyst SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES IPHONEOS_DEPLOYMENT_TARGET=13.0 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS=NO
cd build
# clean xcframework artifacts
if [ -d 'MatrixSDK.xcframework' ]; then rm -rf MatrixSDK.xcframework; fi
# build the xcframework
xcodebuild -create-xcframework -framework MatrixSDK-iOS.xcarchive/Products/Library/Frameworks/MatrixSDK.framework -framework MatrixSDK-iOSSimulator.xcarchive/Products/Library/Frameworks/MatrixSDK.framework -framework MatrixSDK-macOS.xcarchive/Products/Library/Frameworks/MatrixSDK.framework -framework MatrixSDK-MacCatalyst.xcarchive/Products/Library/Frameworks/MatrixSDK.framework -output MatrixSDK.xcframework
else
xcodebuild -workspace MatrixSDK.xcworkspace/ -scheme MatrixSDK -sdk iphonesimulator -destination 'name=iPhone 5s'
fi
|
#!/bin/bash
#
# Run project tests
#
# NOTE: This script expects to be run from the project root with
# ./scripts/run_tests.sh
PYTEST_ARGS="$@" make test
|
branch=$1
if [ -z "$branch" ]; then
branch=zgroup
fi
branch_name=$(git symbolic-ref -q HEAD)
branch_name=${branch_name##refs/heads/}
branch_name=${branch_name:-HEAD}
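# e.g. refs/heads/master -> master; empty output (detached HEAD) falls back to "HEAD"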
for file in $(find "../src/entries" -type f)
do
file_name=$(basename $file)
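# strip the extension to get the module name, e.g. entry.js -> entry (illustrative file name)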
module_name="${file_name%.*}"
./build.sh $module_name $branch_name
done
|
#!/bin/bash
function usage()
{
echo "Usage:"
echo " $0 [options]"
echo "Options:"
echo " -h Display help or usage"
echo " -p <opensbi_source_path> OpenSBI source path"
echo " -o <build_output_path> Build output path"
echo " -d Build and install documentation"
echo " -t Build only with no archive created"
echo " -j <num_threads> Number of threads for Make (Default: 1)"
echo " -s <archive_suffix> Archive name suffix (Default: unknown)"
echo " -x <riscv_xlen> RISC-V XLEN (Default: 64)"
exit 1;
}
# Command line options
BUILD_NUM_THREADS=1
BUILD_OUTPUT_PATH="$(pwd)/build"
BUILD_OPENSBI_SOURCE_PATH="$(pwd)"
BUILD_DOCS="no"
BUILD_ONLY="no"
BUILD_ARCHIVE_SUFFIX="unknown"
BUILD_RISCV_XLEN=64
while getopts "hdtj:o:p:s:x:" o; do
case "${o}" in
h)
usage
;;
d)
BUILD_DOCS="yes"
;;
t)
BUILD_ONLY="yes"
;;
j)
BUILD_NUM_THREADS=${OPTARG}
;;
o)
BUILD_OUTPUT_PATH=${OPTARG}
;;
p)
BUILD_OPENSBI_SOURCE_PATH=${OPTARG}
;;
s)
BUILD_ARCHIVE_SUFFIX=${OPTARG}
;;
x)
BUILD_RISCV_XLEN=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [ -z "${BUILD_OPENSBI_SOURCE_PATH}" ]; then
echo "Must specify OpenSBI source path"
usage
fi
if [ ! -d "${BUILD_OPENSBI_SOURCE_PATH}" ]; then
echo "OpenSBI source path does not exist"
usage
fi
if [ -z "${BUILD_ARCHIVE_SUFFIX}" ]; then
echo "Archive suffice cannot be empty"
usage
fi
# Get version of OpenSBI
BUILD_VERSION_MAJOR=$(grep "define OPENSBI_VERSION_MAJOR" "${BUILD_OPENSBI_SOURCE_PATH}/include/sbi/sbi_version.h" | sed 's/.*MAJOR.*\([0-9][0-9]*\)/\1/')
BUILD_VERSION_MINOR=$(grep "define OPENSBI_VERSION_MINOR" "${BUILD_OPENSBI_SOURCE_PATH}/include/sbi/sbi_version.h" | sed 's/.*MINOR.*\([0-9][0-9]*\)/\1/')
# Setup archive name
BUILD_ARCHIVE_NAME="opensbi-${BUILD_VERSION_MAJOR}.${BUILD_VERSION_MINOR}-rv${BUILD_RISCV_XLEN}-${BUILD_ARCHIVE_SUFFIX}"
# Setup platform list
case "${BUILD_RISCV_XLEN}" in
32)
# Setup 32-bit platform list
BUILD_PLATFORM_SUBDIR=("qemu/virt")
BUILD_PLATFORM_SUBDIR+=("qemu/sifive_u")
;;
64)
# Setup 64-bit platform list
BUILD_PLATFORM_SUBDIR=("qemu/virt")
BUILD_PLATFORM_SUBDIR+=("qemu/sifive_u")
BUILD_PLATFORM_SUBDIR+=("sifive/fu540")
BUILD_PLATFORM_SUBDIR+=("kendryte/k210")
BUILD_PLATFORM_SUBDIR+=("ariane-fpga")
BUILD_PLATFORM_SUBDIR+=("andes/ae350")
BUILD_PLATFORM_SUBDIR+=("thead/c910")
;;
*)
echo "Invalid RISC-V XLEN"
usage
;;
esac
# Ensure output directory is present
mkdir -p "${BUILD_OUTPUT_PATH}"
# Do a clean build first
make distclean
# Build and install generic library
echo "Build and install generic library XLEN=${BUILD_RISCV_XLEN}"
echo ""
make -C "${BUILD_OPENSBI_SOURCE_PATH}" O="${BUILD_OUTPUT_PATH}" I="${BUILD_OUTPUT_PATH}/${BUILD_ARCHIVE_NAME}" PLATFORM_RISCV_XLEN="${BUILD_RISCV_XLEN}" install_libsbi install_libsbiutils -j "${BUILD_NUM_THREADS}"
echo ""
# Build and install relevant platforms
for INDEX in $(seq 0 1 "$(expr ${#BUILD_PLATFORM_SUBDIR[*]} - 1)")
do
echo "Build and install PLATFORM=${BUILD_PLATFORM_SUBDIR[${INDEX}]} XLEN=${BUILD_RISCV_XLEN}"
echo ""
make -C "${BUILD_OPENSBI_SOURCE_PATH}" O="${BUILD_OUTPUT_PATH}" I="${BUILD_OUTPUT_PATH}/${BUILD_ARCHIVE_NAME}" PLATFORM="${BUILD_PLATFORM_SUBDIR[${INDEX}]}" PLATFORM_RISCV_XLEN="${BUILD_RISCV_XLEN}" install_libplatsbi install_firmwares -j "${BUILD_NUM_THREADS}"
echo ""
done
# Build and install docs
if [ "${BUILD_DOCS}" == "yes" ]; then
echo "Build and install docs"
echo ""
make -C "${BUILD_OPENSBI_SOURCE_PATH}" O="${BUILD_OUTPUT_PATH}" I="${BUILD_OUTPUT_PATH}/${BUILD_ARCHIVE_NAME}" install_docs
echo ""
fi
# Create archive file
if [ "${BUILD_ONLY}" == "no" ]; then
echo "Create archive ${BUILD_ARCHIVE_NAME}.tar.xz"
echo ""
tar -C "${BUILD_OUTPUT_PATH}" -cJvf "${BUILD_OUTPUT_PATH}/${BUILD_ARCHIVE_NAME}.tar.xz" "${BUILD_ARCHIVE_NAME}"
echo ""
fi
|
#!/bin/bash
set -e
# Colour constants
bold=`tput bold`
green=`tput setaf 2`
red=`tput setaf 1`
reset=`tput sgr0`
echo "${bold}Running all the tests! This might take a while...!${reset}"
./teechain_send.sh
sleep 1
./teechain_deposits.sh
sleep 1
./teechain_single_hop.sh
sleep 1
./teechain_settle.sh
sleep 1
./teechain_benchmark.sh
sleep 1
./teechain_backups_simple.sh
sleep 1
./teechain_backups_break.sh
sleep 1
./teechain_backups_settle.sh
sleep 1
./teechain_backups_send.sh
sleep 1
./teechain_backups_benchmark.sh
echo "...${bold}Looks like all the tests passed!!${reset}"
|
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script handles the creation of multiple clusters using kind and the
# ability to create and configure an insecure container registry.
set -o errexit
set -o nounset
set -o pipefail
# shellcheck source=util.sh
source "${BASH_SOURCE%/*}/util.sh"
NUM_CLUSTERS="${NUM_CLUSTERS:-2}"
KIND_IMAGE="${KIND_IMAGE:-}"
KIND_TAG="${KIND_TAG:-}"
OS="$(uname)"
function create-clusters() {
local num_clusters=${1}
local image_arg=""
if [[ "${KIND_IMAGE}" ]]; then
image_arg="--image=${KIND_IMAGE}"
elif [[ "${KIND_TAG}" ]]; then
image_arg="--image=kindest/node:${KIND_TAG}"
fi
for i in $(seq "${num_clusters}"); do
    kind create cluster --name "cluster${i}" ${image_arg:+"${image_arg}"}
fixup-cluster "${i}"
echo
done
echo "Waiting for clusters to be ready"
check-clusters-ready "${num_clusters}"
}
function fixup-cluster() {
local i=${1} # cluster num
if [ "$OS" != "Darwin" ];then
# Set container IP address as kube API endpoint in order for clusters to reach kube API servers in other clusters.
local docker_ip
docker_ip=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "cluster${i}-control-plane")
kubectl config set-cluster "kind-cluster${i}" --server="https://${docker_ip}:6443"
fi
# Simplify context name
kubectl config rename-context "kind-cluster${i}" "cluster${i}"
}
function check-clusters-ready() {
for i in $(seq "${1}"); do
util::wait-for-condition 'ok' "kubectl --context cluster${i} get --raw=/healthz &> /dev/null" 120
done
}
echo "Creating ${NUM_CLUSTERS} clusters"
create-clusters "${NUM_CLUSTERS}"
kubectl config use-context cluster1
echo "Complete"
|
unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) machine=Linux;;
Darwin*) machine=Mac;;
CYGWIN*) machine=Cygwin;;
MINGW*) machine=MinGw;;
    *)          machine="UNKNOWN:${unameOut}";;
esac
echo "${machine}"
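# Example: prints "Linux" on a GNU/Linux host and "Mac" on macOS.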
|
#!/bin/bash
sudo parallel ::: ./config_physical_sensors.sh ./config_dedup.sh
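# GNU parallel runs each argument after ::: as a separate job, so the two
# config scripts execute concurrently; roughly equivalent to:
#   sudo ./config_physical_sensors.sh & sudo ./config_dedup.sh & wait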
exit 0
|
#!/bin/bash
# Shell script to prepend the i3status output with media info
#####################
### SCRIPT_SETUP: #########################################################
#####################
## Defining default variable states and other setup configurations.
## Runtime params:
FOLLOW_PLAYERCTL=false
PLAYERCTL_SELECTED "spotify"
## Symbolic glyphs used:
SYMB_MEDIA_NOTE="♪♪"
SYMB_MEDIA_PLAY="⏵"
SYMB_MEDIA_PAUSE="="
SYMB_MEDIA_STOP="■"
## Parsed info buffer vars:
MEDIA_INFO=""
MEDIA_INFO_SHORT=""
#########################
### DEFINE_FUNCTIONS: #####################################################
#########################
## Common function definitions.
xml_escape() {
local JSON_TOPIC_RAW="$1"
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//\\/\\\\} # \ (backslash)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//\//\\\/} # / (forward slash)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//\"/\\\"} # " (double quote)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//$'\t'/\\t} # \t (tab)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//$'\n'/\\n} # \n (newline)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//$'\r'/\\r} # \r (carriage return)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//$'\f'/\\f} # \f (form feed)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//$'\b'/\\b} # \b (backspace)
  local JSON_TOPIC_RAW=${JSON_TOPIC_RAW//&/&amp;} # & (ampersand, escaped for pango markup)
echo "$JSON_TOPIC_RAW"
}
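## Example (illustrative), given the escapes above:
##   xml_escape 'AC/DC "T.N.T." & more'  ->  AC\/DC \"T.N.T.\" &amp; more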
pollMediaInfoUpdate() {
local STATUS MEDIA_ARTIST_TITLE MEDIA_STATE
#Pull Media Info
STATUS="$(playerctl --player=spotify,%any status || echo "null")"
if [ "$STATUS" = "null" ]; then
MEDIA_INFO=""; MEDIA_INFO_LONG=""; return 0
fi
  MEDIA_ARTIST_TITLE="$(playerctl --player=${PLAYERCTL_SELECTED},%any metadata --format '{{ markup_escape(artist) }} - {{ markup_escape(title) }}')"
if [ "$STATUS" = "Playing" ]; then
MEDIA_STATE="$SYMB_MEDIA_PLAY"
elif [ "$STATUS" = "Paused" ]; then
MEDIA_STATE="$SYMB_MEDIA_PAUSE"
else
MEDIA_STATE="$SYMB_MEDIA_STOP"
fi
MEDIA_PREPEND="$SYMB_MEDIA_NOTE($MEDIA_STATE)"
MEDIA_INFO="$MEDIA_PREPEND $MEDIA_ARTIST_TITLE "
#MEDIA_INFO_SHORT="$MEDIA_PREPEND - $MEDIA_TITLE "
MEDIA_INFO_SHORT="$MEDIA_INFO"
}
followMediaInfoUpdate() {
local STATUS MEDIA_TITLE MEDIA_ARTIST MEDIA_STATE
#Pull Media Info
STATUS="$(playerctl --player=${PLAYERCTL_SELECTED},%any status || echo "noPlayers")"
if [ "$STATUS" = "noPlayers" ]; then
MEDIA_INFO=""; MEDIA_INFO_LONG=""; return 0; fi
MEDIA_TITLE="$(playerctl --player=${PLAYERCTL_SELECTED},%any metadata title)"
MEDIA_ARTIST="$(playerctl --player=${PLAYERCTL_SELECTED},%any metadata artist)"
MEDIA_STATE=""
##
if [ "$STATUS" = "Playing" ]; then
MEDIA_STATE="$SYMB_MEDIA_PLAY"
elif [ "$STATUS" = "Paused" ]; then
MEDIA_STATE="$SYMB_MEDIA_PAUSE"
else
MEDIA_STATE="$SYMB_MEDIA_STOP"
fi
MEDIA_TITLE=$(xml_escape "$MEDIA_TITLE")
MEDIA_ARTIST=$(xml_escape "$MEDIA_ARTIST")
MEDIA_PREPEND="$SYMB_MEDIA_NOTE($MEDIA_STATE)"
MEDIA_INFO="$MEDIA_PREPEND - $MEDIA_TITLE "
MEDIA_INFO_LONG="$MEDIA_PREPEND $MEDIA_TITLE - $MEDIA_ARTIST "
}
#######################
### EXECUTE_SCRIPT: #######################################################
#######################
## Execute script linearly from this point.
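## i3status in JSON mode emits a version object, an opening "[", and then one
## status array per line (every line after the first prefixed with ","). The
## pipeline below passes those three header lines through untouched, then
## prepends MEDIA_LINE to each subsequent array, e.g. (illustrative):
##   in:  ,[{"name":"tztime","full_text":"2021-01-01 00:00"}]
##   out: ,[{"name":"media_info",...},{"name":"tztime","full_text":"2021-01-01 00:00"}]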
i3status | (read -r line && echo "$line" && read -r line && echo "$line" && read -r line && echo "$line" && pollMediaInfoUpdate && while :
do
if $FOLLOW_PLAYERCTL ; then
read -r line
else
read -r line && pollMediaInfoUpdate
fi
#Format MEDIA_INFO for i3Status XML
MEDIA_LINE="{\"name\":\"media_info\",\"markup\":\"pango\",\"border_bottom\":3,\"separator_block_width\":3,\"align\":\"right\",\"short_text\":\"${MEDIA_INFO_SHORT}\",\"full_text\":\"${MEDIA_INFO}\"}"
echo ",[${MEDIA_LINE},${line#,\[}" || echo "$line" || exit 1
done)
#######################
### SCRIPT_CLEANUP: #######################################################
#######################
## Cleanup vars and background processes from script execution.
unset MEDIA_INFO MEDIA_INFO_SHORT MEDIA_INFO_LONG FOLLOW_PLAYERCTL PLAYERCTL_SELECTED
#
# FILE_END
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2352-1
#
# Security announcement date: 2011-11-22 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:19 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - puppet:2.6.2-5+squeeze3
#
# Last versions recommended by security team:
# - puppet:2.6.2-5+squeeze10
#
# CVE List:
# - CVE-2011-3872
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade puppet=2.6.2-5+squeeze10 -y
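# Optional post-check that the pinned version is now installed:
#   dpkg -s puppet | grep '^Version:'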
|
#!/usr/bin/env bash
set -e
echo "Generating Sidekick Template Bundle"
rm -rf lib/src/templates/
mason bundle cli_template/bricks/package -t dart -o lib/src/templates/
mason bundle cli_template/bricks/entrypoint -t dart -o lib/src/templates/
mv lib/src/templates/package_bundle.dart lib/src/templates/package_bundle.g.dart
mv lib/src/templates/entrypoint_bundle.dart lib/src/templates/entrypoint_bundle.g.dart
dart format lib/src/templates/*.g.dart
|
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright © 2015 IBM Corporation
# This script checks the relocations of a vmlinux for "suspicious"
# relocations.
# based on relocs_check.pl
# Copyright © 2009 IBM Corporation
if [ $# -lt 3 ]; then
echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
exit 1
fi
# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
objdump="$1"
nm="$2"
vmlinux="$3"
# Remove from the bad relocations those that match an undefined weak symbol
# which will result in an absolute relocation to 0.
# Weak unresolved symbols are of that form in nm output:
# " w _binary__btf_vmlinux_bin_end"
undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
bad_relocs=$(
$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay
# On PPC64:
# R_PPC64_RELATIVE, R_PPC64_NONE
# On PPC:
# R_PPC_RELATIVE, R_PPC_ADDR16_HI,
# R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
# R_PPC_NONE
grep -F -w -v 'R_PPC64_RELATIVE
R_PPC64_NONE
R_PPC_ADDR16_LO
R_PPC_ADDR16_HI
R_PPC_ADDR16_HA
R_PPC_RELATIVE
R_PPC_NONE' |
([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
)
if [ -z "$bad_relocs" ]; then
exit 0
fi
num_bad=$(echo "$bad_relocs" | wc -l)
echo "WARNING: $num_bad bad relocations"
echo "$bad_relocs"
# If we see this type of relocation it's an indication that
# we /may/ be using an old version of binutils.
if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
fi
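# Example invocation (Kbuild normally supplies the tool paths; shown here for
# illustration):
#   ./relocs_check.sh "${CROSS_COMPILE}objdump" "${CROSS_COMPILE}nm" vmlinux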
|
#!/bin/bash
JMETER_VERSION="5.1.1"
# Example build line
# --build-arg IMAGE_TIMEZONE="Europe/Amsterdam"
docker build --build-arg JMETER_VERSION=${JMETER_VERSION} -t "justb4/jmeter:${JMETER_VERSION}" .
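# Example run of the resulting image (standard JMeter non-GUI flags; the test
# plan path is illustrative):
#   docker run --rm -v "$(pwd)":/work "justb4/jmeter:${JMETER_VERSION}" \
#     -n -t /work/plan.jmx -l /work/results.jtl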
|
#!/bin/bash
# LinuxGSM command_test_alert.sh function
# Author: Daniel Gibbs
# Website: https://linuxgsm.com
# Description: Sends a test alert.
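# Typically dispatched through the main LinuxGSM wrapper rather than run
# directly, e.g. (illustrative): ./gameserver test-alert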
local commandname="ALERT"
local commandaction="Alert"
local function_selfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"
fn_print_dots "${servername}"
check.sh
info_config.sh
alert="test"
alert.sh
core_exit.sh
|