#!/bin/bash
export JBOSS_CLI=$WILDFLY_HOME/bin/jboss-cli.sh
if [ ! -f wildfly.started ]; then
function wait_for_server() {
until $JBOSS_CLI -c "ls /deployment" &> /dev/null; do
echo "Waiting"
sleep 1
done
}
source $WILDFLY_HOME/bin/setup_admin_password.sh
source $WILDFLY_HOME/bin/install_eclipselink.sh
mkdir -p /tmp/deployments
mv $DEPLOYMENTS_DIR/* /tmp/deployments
echo "=> Starting WildFly server"
$WILDFLY_HOME/bin/standalone.sh -b=0.0.0.0 -c standalone.xml > /dev/null &
source $WILDFLY_HOME/bin/download_postgresql_driver.sh
echo "=> Waiting for the server to boot"
wait_for_server
source $WILDFLY_HOME/bin/setup_datasource.sh
echo "=> Shutting down WildFly"
$JBOSS_CLI -c ":shutdown"
mv /tmp/deployments/* $DEPLOYMENTS_DIR
rm -rf /tmp/deployments
touch wildfly.started
fi
echo "=> Starting WildFly"
$WILDFLY_HOME/bin/standalone.sh -b=0.0.0.0 -bmanagement=0.0.0.0 -c standalone.xml |
pkg_name=hab
_pkg_distname=$pkg_name
pkg_origin=core
pkg_version=$(cat "$PLAN_CONTEXT/../../VERSION")
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_license=('Apache-2.0')
pkg_source=nosuchfile.tar.gz
# The result is a portable, static binary in a zero-dependency package.
pkg_deps=()
pkg_build_deps=(
core/musl core/zlib-musl core/xz-musl core/bzip2-musl core/libarchive-musl
core/openssl-musl core/libsodium-musl
core/coreutils core/rust core/gcc
)
pkg_bin_dirs=(bin)
bin=$_pkg_distname
_common_prepare() {
do_default_prepare
# Can be either `--release` or `--debug` to determine cargo build strategy
build_type="--release"
build_line "Building artifacts with \`${build_type#--}' mode"
# Used by the `build.rs` program to set the version of the binaries
export PLAN_VERSION="${pkg_version}/${pkg_release}"
build_line "Setting PLAN_VERSION=$PLAN_VERSION"
# Used by Cargo to use a pristine, isolated directory for all compilation
export CARGO_TARGET_DIR="$HAB_CACHE_SRC_PATH/$pkg_dirname"
build_line "Setting CARGO_TARGET_DIR=$CARGO_TARGET_DIR"
}
do_prepare() {
_common_prepare
export rustc_target="x86_64-unknown-linux-musl"
build_line "Setting rustc_target=$rustc_target"
la_ldflags="-L$(pkg_path_for zlib-musl)/lib -lz"
la_ldflags="$la_ldflags -L$(pkg_path_for xz-musl)/lib -llzma"
la_ldflags="$la_ldflags -L$(pkg_path_for bzip2-musl)/lib -lbz2"
la_ldflags="$la_ldflags -L$(pkg_path_for openssl-musl)/lib -lssl -lcrypto"
export LIBARCHIVE_LIB_DIR=$(pkg_path_for libarchive-musl)/lib
export LIBARCHIVE_INCLUDE_DIR=$(pkg_path_for libarchive-musl)/include
export LIBARCHIVE_LDFLAGS="$la_ldflags"
export LIBARCHIVE_STATIC=true
export OPENSSL_LIB_DIR=$(pkg_path_for openssl-musl)/lib
export OPENSSL_INCLUDE_DIR=$(pkg_path_for openssl-musl)/include
export OPENSSL_STATIC=true
export SODIUM_LIB_DIR=$(pkg_path_for libsodium-musl)/lib
export SODIUM_STATIC=true
# Used to find libgcc_s.so.1 when compiling `build.rs` in dependencies. Since
# this is used only at build time, we will use the version found in the gcc
# package proper--it won't find its way into the final binaries.
export LD_LIBRARY_PATH=$(pkg_path_for gcc)/lib
build_line "Setting LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
}
do_build() {
pushd "$PLAN_CONTEXT" > /dev/null
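# `${build_type#--debug}` strips a leading "--debug", so debug builds pass no profile
# flag (cargo builds debug by default) while "--release" is passed through unchanged.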
cargo build ${build_type#--debug} --target=$rustc_target --verbose
popd > /dev/null
}
do_install() {
install -v -D $CARGO_TARGET_DIR/$rustc_target/${build_type#--}/$bin \
$pkg_prefix/bin/$bin
}
do_strip() {
if [[ "$build_type" != "--debug" ]]; then
do_default_strip
fi
}
# Turn the remaining default phases into no-ops
do_download() {
return 0
}
do_verify() {
return 0
}
do_unpack() {
return 0
}
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3358-1
#
# Security announcement date: 2015-09-13 00:00:00 UTC
# Script generation date: 2017-02-07 21:05:53 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - php5:5.4.45-0+deb7u1
#
# Last versions recommended by the security team:
# - php5:5.4.45-0+deb7u7
#
# CVE List:
# - CVE-2015-6834
# - CVE-2015-6835
# - CVE-2015-6836
# - CVE-2015-6837
# - CVE-2015-6838
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade php5=5.4.45-0+deb7u7 -y
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#################################################################################
# This script will
# 1. Check prerequisite libraries. Including:
# cmake byacc flex automake libtool binutils-dev libiberty-dev bison
# 2. Compile and install all thirdparties which are downloaded
# using *download-thirdparty.sh*.
#
# This script will run *download-thirdparty.sh* once again
# to check if all thirdparties have been downloaded, unpacked and patched.
#################################################################################
set -e
curdir=`dirname "$0"`
curdir=`cd "$curdir"; pwd`
export DORIS_HOME=$curdir/..
export TP_DIR=$curdir
# include custom environment variables
if [[ -f ${DORIS_HOME}/env.sh ]]; then
. ${DORIS_HOME}/env.sh
fi
if [[ ! -f ${TP_DIR}/download-thirdparty.sh ]]; then
echo "Download thirdparty script is missing."
exit 1
fi
if [ ! -f ${TP_DIR}/vars.sh ]; then
echo "vars.sh is missing."
exit 1
fi
. ${TP_DIR}/vars.sh
cd $TP_DIR
# Download thirdparties.
${TP_DIR}/download-thirdparty.sh
export LD_LIBRARY_PATH=$TP_DIR/installed/lib:$LD_LIBRARY_PATH
# set COMPILER
if [[ ! -z ${DORIS_GCC_HOME} ]]; then
export CC=${DORIS_GCC_HOME}/bin/gcc
export CPP=${DORIS_GCC_HOME}/bin/cpp
export CXX=${DORIS_GCC_HOME}/bin/g++
else
echo "DORIS_GCC_HOME environment variable is not set"
exit 1
fi
# prepare installed prefix
mkdir -p ${TP_DIR}/installed
check_prerequest() {
local CMD=$1
local NAME=$2
if ! $CMD; then
echo $NAME is missing
exit 1
else
echo $NAME is found
fi
}
# sudo apt-get install cmake
# sudo yum install cmake
check_prerequest "${CMAKE_CMD} --version" "cmake"
# sudo apt-get install byacc
# sudo yum install byacc
check_prerequest "byacc -V" "byacc"
# sudo apt-get install flex
# sudo yum install flex
check_prerequest "flex -V" "flex"
# sudo apt-get install automake
# sudo yum install automake
check_prerequest "automake --version" "automake"
# sudo apt-get install libtool
# sudo yum install libtool
check_prerequest "libtoolize --version" "libtool"
# sudo apt-get install binutils-dev
# sudo yum install binutils-devel
#check_prerequest "locate libbfd.a" "binutils-dev"
# sudo apt-get install libiberty-dev
# no need in centos 7.1
#check_prerequest "locate libiberty.a" "libiberty-dev"
# sudo apt-get install bison
# sudo yum install bison
#check_prerequest "bison --version" "bison"
#########################
# build all thirdparties
#########################
check_if_source_exist() {
if [ -z $1 ]; then
echo "dir should be specified to check if it exists."
exit 1
fi
if [ ! -d $TP_SOURCE_DIR/$1 ];then
echo "$TP_SOURCE_DIR/$1 does not exist."
exit 1
fi
echo "===== begin build $1"
}
check_if_archieve_exist() {
if [ -z $1 ]; then
echo "archive should be specified to check if it exists."
exit 1
fi
if [ ! -f $TP_SOURCE_DIR/$1 ];then
echo "$TP_SOURCE_DIR/$1 does not exist."
exit 1
fi
}
# libevent
build_libevent() {
check_if_source_exist $LIBEVENT_SOURCE
cd $TP_SOURCE_DIR/$LIBEVENT_SOURCE
if [ ! -f configure ]; then
./autogen.sh
fi
CFLAGS="-std=c99 -fPIC -D_BSD_SOURCE -fno-omit-frame-pointer -g -ggdb -O2 -I${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR}" \
./configure --prefix=$TP_INSTALL_DIR --enable-shared=no --disable-samples --disable-libevent-regress
make -j$PARALLEL && make install
}
build_openssl() {
MACHINE_TYPE=$(uname -m)
OPENSSL_PLATFORM="linux-x86_64"
if [[ "${MACHINE_TYPE}" == "aarch64" ]]; then
OPENSSL_PLATFORM="linux-aarch64"
fi
check_if_source_exist $OPENSSL_SOURCE
cd $TP_SOURCE_DIR/$OPENSSL_SOURCE
CPPFLAGS="-I${TP_INCLUDE_DIR} -fPIC" \
CXXFLAGS="-I${TP_INCLUDE_DIR} -fPIC" \
LDFLAGS="-L${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
LIBDIR="lib" \
./Configure --prefix=$TP_INSTALL_DIR -zlib -shared ${OPENSSL_PLATFORM}
make -j$PARALLEL && make install
if [ -f $TP_INSTALL_DIR/lib64/libcrypto.a ]; then
mkdir -p $TP_INSTALL_DIR/lib && \
cp $TP_INSTALL_DIR/lib64/libcrypto.a $TP_INSTALL_DIR/lib/libcrypto.a && \
cp $TP_INSTALL_DIR/lib64/libssl.a $TP_INSTALL_DIR/lib/libssl.a
fi
# NOTE(zc): remove these dynamic library files to make libcurl link statically.
# If these files are not removed, I don't know how to make libcurl link against the static library.
if [ -f $TP_INSTALL_DIR/lib64/libcrypto.so ]; then
rm -rf $TP_INSTALL_DIR/lib64/libcrypto.so*
fi
if [ -f $TP_INSTALL_DIR/lib64/libssl.so ]; then
rm -rf $TP_INSTALL_DIR/lib64/libssl.so*
fi
if [ -f $TP_INSTALL_DIR/lib/libcrypto.so ]; then
rm -rf $TP_INSTALL_DIR/lib/libcrypto.so*
fi
if [ -f $TP_INSTALL_DIR/lib/libssl.so ]; then
rm -rf $TP_INSTALL_DIR/lib/libssl.so*
fi
}
# thrift
build_thrift() {
check_if_source_exist $THRIFT_SOURCE
cd $TP_SOURCE_DIR/$THRIFT_SOURCE
if [ ! -f configure ]; then
./bootstrap.sh
fi
echo ${TP_LIB_DIR}
./configure CPPFLAGS="-I${TP_INCLUDE_DIR}" LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" LIBS="-lcrypto -ldl -lssl" CFLAGS="-fPIC" \
--prefix=$TP_INSTALL_DIR --docdir=$TP_INSTALL_DIR/doc --enable-static --disable-shared --disable-tests \
--disable-tutorial --without-qt4 --without-qt5 --without-csharp --without-erlang --without-nodejs \
--without-lua --without-perl --without-php --without-php_extension --without-dart --without-ruby \
--without-haskell --without-go --without-haxe --without-d --without-python --without-java --with-cpp \
--with-libevent=$TP_INSTALL_DIR --with-boost=$TP_INSTALL_DIR --with-openssl=$TP_INSTALL_DIR
if [ -f compiler/cpp/thrifty.hh ];then
mv compiler/cpp/thrifty.hh compiler/cpp/thrifty.h
fi
make -j$PARALLEL && make install
}
# llvm
build_llvm() {
MACHINE_TYPE=$(uname -m)
LLVM_TARGET="X86"
if [[ "${MACHINE_TYPE}" == "aarch64" ]]; then
LLVM_TARGET="AArch64"
fi
check_if_source_exist $LLVM_SOURCE
check_if_source_exist $CLANG_SOURCE
check_if_source_exist $COMPILER_RT_SOURCE
if [ ! -d $TP_SOURCE_DIR/$LLVM_SOURCE/tools/clang ]; then
cp -rf $TP_SOURCE_DIR/$CLANG_SOURCE $TP_SOURCE_DIR/$LLVM_SOURCE/tools/clang
fi
if [ ! -d $TP_SOURCE_DIR/$LLVM_SOURCE/projects/compiler-rt ]; then
cp -rf $TP_SOURCE_DIR/$COMPILER_RT_SOURCE $TP_SOURCE_DIR/$LLVM_SOURCE/projects/compiler-rt
fi
cd $TP_SOURCE_DIR
mkdir llvm-build -p && cd llvm-build
rm -rf CMakeCache.txt CMakeFiles/
LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" \
$CMAKE_CMD -DLLVM_REQUIRES_RTTI:Bool=True -DLLVM_TARGETS_TO_BUILD=${LLVM_TARGET} -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF -DLLVM_ENABLE_PIC=true -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE="RELEASE" -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR/llvm ../$LLVM_SOURCE
make -j$PARALLEL REQUIRES_RTTI=1 && make install
}
# protobuf
build_protobuf() {
check_if_source_exist $PROTOBUF_SOURCE
cd $TP_SOURCE_DIR/$PROTOBUF_SOURCE
rm -fr gmock
mkdir gmock && cd gmock && tar xf ${TP_SOURCE_DIR}/googletest-release-1.8.0.tar.gz \
&& mv googletest-release-1.8.0 gtest && cd $TP_SOURCE_DIR/$PROTOBUF_SOURCE && ./autogen.sh
CXXFLAGS="-fPIC -O2 -I ${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" \
./configure --prefix=${TP_INSTALL_DIR} --disable-shared --enable-static --with-zlib=${TP_INSTALL_DIR}/include
cd src
sed -i 's/^AM_LDFLAGS\(.*\)$/AM_LDFLAGS\1 -all-static/' Makefile
cd -
make -j$PARALLEL && make install
}
# gflags
build_gflags() {
check_if_source_exist $GFLAGS_SOURCE
cd $TP_SOURCE_DIR/$GFLAGS_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
$CMAKE_CMD -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_POSITION_INDEPENDENT_CODE=On ../
make -j$PARALLEL && make install
}
# glog
build_glog() {
check_if_source_exist $GLOG_SOURCE
cd $TP_SOURCE_DIR/$GLOG_SOURCE
# to generate config.guess and config.sub to support aarch64
rm -rf config.*
autoreconf -i
CPPFLAGS="-I${TP_INCLUDE_DIR} -fpermissive -fPIC" \
LDFLAGS="-L${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR --enable-frame-pointers --disable-shared --enable-static
make -j$PARALLEL && make install
}
# gtest
build_gtest() {
check_if_source_exist $GTEST_SOURCE
cd $TP_SOURCE_DIR/$GTEST_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
$CMAKE_CMD -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_POSITION_INDEPENDENT_CODE=On ../
make -j$PARALLEL && make install
}
# rapidjson
build_rapidjson() {
check_if_source_exist $RAPIDJSON_SOURCE
rm $TP_INSTALL_DIR/rapidjson -rf
cp $TP_SOURCE_DIR/$RAPIDJSON_SOURCE/include/rapidjson $TP_INCLUDE_DIR/ -r
}
# snappy
build_snappy() {
check_if_source_exist $SNAPPY_SOURCE
cd $TP_SOURCE_DIR/$SNAPPY_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
CFLAGS="-O3" CXXFLAGS="-O3" $CMAKE_CMD -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DCMAKE_POSITION_INDEPENDENT_CODE=On \
-DCMAKE_INSTALL_INCLUDEDIR=$TP_INCLUDE_DIR/snappy \
-DSNAPPY_BUILD_TESTS=0 ../
make -j$PARALLEL && make install
if [ -f $TP_INSTALL_DIR/lib64/libsnappy.a ]; then
mkdir -p $TP_INSTALL_DIR/lib && cp $TP_INSTALL_DIR/lib64/libsnappy.a $TP_INSTALL_DIR/lib/libsnappy.a
fi
#build for libarrow.a
cp $TP_INCLUDE_DIR/snappy/snappy-c.h $TP_INCLUDE_DIR/snappy-c.h && \
cp $TP_INCLUDE_DIR/snappy/snappy-sinksource.h $TP_INCLUDE_DIR/snappy-sinksource.h && \
cp $TP_INCLUDE_DIR/snappy/snappy-stubs-public.h $TP_INCLUDE_DIR/snappy-stubs-public.h && \
cp $TP_INCLUDE_DIR/snappy/snappy.h $TP_INCLUDE_DIR/snappy.h && \
cp $TP_INSTALL_DIR/lib/libsnappy.a $TP_INSTALL_DIR/libsnappy.a
}
# gperftools
build_gperftools() {
check_if_source_exist $GPERFTOOLS_SOURCE
cd $TP_SOURCE_DIR/$GPERFTOOLS_SOURCE
if [ ! -f configure ]; then
./autogen.sh
fi
CPPFLAGS="-I${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR}" \
LD_LIBRARY_PATH="${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR/gperftools --disable-shared --enable-static --disable-libunwind --with-pic --enable-frame-pointers
make -j$PARALLEL && make install
}
# zlib
build_zlib() {
check_if_source_exist $ZLIB_SOURCE
cd $TP_SOURCE_DIR/$ZLIB_SOURCE
CPPFLAGS="-I${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR --static
make -j$PARALLEL && make install
}
# lz4
build_lz4() {
check_if_source_exist $LZ4_SOURCE
cd $TP_SOURCE_DIR/$LZ4_SOURCE
make -j$PARALLEL install PREFIX=$TP_INSTALL_DIR \
INCLUDEDIR=$TP_INCLUDE_DIR/lz4/
}
# bzip
build_bzip() {
check_if_source_exist $BZIP_SOURCE
cd $TP_SOURCE_DIR/$BZIP_SOURCE
CFLAGS="-fPIC" \
make -j$PARALLEL install PREFIX=$TP_INSTALL_DIR
}
# lzo2
build_lzo2() {
check_if_source_exist $LZO2_SOURCE
cd $TP_SOURCE_DIR/$LZO2_SOURCE
CPPFLAGS="-I${TP_INCLUDE_DIR} -fPIC" \
LDFLAGS="-L${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR --disable-shared --enable-static
make -j$PARALLEL && make install
}
# curl
build_curl() {
check_if_source_exist $CURL_SOURCE
cd $TP_SOURCE_DIR/$CURL_SOURCE
CPPFLAGS="-I${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR}" LIBS="-lcrypto -lssl -lcrypto -ldl" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR --disable-shared --enable-static \
--without-librtmp --with-ssl=${TP_INSTALL_DIR} --without-libidn2 --disable-ldap --enable-ipv6
make -j$PARALLEL && make install
}
# re2
build_re2() {
check_if_source_exist $RE2_SOURCE
cd $TP_SOURCE_DIR/$RE2_SOURCE
$CMAKE_CMD -DBUILD_SHARED_LIBS=0 -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR
make -j$PARALLEL install
}
# boost
build_boost() {
check_if_source_exist $BOOST_SOURCE
cd $TP_SOURCE_DIR/$BOOST_SOURCE
echo "using gcc : doris : ${CXX} ; " > tools/build/src/user-config.jam
./bootstrap.sh --prefix=$TP_INSTALL_DIR
./b2 --toolset=gcc-doris link=static -d0 -j$PARALLEL --without-mpi --without-graph --without-graph_parallel --without-python cxxflags="-std=c++11 -fPIC -I$TP_INCLUDE_DIR -L$TP_LIB_DIR" install
}
# mysql
build_mysql() {
check_if_source_exist $MYSQL_SOURCE
check_if_source_exist $BOOST_FOR_MYSQL_SOURCE
cd $TP_SOURCE_DIR/$MYSQL_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
if [ ! -d $BOOST_FOR_MYSQL_SOURCE ]; then
cp $TP_SOURCE_DIR/$BOOST_FOR_MYSQL_SOURCE ./ -rf
fi
$CMAKE_CMD ../ -DWITH_BOOST=`pwd`/$BOOST_FOR_MYSQL_SOURCE -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR/mysql/ \
-DCMAKE_INCLUDE_PATH=$TP_INCLUDE_DIR -DCMAKE_LIBRARY_PATH=$TP_LIB_DIR -DWITHOUT_SERVER=1 \
-DCMAKE_CXX_FLAGS_RELWITHDEBINFO="-O3 -g -fabi-version=2 -fno-omit-frame-pointer -fno-strict-aliasing -std=gnu++11" \
-DDISABLE_SHARED=1 -DBUILD_SHARED_LIBS=0
make -j$PARALLEL mysqlclient
# copy headers manually
rm ../../../installed/include/mysql/ -rf
mkdir ../../../installed/include/mysql/ -p
cp -R ./include/* ../../../installed/include/mysql/
cp -R ../include/* ../../../installed/include/mysql/
cp ../libbinlogevents/export/binary_log_types.h ../../../installed/include/mysql/
echo "mysql headers are installed."
# copy libmysqlclient.a
cp libmysql/libmysqlclient.a ../../../installed/lib/
echo "mysql client lib is installed."
}
#leveldb
build_leveldb() {
check_if_source_exist $LEVELDB_SOURCE
cd $TP_SOURCE_DIR/$LEVELDB_SOURCE
CXXFLAGS="-fPIC" make -j$PARALLEL
cp out-static/libleveldb.a ../../installed/lib/libleveldb.a
cp -r include/leveldb ../../installed/include/
}
# brpc
build_brpc() {
check_if_source_exist $BRPC_SOURCE
cd $TP_SOURCE_DIR/$BRPC_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" \
$CMAKE_CMD -v -DBUILD_SHARED_LIBS=0 -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DBRPC_WITH_GLOG=ON -DWITH_GLOG=ON -DCMAKE_INCLUDE_PATH="$TP_INSTALL_DIR/include" \
-DCMAKE_LIBRARY_PATH="$TP_INSTALL_DIR/lib;$TP_INSTALL_DIR/lib64" \
-DPROTOBUF_PROTOC_EXECUTABLE=$TP_INSTALL_DIR/bin/protoc \
-DProtobuf_PROTOC_EXECUTABLE=$TP_INSTALL_DIR/bin/protoc ..
make -j$PARALLEL && make install
if [ -f $TP_INSTALL_DIR/lib/libbrpc.a ]; then
mkdir -p $TP_INSTALL_DIR/lib64 && cp $TP_INSTALL_DIR/lib/libbrpc.a $TP_INSTALL_DIR/lib64/libbrpc.a
fi
}
# rocksdb
build_rocksdb() {
check_if_source_exist $ROCKSDB_SOURCE
cd $TP_SOURCE_DIR/$ROCKSDB_SOURCE
CFLAGS="-I ${TP_INCLUDE_DIR} -I ${TP_INCLUDE_DIR}/snappy -I ${TP_INCLUDE_DIR}/lz4" CXXFLAGS="-fPIC -Wno-deprecated-copy -Wno-stringop-truncation -Wno-pessimizing-move" LDFLAGS="-static-libstdc++ -static-libgcc" \
PORTABLE=1 make USE_RTTI=1 -j$PARALLEL static_lib
cp librocksdb.a ../../installed/lib/librocksdb.a
cp -r include/rocksdb ../../installed/include/
}
# librdkafka
build_librdkafka() {
check_if_source_exist $LIBRDKAFKA_SOURCE
cd $TP_SOURCE_DIR/$LIBRDKAFKA_SOURCE
CPPFLAGS="-I${TP_INCLUDE_DIR}" \
LDFLAGS="-L${TP_LIB_DIR}" \
CFLAGS="-fPIC" \
./configure --prefix=$TP_INSTALL_DIR --enable-static --disable-sasl
make -j$PARALLEL && make install
}
# flatbuffers
build_flatbuffers() {
check_if_source_exist $FLATBUFFERS_SOURCE
cd $TP_SOURCE_DIR/$FLATBUFFERS_SOURCE
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
CXXFLAGS="-fPIC -Wno-class-memaccess" \
LDFLAGS="-static-libstdc++ -static-libgcc" \
${CMAKE_CMD} ..
make -j$PARALLEL
cp flatc ../../../installed/bin/flatc
cp -r ../include/flatbuffers ../../../installed/include/flatbuffers
cp libflatbuffers.a ../../../installed/lib/libflatbuffers.a
}
# arrow
build_arrow() {
check_if_source_exist $ARROW_SOURCE
cd $TP_SOURCE_DIR/$ARROW_SOURCE/cpp && mkdir -p release && cd release
export ARROW_BROTLI_URL=${TP_SOURCE_DIR}/${BROTLI_NAME}
export ARROW_DOUBLE_CONVERSION_URL=${TP_SOURCE_DIR}/${DOUBLE_CONVERSION_NAME}
export ARROW_GLOG_URL=${TP_SOURCE_DIR}/${GLOG_NAME}
export ARROW_LZ4_URL=${TP_SOURCE_DIR}/${LZ4_NAME}
export ARROW_FLATBUFFERS_URL=${TP_SOURCE_DIR}/${FLATBUFFERS_NAME}
export ARROW_ZSTD_URL=${TP_SOURCE_DIR}/${ZSTD_NAME}
export ARROW_JEMALLOC_URL=${TP_SOURCE_DIR}/${JEMALLOC_NAME}
export LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc"
${CMAKE_CMD} -DARROW_PARQUET=ON -DARROW_IPC=ON -DARROW_USE_GLOG=off -DARROW_BUILD_SHARED=OFF \
-DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DARROW_BOOST_USE_SHARED=OFF -DARROW_GFLAGS_USE_SHARED=OFF -DBoost_NO_BOOST_CMAKE=ON -DBOOST_ROOT=$TP_INSTALL_DIR \
-Dgflags_ROOT=$TP_INSTALL_DIR/ \
-DSnappy_ROOT=$TP_INSTALL_DIR/ \
-DGLOG_ROOT=$TP_INSTALL_DIR/ \
-DLZ4_ROOT=$TP_INSTALL_DIR/ \
-DThrift_ROOT=$TP_INSTALL_DIR/ ..
make -j$PARALLEL && make install
#copy dep libs
cp -rf ./jemalloc_ep-prefix/src/jemalloc_ep/dist/lib/libjemalloc_pic.a $TP_INSTALL_DIR/lib64/libjemalloc.a
cp -rf ./brotli_ep/src/brotli_ep-install/lib/libbrotlienc-static.a $TP_INSTALL_DIR/lib64/libbrotlienc.a
cp -rf ./brotli_ep/src/brotli_ep-install/lib/libbrotlidec-static.a $TP_INSTALL_DIR/lib64/libbrotlidec.a
cp -rf ./brotli_ep/src/brotli_ep-install/lib/libbrotlicommon-static.a $TP_INSTALL_DIR/lib64/libbrotlicommon.a
if [ -f ./zstd_ep-install/lib64/libzstd.a ]; then
cp -rf ./zstd_ep-install/lib64/libzstd.a $TP_INSTALL_DIR/lib64/libzstd.a
else
cp -rf ./zstd_ep-install/lib/libzstd.a $TP_INSTALL_DIR/lib64/libzstd.a
fi
cp -rf ./double-conversion_ep/src/double-conversion_ep/lib/libdouble-conversion.a $TP_INSTALL_DIR/lib64/libdouble-conversion.a
}
# s2
build_s2() {
check_if_source_exist $S2_SOURCE
cd $TP_SOURCE_DIR/s2geometry-0.9.0
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
CXXFLAGS="-O3" \
LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" \
$CMAKE_CMD -v -DBUILD_SHARED_LIBS=0 -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_INCLUDE_PATH="$TP_INSTALL_DIR/include" \
-DBUILD_SHARED_LIBS=OFF \
-DGFLAGS_ROOT_DIR="$TP_INSTALL_DIR/include" \
-DWITH_GFLAGS=ON \
-DGLOG_ROOT_DIR="$TP_INSTALL_DIR/include" \
-DWITH_GLOG=ON \
-DCMAKE_LIBRARY_PATH="$TP_INSTALL_DIR/lib;$TP_INSTALL_DIR/lib64" ..
make -j$PARALLEL && make install
}
# bitshuffle
build_bitshuffle() {
check_if_source_exist $BITSHUFFLE_SOURCE
cd $TP_SOURCE_DIR/$BITSHUFFLE_SOURCE
PREFIX=$TP_INSTALL_DIR
# This library has significant optimizations when built with -mavx2. However,
# we still need to support non-AVX2-capable hardware. So, we build it twice,
# once with the flag and once without, and use some linker tricks to
# suffix the AVX2 symbols with '_avx2'.
arches="default avx2"
MACHINE_TYPE=$(uname -m)
# Because aarch64 does not support AVX2, disable it.
if [[ "${MACHINE_TYPE}" == "aarch64" ]]; then
arches="default"
fi
to_link=""
for arch in $arches ; do
arch_flag=""
if [ "$arch" == "avx2" ]; then
arch_flag="-mavx2"
fi
tmp_obj=bitshuffle_${arch}_tmp.o
dst_obj=bitshuffle_${arch}.o
${CC:-gcc} $EXTRA_CFLAGS $arch_flag -std=c99 -I$PREFIX/include/lz4/ -O3 -DNDEBUG -fPIC -c \
"src/bitshuffle_core.c" \
"src/bitshuffle.c" \
"src/iochain.c"
# Merge the object files together to produce a combined .o file.
ld -r -o $tmp_obj bitshuffle_core.o bitshuffle.o iochain.o
# For the AVX2 symbols, suffix them.
if [ "$arch" == "avx2" ]; then
# Create a mapping file with '<old_sym> <suffixed_sym>' on each line.
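# e.g. a defined symbol like "bshuf_compress_lz4" yields the line "bshuf_compress_lz4 bshuf_compress_lz4_avx2"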
nm --defined-only --extern-only $tmp_obj | while read addr type sym ; do
echo ${sym} ${sym}_${arch}
done > renames.txt
objcopy --redefine-syms=renames.txt $tmp_obj $dst_obj
else
mv $tmp_obj $dst_obj
fi
to_link="$to_link $dst_obj"
done
rm -f libbitshuffle.a
ar rs libbitshuffle.a $to_link
mkdir -p $PREFIX/include/bitshuffle
cp libbitshuffle.a $PREFIX/lib/
cp $TP_SOURCE_DIR/$BITSHUFFLE_SOURCE/src/bitshuffle.h $PREFIX/include/bitshuffle/bitshuffle.h
cp $TP_SOURCE_DIR/$BITSHUFFLE_SOURCE/src/bitshuffle_core.h $PREFIX/include/bitshuffle/bitshuffle_core.h
}
# croaring bitmap
build_croaringbitmap() {
check_if_source_exist $CROARINGBITMAP_SOURCE
cd $TP_SOURCE_DIR/CRoaring-0.2.60
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
CXXFLAGS="-O3" \
LDFLAGS="-L${TP_LIB_DIR} -static-libstdc++ -static-libgcc" \
$CMAKE_CMD -v -DROARING_BUILD_STATIC=ON -DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR \
-DCMAKE_INCLUDE_PATH="$TP_INSTALL_DIR/include" \
-DENABLE_ROARING_TESTS=OFF \
-DCMAKE_LIBRARY_PATH="$TP_INSTALL_DIR/lib;$TP_INSTALL_DIR/lib64" ..
make -j$PARALLEL && make install
}
#orc
build_orc() {
check_if_source_exist $ORC_SOURCE
cd $TP_SOURCE_DIR/orc-1.5.8
mkdir build -p && cd build
rm -rf CMakeCache.txt CMakeFiles/
CXXFLAGS="-O3 -Wno-array-bounds" \
$CMAKE_CMD ../ -DBUILD_JAVA=OFF \
-DPROTOBUF_HOME=$TP_INSTALL_DIR \
-DSNAPPY_HOME=$TP_INSTALL_DIR \
-DGTEST_HOME=$TP_INSTALL_DIR \
-DLZ4_HOME=$TP_INSTALL_DIR \
-DLZ4_INCLUDE_DIR=$TP_INSTALL_DIR/include/lz4 \
-DZLIB_HOME=$TP_INSTALL_DIR\
-DBUILD_LIBHDFSPP=OFF \
-DBUILD_CPP_TESTS=OFF \
-DCMAKE_INSTALL_PREFIX=$TP_INSTALL_DIR
make -j$PARALLEL && make install
}
#cctz
build_cctz() {
check_if_source_exist $CCTZ_SOURCE
cd $TP_SOURCE_DIR/cctz-2.3
export PREFIX=$TP_INSTALL_DIR
make -j$PARALLEL && make install
}
# See https://github.com/apache/incubator-doris/issues/2910
# LLVM related codes have already be removed in master, so there is
# no need to build llvm tool here.
# Currently, some old release of Doris may still need it, so for now
# we just comment it, instead of remove it.
# build_llvm
build_libevent
build_zlib
build_lz4
build_bzip
build_lzo2
build_openssl
build_boost # must before thrift
build_protobuf
build_gflags
build_gtest
build_glog
build_rapidjson
build_snappy
build_gperftools
build_curl
build_re2
build_mysql
build_thrift
build_leveldb
build_brpc
build_rocksdb
build_librdkafka
build_flatbuffers
build_arrow
build_s2
build_bitshuffle
build_croaringbitmap
build_orc
build_cctz
echo "Finished building all thirdparties"
|
#
# Used to generate the envs for the boot and task scripts
#
<% _.each(obj.commonEnvs, function (commonEnv) { %>
{
<% if (commonEnv.surroundWithSingleQuotes === true) { %>
export <%= commonEnv.key %>='<%= commonEnv.value %>';
<% } else { %>
export <%= commonEnv.key %>="<%= commonEnv.value %>";
<% } %>
} || {
exec_cmd "echo 'An error occurred while trying to export an environment variable: <%= commonEnv.key %> '"
return 1
}
<% }); %>
<% _.each(obj.taskEnvs, function (taskEnv) { %>
{
<% if (taskEnv.surroundWithSingleQuotes === true) { %>
export <%= taskEnv.key %>='<%= taskEnv.value %>';
<% } else { %>
export <%= taskEnv.key %>="<%= taskEnv.value %>";
<% } %>
} || {
exec_cmd "echo 'An error occurred while trying to export an environment variable: <%= taskEnv.key %> '"
return 1
}
<% }); %>
export SHIPPABLE_NODE_ARCHITECTURE="<%= obj.shippableRuntimeEnvs.shippableNodeArchitecture %>"
export SHIPPABLE_NODE_OPERATING_SYSTEM="<%= obj.shippableRuntimeEnvs.shippableNodeOperatingSystem %>"
export TASK_NAME="<%= obj.shippableRuntimeEnvs.taskName %>"
export TASK_IN_CONTAINER=<%= obj.shippableRuntimeEnvs.isTaskInContainer %>
if [ "$TASK_IN_CONTAINER" == true ]; then
export TASK_CONTAINER_OPTIONS="<%= obj.shippableRuntimeEnvs.taskContainerOptions %>"
export TASK_CONTAINER_IMAGE="<%= obj.shippableRuntimeEnvs.taskContainerImage %>"
export TASK_CONTAINER_IMAGE_SHOULD_PULL="<%= obj.shippableRuntimeEnvs.shouldPullTaskContainerImage %>"
export TASK_CONTAINER_COMMAND="<%= obj.shippableRuntimeEnvs.taskContainerCommand %>"
export TASK_CONTAINER_NAME="<%= obj.shippableRuntimeEnvs.taskContainerName %>"
fi
|
#!/usr/bin/env bash
# exit immediately if an error happens
set -e
OPT_DIR_PARAM=$1
# get setup directory path
SETUP_DIR=$(dirname "${BASH_SOURCE[0]}")
SETUP_DIR=$(readlink -f "${SETUP_DIR}")
# find the environment variables
set --
source "${SETUP_DIR}/env_config_gen.sh" "${OPT_DIR_PARAM}"
##################################################################
# make polyml binaries and libraries available
POLY_VERSION="v5.7.1"
# if poly version is specified in the environment, use this
if [[ ! -z "${HOLBA_POLYML_VERSION}" ]]; then
POLY_VERSION=${HOLBA_POLYML_VERSION}
fi
POLY_DIR=${HOLBA_OPT_DIR}/polyml_${POLY_VERSION}
export PATH=${POLY_DIR}/bin:$PATH
export LD_LIBRARY_PATH=${POLY_DIR}/lib:$LD_LIBRARY_PATH
# HOL4 source and branch
GIT_URL=https://github.com/kth-step/HOL.git
GIT_BRANCH=for_holba
HOL4_DIR=${HOLBA_OPT_DIR}/hol_k12_holba
##################################################################
# if HOL does exist, check if it is up-to-date and remove it in case it is not
if [[ -d "${HOL4_DIR}" ]]; then
cd "${HOL4_DIR}"
git fetch origin
# does the remote branch exist locally?
if [[ ! `git branch --all --list origin/${GIT_BRANCH}` ]]; then
echo "the cached HOL4 version seems to be on another branch, deleting it now"
# delete the stale HOL4 build
cd "${HOLBA_OPT_DIR}"
rm -rf "${HOL4_DIR}"
else
# is there a difference between the current and the remote branch?
GIT_DIFF=$(git diff)
GIT_DIFF_REMOTE=$(git diff HEAD remotes/origin/${GIT_BRANCH})
if [[ "${GIT_DIFF}${GIT_DIFF_REMOTE}" ]]; then
echo "the cached HOL4 version has differences, deleting it now"
# delete the stale HOL4 build
cd "${HOLBA_OPT_DIR}"
rm -rf "${HOL4_DIR}"
else
echo "the cached HOL4 version is correct, keeping it"
fi
fi
fi
cd "${HOLBA_OPT_DIR}"
# if HOL does not exist already, clone and build it
if [[ ! -d "${HOL4_DIR}" ]]; then
# clone the specified HOL4 branch only
git clone -b ${GIT_BRANCH} --single-branch ${GIT_URL} "${HOL4_DIR}"
cd "${HOL4_DIR}"
# compile HOL4
poly < tools/smart-configure.sml
bin/build --nograph
fi
cd "${HOLBA_OPT_DIR}"
# extra builds (e.g. some l3 models)
declare -a hol4_extrabuild=(
"examples/l3-machine-code/common"
"examples/l3-machine-code/arm8/model"
"examples/l3-machine-code/arm8/step"
"examples/l3-machine-code/m0/model"
"examples/l3-machine-code/m0/step"
)
for dir in "${hol4_extrabuild[@]}"
do
echo "Holmaking: ${dir}"
cd "${HOL4_DIR}/${dir}"
${HOL4_DIR}/bin/Holmake
done
|
#!/bin/bash
set -e
BASE_TEST_FOLDER="$(pwd)/$(dirname $0)/$(basename $0 .sh)"
TESTCAFE_TESTS_FOLDER="$BASE_TEST_FOLDER/testcafe"
# Start the backend that serves the media files to be migrated
# Listens internally on port 80 (addressed as http://<assets_container>/assets/)
startMigrationAssetsContainer
# Execute migrations using testcafe
docker run --env-file=$(pwd)/.env --network gateway -v "${TESTCAFE_TESTS_FOLDER}":/tests testcafe/testcafe:"${TESTCAFE_VERSION}" --screenshots path=/tests/screenshots,takeOnFails=true chromium /tests/**/*.js
# Execute tests in docker image, on the same docker network (gateway, idc_default?) as Drupal
# TODO: expose logs when failing tests?
# N.B. trailing slash on the BASE_ASSETS_URL is important. uses the internal URL.
# TODO UNDO
#docker run --network gateway --rm -e BASE_ASSETS_URL=http://${assets_container}/assets/ local/migration-backend-tests
|
python3 bigbird.py \
--accumulate_grad_batches=2 \
--drop_mult=0.3 \
--gradient_clip_val=10.0 \
--lr=0.0003 \
--stochastic_weight_avg=True \
--thresh_step=0.49 \
--use_special_classifier=none \
--project_name=predicting_outcome_appeals \
--experiment_name=bigbird \
--batch_size=8 \
--epochs=15 \
--lowercase=false \
--train_path=../data/train_en.parquet \
--valid_path=../data/valid_en.parquet \
--test_path=../data/test_en.parquet |
chmod +w chain.out
./secimage chain -out chain.out -rsa -sha256 -key rsa_eng_key.pem 0x003E0000 0 -key rsa_sbi1.pem 0x003E0000 0 -key rsa_sbi2.pem 0x003E0000 0 -key rsa_sbi3.pem 0x003E0000 0
# rsa sign with chain of trust file
# NOTE: must use the last -key in the chain of trust file to sign SBI
./secimage sbi -out fabi.bin -config unauth.cfg -sha256 -chain chain.out -rsa rsa_sbi3.pem -bl f.bin -depth 3
#### final:
cat add0.bin fabi.bin > final_c0_eng_2k_sha256.bin
rm -Rf fabi.bin
|
#!/bin/bash
set -Eeuo pipefail
cd /opt/powerdns-admin
GUNICORN_TIMEOUT="${GUNICORN_TIMEOUT:-120}"
GUNICORN_WORKERS="${GUNICORN_WORKERS:-4}"
GUNICORN_LOGLEVEL="${GUNICORN_LOGLEVEL:-info}"
if [ ! -f ./config.py ]; then
cat ./config_template.py ./docker/Production/config_docker.py > ./config.py
fi
GUNICORN_ARGS="-t ${GUNICORN_TIMEOUT} --workers ${GUNICORN_WORKERS} --bind 0.0.0.0:80 --log-level ${GUNICORN_LOGLEVEL}"
if [ "$1" == gunicorn ]; then
flask db upgrade
exec "$@" $GUNICORN_ARGS
else
exec "$@"
fi
|
#!/usr/bin/env bash
# Pretrain model on past data
#python train_ord_universal.py -m seresnet152_gapv2 \
# -a medium -b 42 --size 512 --fp16 -o Ranger -wd 1e-4 -s simple -lr 3e-4\
# -e 5 \
# --train-on aptos-2015-train aptos-2015-test-private aptos-2015-test-public\
# --valid-on aptos-2019-train idrid-train idrid-test messidor\
# -v --criterion-ord mse -x seresnet152_gapv2_pretrain
#sleep 15
# Train 4 folds on this data
python train_ord.py -m seresnet152_gap \
-a medium -d 0.5 -b 42 --size 512 --fp16 -o Ranger -wd 1e-3 -s simple -lr 3e-5\
--warmup 10 \
--epochs 75 \
--use-aptos2019 --use-idrid --use-messidor \
-f 0 --seed 104 \
-v --criterion-ord huber -t seresnet152_gapv2_pretrain.pth
sleep 15
python train_ord.py -m seresnet152_gap \
-a medium -d 0.5 -b 42 --size 512 --fp16 -o Ranger -wd 1e-3 -s simple -lr 3e-5\
--warmup 10 \
--epochs 75 \
--use-aptos2019 --use-idrid --use-messidor \
-f 1 --seed 105 \
-v --criterion-ord huber -t seresnet152_gapv2_pretrain.pth
sleep 15
python train_ord.py -m seresnet152_gap \
-a medium -d 0.5 -b 42 --size 512 --fp16 -o Ranger -wd 1e-3 -s simple -lr 3e-5\
--warmup 10 \
--epochs 75 \
--use-aptos2019 --use-idrid --use-messidor \
-f 2 --seed 106 \
-v --criterion-ord huber -t seresnet152_gapv2_pretrain.pth
sleep 15
python train_ord.py -m seresnet152_gap \
-a medium -d 0.5 -b 42 --size 512 --fp16 -o Ranger -wd 1e-3 -s simple -lr 3e-5\
--warmup 10 \
--epochs 75 \
--use-aptos2019 --use-idrid --use-messidor \
-f 3 --seed 107 \
-v --criterion-ord huber -t seresnet152_gapv2_pretrain.pth
sleep 15 |
#!/bin/bash
echo "Remote machine hostname: "
read HOSTNAME
USERNAME=$USER
SCRIPT="/usr/local/bin/connect_display.py"
ssh -X ${USERNAME}@${HOSTNAME} "${SCRIPT}"
|
#!/bin/bash
set -e
function cleanup {
# delete docker creds
set +e
docker logout "$DOCKER_REGISTRY"
docker logout "$GCR_REGISTRY"
set -e
rm -rf "$DOCKER_CONFIG"
}
trap cleanup EXIT
trap cleanup INT
echo "Using Docker config $DOCKER_CONFIG"
# config will be saved to location explicitly specified in $DOCKER_CONFIG (set by Doppler)
echo "$DOCKER_HUB_TOKEN" | docker login -u "$DOCKER_HUB_USER" --password-stdin "$DOCKER_REGISTRY"
echo "$GOOGLE_CREDS" | docker login -u "$GCR_USER" --password-stdin "$GCR_REGISTRY"
# pull in latest docker images
docker pull alpine
GOOGLE_APPLICATION_CREDENTIALS=<(echo "$GOOGLE_CREDS") goreleaser release --rm-dist --parallelism=4
|
#! /bin/bash
echo "===> Running Tests..."
echo "===> Flake8 Running..."
flake8 . --exclude "*venv*"
echo "===> Flake8 Finished"
echo "===> Python2 Tests Running..."
python2 -m pytest -v
echo "===> Python2 Tests Finished"
echo "===> Python3 Tests Running..."
python3 -m pytest -v
echo "===> Python3 Tests Finished"
|
#!/usr/bin/env bash
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-amd64 dockupdater/dockupdater:$TRAVIS_TAG-amd64
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-arm dockupdater/dockupdater:$TRAVIS_TAG-arm
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-arm64 dockupdater/dockupdater:$TRAVIS_TAG-arm64
docker push dockupdater/dockupdater:$TRAVIS_TAG-amd64
docker push dockupdater/dockupdater:$TRAVIS_TAG-arm
docker push dockupdater/dockupdater:$TRAVIS_TAG-arm64
docker manifest create dockupdater/dockupdater:$TRAVIS_TAG dockupdater/dockupdater:$TRAVIS_TAG-amd64 dockupdater/dockupdater:$TRAVIS_TAG-arm64 dockupdater/dockupdater:$TRAVIS_TAG-arm
docker manifest inspect dockupdater/dockupdater:$TRAVIS_TAG
docker manifest push dockupdater/dockupdater:$TRAVIS_TAG
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-amd64 dockupdater/dockupdater:latest-amd64
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-arm dockupdater/dockupdater:latest-arm
docker tag dockupdater/dockupdater:$TRAVIS_BRANCH-arm64 dockupdater/dockupdater:latest-arm64
docker push dockupdater/dockupdater:latest-amd64
docker push dockupdater/dockupdater:latest-arm
docker push dockupdater/dockupdater:latest-arm64
docker manifest create dockupdater/dockupdater:latest dockupdater/dockupdater:latest-amd64 dockupdater/dockupdater:latest-arm64 dockupdater/dockupdater:latest-arm
docker manifest inspect dockupdater/dockupdater:latest
docker manifest push dockupdater/dockupdater:latest
|
#!/bin/bash
# set default values
built_dir="pipstmp"
regions="12"
to="0.08"
tbsize="8"
np="1"
for i in "$@"
do
case $i in
-TO=*|--TO=*)
to="${i#*=}"
shift # past argument=value
;;
-DIR=*|--DIR=*)
built_dir="${i#*=}"
shift # past argument=value
;;
-NBREGIONS=*|--NBREGIONS=*)
regions="${i#*=}"
shift # past argument=value
;;
-TBSIZE=*|--TBSIZE=*)
tbsize="${i#*=}"
shift # past argument=value
;;
-NP=*|--NP=*)
np="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
;;
esac
done
if [ -d "$built_dir" ]; then
rm -r "$built_dir"
fi
mkdir $built_dir
cd $built_dir
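# number of blocks = floor(8760*TO/TBSIZE), plus 1 extra block when the division
# leaves a remainder, plus 1 (one block per TBSIZE hours of the 8760*TO modelled hours)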
nblocks=$(echo "8760 * $to / $tbsize + ((8760*${to}) % ${tbsize} > 0) + 1" | bc)
echo "$nblocks"
gams ../simple4pips.gms --NBREGIONS=$regions --TO=$to --RESOLUTION=\(60/60\) --TBSIZE=$tbsize --METHOD=PIPS subsys=../subsysLinux.txt --SCENBLOCK=-1 > /dev/null
../../../../build_pips/gmschk -g $GAMSSYSDIR -T -X $nblocks allblocksPips.gdx > /dev/null
time mpirun -np $np ../../../../build_pips/gmspips $nblocks allblocksPips $GAMSSYSDIR 2>&1 | tee pips.out
|
#!/bin/bash
. ./common.sh
JOB_NAME='Sublime#keymaps'
ALIAS_NAME='sublime'
ALIAS_PATH=/usr/local/bin/${ALIAS_NAME}
begin
echo -e "Copying '*.sublime-keymap' to '${USER_DIR}'\n"
cp ./keymaps/*.sublime-keymap "$USER_DIR"
end
|
#!/bin/bash
# Pre-commit hook passing files through jshint
#
# This ensures that all js, html and json files are valid and conform
# to expectations.
ROOT_DIR=$(git rev-parse --show-toplevel)
JSHINT="${ROOT_DIR}/node_modules/jshint/bin/jshint -c ${ROOT_DIR}/.jshintrc"
CODE=0
for file in $(git diff-index --name-only --cached HEAD -- | grep '\.js$' | grep -v "test\/"); do
echo "jshint ${file} ..."
${JSHINT} ${file}
if [ ${?} -ne 0 ] ; then
CODE=1
fi
done
if [ ${CODE} -gt 0 ] ; then
exit ${CODE}
fi
|
#!/bin/sh
docker-compose exec asterisk asterisk -rc
|
#!/bin/bash
usage () {
cat <<END
count [-r] [-b n] [-s n] stop
Print each number up to stop, beginning at 0:
-b gives the number to begin with (default: 0)
-h show this help message
-r reverses the count
-s sets step size (default: 1)
END
}
error () {
echo "Error ${1}"
usage
exit $2
} >&2 # will go to stderr
isnum () {
# we assign the patterns to variables here because otherwise we would
# have to escape the special characters in the conditions below where they are used
declare -r num_re='^[0-9]+$' # $ matches the end of a string; + matches the expression before it 1 or more times
declare -r octal_re='^0(.+)' # ^ matches the start of a string
num_error="ok"
if [[ $1 =~ ${num_re} ]]; then
if [[ $1 =~ ${octal_re} ]]; then # numbers that start with 0 in bash are octal numbers and will be interpreted differently to what we want
num_error="${1} is not a number, did you mean ${BASH_REMATCH[1]}?" # BASH_REMATCH holds the regex capture groups; index 1 is the first parenthesised group, i.e. the number without its leading zero
return 1
fi
else
num_error="${1} is not a number"
return 1
fi
return 0
}
declare reverse=""
declare -i start=0
declare -i step=1
while getopts ":b:s:rh" opt; do
case $opt in
r)
reverse="yes"
;;
b)
isnum ${OPTARG} || error "${num_error}" 1
start="${OPTARG}"
;;
h)
usage
exit 0
;;
s)
isnum ${OPTARG} || error "${num_error}" 1
step="${OPTARG}"
;;
:)
echo "Option -${OPTARG} is missing an argument" >&2
exit 1
;;
\?)
echo "Unknown option: -${OPTARG}" >&2
exit 1
;;
esac
done
shift $(( OPTIND -1 ))
[[ $1 ]] || { echo "missing an argument" >&2; exit 1; }
[[ $1 =~ ^[0-9]+$ ]] || { echo "$1 is not a number" >&2; exit 1; }
declare end="$1"
if [[ ! $reverse ]]; then
for (( i=start; i <= end; i+=step )); do
echo $i
done
else
for (( i=end; i >= start; i-=step )); do
echo $i
done
fi
exit 0
|
TERMUX_PKG_HOMEPAGE=https://www.djcbsoftware.nl/code/mu/
TERMUX_PKG_DESCRIPTION="Maildir indexer/searcher and Emacs client (mu4e)"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_VERSION=1.4.12
TERMUX_PKG_REVISION=1
TERMUX_PKG_SRCURL=https://github.com/djcb/mu/releases/download/$TERMUX_PKG_VERSION/mu-$TERMUX_PKG_VERSION.tar.xz
TERMUX_PKG_SHA256=98295cb697da051b5c0d8048bca2cc516551fede243f7f0915903b3e7f2f3dce
TERMUX_PKG_DEPENDS="glib, libc++, libxapian, libgmime"
|
#!/bin/bash
CLUSTER_NAME=stacklynx-`openssl rand -base64 32 | base64 | head -c 4`
VPC_PREFIX=10.11
EKS_TYPE=
CLUSTER_REGION=us-east-1
AWS_KEY=
AWS_SECRET=
CORP_IPS=`curl -s https://ifconfig.co`/32
mkdir -p ./output
while getopts c:n:t:r:k:s: option
do
case $option in
c) CLUSTER_NAME=${OPTARG} ;;
n) VPC_PREFIX=${OPTARG} ;;
t) EKS_TYPE=${OPTARG} ;;
r) CLUSTER_REGION=${OPTARG} ;;
k) AWS_KEY=${OPTARG} ;;
s) AWS_SECRET=${OPTARG} ;;
\?) echo "Unknown option: -$OPTARG" >&2; phelp; exit 1;;
:) echo "Missing option argument for -$OPTARG" >&2; phelp; exit 1;;
*) echo "Unimplemented option: -$OPTARG" >&2; phelp; exit 1;;
esac
done
if [ -z "${AWS_KEY}" ] ; then
echo "Please set AWS_KEY using the -k option to proceed"
exit 1
fi
if [ -z "${AWS_SECRET}" ] ; then
echo "Please set AWS_SECRET using the -s option to proceed"
exit 1
fi
echo
cat <<EOF > ./output/eks-vars-${CLUSTER_NAME}.tfvars
aws_access_key = "${AWS_KEY}"
aws_secret_key = "${AWS_SECRET}"
aws_region = "${CLUSTER_REGION}"
cluster-name = "${CLUSTER_NAME}"
vpcnet_prefix = "${VPC_PREFIX}"
corporate_cidr_list=["${CORP_IPS}"]
EOF
source cluster/output/${CLUSTER_NAME}-auth-keys.sh
helm del --purge nginx-ingress
if [ -z "${EKS_TYPE}" ] ; then # if type is not set then destroy both the nodes and the cluster
echo "destroying the vars:./output/eks-vars-${CLUSTER_NAME}.tfvars state:./output/$CLUSTER_NAME-nodes.state"
terraform init nodes
terraform destroy -var-file=./output/eks-vars-${CLUSTER_NAME}.tfvars -state=./output/$CLUSTER_NAME-nodes.state nodes
echo "destroying the vars:./output/eks-vars-${CLUSTER_NAME}.tfvars state:./output/$CLUSTER_NAME-cluster.state"
terraform init cluster
terraform destroy -var-file=./output/eks-vars-${CLUSTER_NAME}.tfvars -state=./output/$CLUSTER_NAME-cluster.state cluster
else
echo "destroying the vars:./output/eks-vars-${CLUSTER_NAME}.tfvars state:./output/$CLUSTER_NAME-$EKS_TYPE.state"
terraform init $EKS_TYPE
terraform destroy -var-file=./output/eks-vars-${CLUSTER_NAME}.tfvars -state=./output/$CLUSTER_NAME-$EKS_TYPE.state $EKS_TYPE
fi
|
DATASET_FLAGS="
--data_dir /workspace/mnt/storage/yangdecheng/imagenet_1k/ImageNet-1k/train
--val_data_dir /workspace/mnt/storage/yangdecheng/imagenet_1k/ImageNet-1k/val
--tot_class 1000
--dataset_type imagenet-1000
"
DIFFUSION_FLAGS="
--diffusion_steps 1000 --timestep_respacing ddim25
--noise_schedule linear
--learn_sigma True
"
MODEL_FLAGS="
--learn_sigma True --class_cond False
--image_size 256
--num_channels 256 --num_res_blocks 2 --num_head_channels 64
--attention_resolutions 32,16,8
--use_fp16 True --resblock_updown True
--use_scale_shift_norm True
--num_classes 1000
"
CLASSIFIER_FLAGS="
--classifier_scale 10.0
--classifier_pool attention --classifier_out_channels 1000
--classifier_image_size 256
--classifier_num_channels 128 --classifier_num_res_blocks 2 --classifier_num_head_channels 64
--classifier_attention_resolutions 32,16,8
--classifier_use_fp16 True --classifier_resblock_updown True
--classifier_use_scale_shift_norm True
"
SAMPLE_FLAGS="
--batch_size 8 --num_samples 1000
--use_ddim True --t_range_start 0 --t_range_end 1000
"
workspace_dir=/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion
save_name="imagenet1000_classifier256x256_channel128_upperbound"
logdir=${workspace_dir}/log
pretrain_model=${workspace_dir}/pretrain_model
steps=250
predict_name=model500000_imagenet1000_steps${steps}_getClassifierGroundthGuidance
num_samples=5000
scale=10.0
NNODE=1
RANK=0
MASTER_ADDR=localhost
MASTER_PORT=23456
python -m torch.distributed.launch \
--nnode ${NNODE} --node_rank $RANK --nproc_per_node 2 \
--master_addr $MASTER_ADDR --master_port $MASTER_PORT \
get_classifier_groundth_guidance_grad_norm.py \
$DIFFUSION_FLAGS $MODEL_FLAGS $CLASSIFIER_FLAGS $SAMPLE_FLAGS $DATASET_FLAGS \
--log_root ${logdir} \
--model_path ${pretrain_model}/256x256_diffusion_uncond.pt \
--classifier_path ${pretrain_model}/256x256_classifier.pt \
--save_name ${save_name}/predict/${predict_name} \
--classifier_scale $scale --num_samples ${num_samples} --batch_size 24 \
--t_range_start 0 --t_range_end 1000 \
--use_ddim False --timestep_respacing 250
# --tot_class 1
# --selected_class ${class}
# --gpus 0,1 \
# --use_entropy_scale True
# CUDA_VISIBLE=0,1 python evaluations/evaluator.py \
# --ref_batch ${pretrain_model}/VIRTUAL_imagenet-1000_256x256_labeled_sample${num_samples}_class${class}.npz \
# --sample_batch ${logdir}/${save_name}/predict/${predict_name}/scale${scale}.0_stepsddim25_class${class}_samples_${num_samples}x256x256x3.npz \
# --save_result_path ${logdir}/${save_name}/predict/${predict_name}/result_scale${scale}.0_class${class}_stepsddim25_sample${num_samples}.yaml \
# --batch_size 256
|
#!/bin/bash
$PYTHON setup.py install --old-and-unmanageable
rm $PREFIX/bin/py.test*
rm -f $SP_DIR/__pycache__/pkg_res*
|
#!/bin/bash
#
# Build Order
# prisma-db-introspection
# prisma-yml
# prisma-cli-engine
# prisma-cli-core
# prisma-cli
set -e
set -x
if [[ -z "$CIRCLE_BRANCH" ]]; then
if [[ $CIRCLE_TAG == *beta ]]; then
export CIRCLE_BRANCH=beta
fi
if [[ $CIRCLE_TAG == *alpha ]]; then
export CIRCLE_BRANCH=alpha
fi
fi
if [ -z "$CIRCLE_TAG" ] && [ $CIRCLE_BRANCH == "master" ]; then
echo "Builds on master are only executed when a tag is provided"
exit 0
fi
if [ $CIRCLE_TAG ] && [ $CIRCLE_BRANCH != "master" ]; then
echo "The Tag ${CIRCLE_TAG} has been set, but the branch is set to ${CIRCLE_BRANCH}. Tags are only allowed on master"
fi
if [ $CIRCLE_TAG ] && [ $CIRCLE_BRANCH == "master" ]; then
echo "WARNING: CIRCLE_TAG is set to $CIRCLE_TAG. This will publish a new version on the @latest tag."
else
echo "INFO: This will deploy a new version on the @$CIRCLE_BRANCH tag"
fi
if [[ $CIRCLE_COMPARE_URL ]]; then
export lastCommits=`echo $CIRCLE_COMPARE_URL | sed -n 's/.*compare\/\(.*\)/\1/p' | sed 's/\.\.\./ /'`
else
export lastCommits="HEAD"
fi
export changedFiles=$(git diff-tree --no-commit-id --name-only -r $lastCommits)
introspectionChanged=false
ymlChanged=false
coreChanged=false
engineChanged=false
if [[ "$changedFiles" = *"cli/packages/prisma-db-introspection"* ]]; then
introspectionChanged=true
fi
if [[ "$changedFiles" = *"cli/packages/prisma-yml"* ]]; then
ymlChanged=true
fi
if [[ "$changedFiles" = *"cli/packages/prisma-cli-core"* ]]; then
coreChanged=true
fi
if [[ "$changedFiles" = *"cli/packages/prisma-cli-engine"* ]]; then
engineChanged=true
fi
echo "introspection changed: $introspectionChanged yml changed: $ymlChanged. core changed: $coreChanged. engine changed: $engineChanged"
if [ $introspectionChanged == false ] && [ $ymlChanged == false ] && [ $coreChanged == false ] && [ $engineChanged == false ] && [ -z "$CIRCLE_TAG" ]; then
echo "There are no changes in the CLI."
exit 0;
fi
latestVersion=$(npm info prisma version)
tag=${CIRCLE_TAG:-$latestVersion}
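# split the tag into its dot-separated parts, e.g. "1.34.0" -> (1 34 0)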
tagElements=(${tag//./ })
nextDockerMinor=${tagElements[1]}
if [[ $CIRCLE_TAG ]]; then
nextDockerTag="${tagElements[0]}.${nextDockerMinor}"
else
step=1
if [ $CIRCLE_BRANCH == "alpha" ]; then
step=2
fi
nextDockerMinor=$((nextDockerMinor + step))
nextDockerTag="${tagElements[0]}.${nextDockerMinor}-${CIRCLE_BRANCH}"
fi
node cli/scripts/waitUntilTagPublished.js $nextDockerTag
cd cli/packages/
export introspectionVersionBefore=$(cat prisma-db-introspection/package.json | jq -r '.version')
if [ "$introspectionChanged" == true ] || [ -n "$CIRCLE_TAG" ]; then
cd prisma-db-introspection
sleep 0.5
../../scripts/doubleInstall.sh
yarn build
if [[ $CIRCLE_TAG ]]; then
npm version --allow-same-version $(npm info prisma-db-introspection version)
npm version patch --no-git-tag-version
npm publish
else
npm version --allow-same-version $(npm info prisma-db-introspection version --tag $CIRCLE_BRANCH)
npm version prerelease --no-git-tag-version
npm publish --tag $CIRCLE_BRANCH
fi
cd ..
fi
export introspectionVersion=$(cat prisma-db-introspection/package.json | jq -r '.version')
export ymlVersionBefore=$(cat prisma-yml/package.json | jq -r '.version')
if [ "$ymlChanged" == true ] || [ -n "$CIRCLE_TAG" ]; then
echo "Going to publish yml"
cd prisma-yml
yarn install
yarn build
if [[ $CIRCLE_TAG ]]; then
# make sure it is the latest version
npm version --allow-same-version $(npm info prisma-yml version)
npm version patch --no-git-tag-version
npm publish
else
npm version --allow-same-version $(npm info prisma-yml version --tag $CIRCLE_BRANCH)
npm version prerelease --no-git-tag-version
npm publish --tag $CIRCLE_BRANCH
fi
yarn install
cd ..
fi
export ymlVersion=$(cat prisma-yml/package.json | jq -r '.version')
if [ "$ymlVersionBefore" != "$ymlVersion" ] || [ "$engineChanged" == true ]; then
cd prisma-cli-engine
sleep 3.0
yarn add prisma-yml@$ymlVersion
sleep 0.2
../../scripts/doubleInstall.sh
yarn build
if [[ $CIRCLE_TAG ]]; then
npm version --allow-same-version $(npm info prisma-cli-engine version)
npm version patch --no-git-tag-version
npm publish
else
npm version --allow-same-version $(npm info prisma-cli-engine version --tag $CIRCLE_BRANCH)
npm version prerelease --no-git-tag-version
npm publish --tag $CIRCLE_BRANCH
fi
cd ..
fi
export engineVersion=$(cat prisma-cli-engine/package.json | jq -r '.version')
if [ "$ymlVersionBefore" != "$ymlVersion" ] || [ "$coreChanged" == true ] || [ "$introspectionChanged" == true ]; then
cd prisma-cli-core
sleep 3.0
yarn add prisma-yml@$ymlVersion
sleep 0.2
yarn add prisma-db-introspection@$introspectionVersion
yarn install
# new docker tag
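# e.g. rewrites "image: prismagraphql/prisma:1.34" to "image: prismagraphql/prisma:${nextDockerTag}" in src/util.ts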
sed -i.bak "s/image: prismagraphql\/prisma:[0-9]\{1,\}\.[0-9]\{1,\}/image: prismagraphql\/prisma:$nextDockerTag/g" src/util.ts
yarn build
if [[ $CIRCLE_TAG ]]; then
npm version --allow-same-version $(npm info prisma-cli-core version)
npm version patch --no-git-tag-version
npm publish
else
npm version --allow-same-version $(npm info prisma-cli-core version --tag $CIRCLE_BRANCH)
npm version prerelease --no-git-tag-version
npm publish --tag $CIRCLE_BRANCH
fi
cd ..
fi
export coreVersion=$(cat prisma-cli-core/package.json | jq -r '.version')
cd prisma-cli
sleep 0.5
yarn add prisma-cli-engine@$engineVersion prisma-cli-core@$coreVersion
yarn install
yarn build
if [ -z "$CIRCLE_TAG" ]; then
latestBetaVersion=$(npm info prisma version --tag $CIRCLE_BRANCH)
latestVersionElements=(${latestVersion//./ })
latestBetaVersionElements=(${latestBetaVersion//./ })
betaMinor=${latestBetaVersionElements[1]}
latestMinor=${latestVersionElements[1]}
latestMajor=${latestVersionElements[0]}
betaLastNumber=`echo $latestBetaVersion | sed -n "s/.*$CIRCLE_BRANCH\.\([0-9]\{1,\}\)/\1/p"`
echo "betaLastNumber $betaLastNumber"
# calc next minor
step=1
if [ $CIRCLE_BRANCH == "alpha" ]; then
step=2
fi
nextMinor=$((latestMinor + step))
nextLastNumber=0
echo "beta minor $betaMinor latest minor $latestMinor next minor ${nextMinor}"
# calc next last number
if [ "$betaMinor" -gt "$latestMinor" ]; then
echo "$betaMinor is greater than $latestMinor"
nextLastNumber=$((betaLastNumber + step))
fi
newVersion="$latestMajor.$nextMinor.0-$CIRCLE_BRANCH.$nextLastNumber"
echo "new version: $newVersion"
npm version $newVersion
npm publish --tag $CIRCLE_BRANCH
else
newVersion=$CIRCLE_TAG
echo "new version: $newVersion"
npm version $newVersion
npm publish
fi
|
#!/bin/bash
#SBATCH --account=rrg-pbellec
#SBATCH --job-name=smriprep_sub-8923536.job
#SBATCH --output=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-8923536.out
#SBATCH --error=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-8923536.err
#SBATCH --time=24:00:00
#SBATCH --cpus-per-task=16
#SBATCH --mem-per-cpu=4096M
#SBATCH [email protected]
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
export SINGULARITYENV_FS_LICENSE=$HOME/.freesurfer.txt
export SINGULARITYENV_TEMPLATEFLOW_HOME=/templateflow
module load singularity/3.8
#copying input dataset into local scratch space
rsync -rltv --info=progress2 --exclude="sub*" --exclude="derivatives" /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q $SLURM_TMPDIR
rsync -rltv --info=progress2 /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/sub-8923536 $SLURM_TMPDIR/cima-q
singularity run --cleanenv -B $SLURM_TMPDIR:/DATA -B /home/fnadeau/.cache/templateflow:/templateflow -B /etc/pki:/etc/pki/ -B /scratch/fnadeau/cima-q/1633992344:/OUTPUT /lustre03/project/6003287/containers/fmriprep-20.2.1lts.sif -w /DATA/fmriprep_work --participant-label 8923536 --bids-database-dir /DATA/cima-q/.pybids_cache --bids-filter-file /OUTPUT/bids_filters.json --output-spaces MNI152NLin2009cAsym MNI152NLin6Asym fsnative anat --output-layout bids --notrack --skip_bids_validation --write-graph --omp-nthreads 8 --nprocs 16 --mem_mb 65536 --resource-monitor /DATA/cima-q /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives participant
fmriprep_exitcode=$?
if [ $fmriprep_exitcode -ne 0 ] ; then cp -R $SLURM_TMPDIR/fmriprep_work /scratch/fnadeau/cima-q/1633992344/smriprep_sub-8923536.workdir ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp $SLURM_TMPDIR/fmriprep_work/fmriprep_wf/resource_monitor.json /scratch/fnadeau/cima-q/1633992344/smriprep_sub-8923536_resource_monitor.json ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then mkdir -p /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp -R /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives/* /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q/ ; fi
exit $fmriprep_exitcode
|
#!/usr/bin/env sh
# {{ cookiecutter.template_file_comment }}
# Version: {{ cookiecutter._version }}
set -eu -o pipefail
rm -rf tmp/
mkdir -p tmp/
themes_settings=$(find src -depth -mindepth 2 -maxdepth 2 -type f -name settings.yaml)
for settings_path in $themes_settings; do
# Get just the theme name by removing first and last part of the settings_path.
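# e.g. "src/mytheme/settings.yaml" -> "mytheme"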
theme_name=${settings_path##src/}
theme_name=${theme_name%/settings.yaml}
# Generate the tmp/${theme_name}/settings.custom-properties-selector.css file.
theo $settings_path \
--setup custom-properties-selector.cjs \
--transform web \
--format custom-properties-selector.css \
--dest tmp/$theme_name
done
|
#!/usr/bin/env bash
export LC_ALL=C
KNOWN_VIOLATIONS=(
"src/base58.cpp:.*isspace"
"src/isocoin-tx.cpp.*stoul"
"src/isocoin-tx.cpp.*trim_right"
"src/isocoin-tx.cpp:.*atoi"
"src/core_read.cpp.*is_digit"
"src/dbwrapper.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
"src/httprpc.cpp.*trim"
"src/init.cpp:.*atoi"
"src/netbase.cpp.*to_lower"
"src/qt/rpcconsole.cpp:.*atoi"
"src/qt/rpcconsole.cpp:.*isdigit"
"src/rest.cpp:.*strtol"
"src/rpc/server.cpp.*to_upper"
"src/test/dbwrapper_tests.cpp:.*snprintf"
"src/test/getarg_tests.cpp.*split"
"src/torcontrol.cpp:.*atoi"
"src/torcontrol.cpp:.*strtol"
"src/uint256.cpp:.*isspace"
"src/uint256.cpp:.*tolower"
"src/util.cpp:.*atoi"
"src/util.cpp:.*fprintf"
"src/util.cpp:.*tolower"
"src/utilmoneystr.cpp:.*isdigit"
"src/utilmoneystr.cpp:.*isspace"
"src/utilstrencodings.cpp:.*atoi"
"src/utilstrencodings.cpp:.*isspace"
"src/utilstrencodings.cpp:.*strtol"
"src/utilstrencodings.cpp:.*strtoll"
"src/utilstrencodings.cpp:.*strtoul"
"src/utilstrencodings.cpp:.*strtoull"
"src/utilstrencodings.h:.*atoi"
)
REGEXP_IGNORE_EXTERNAL_DEPENDENCIES="^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/)"
LOCALE_DEPENDENT_FUNCTIONS=(
alphasort # LC_COLLATE (via strcoll)
asctime # LC_TIME (directly)
asprintf # (via vasprintf)
atof # LC_NUMERIC (via strtod)
atoi # LC_NUMERIC (via strtol)
atol # LC_NUMERIC (via strtol)
atoll # (via strtoll)
atoq
btowc # LC_CTYPE (directly)
ctime # (via asctime or localtime)
dprintf # (via vdprintf)
fgetwc
fgetws
fold_case # boost::locale::fold_case
fprintf # (via vfprintf)
fputwc
fputws
fscanf # (via __vfscanf)
fwprintf # (via __vfwprintf)
getdate # via __getdate_r => isspace // __localtime_r
getwc
getwchar
is_digit # boost::algorithm::is_digit
is_space # boost::algorithm::is_space
isalnum # LC_CTYPE
isalpha # LC_CTYPE
isblank # LC_CTYPE
iscntrl # LC_CTYPE
isctype # LC_CTYPE
isdigit # LC_CTYPE
isgraph # LC_CTYPE
islower # LC_CTYPE
isprint # LC_CTYPE
ispunct # LC_CTYPE
isspace # LC_CTYPE
isupper # LC_CTYPE
iswalnum # LC_CTYPE
iswalpha # LC_CTYPE
iswblank # LC_CTYPE
iswcntrl # LC_CTYPE
iswctype # LC_CTYPE
iswdigit # LC_CTYPE
iswgraph # LC_CTYPE
iswlower # LC_CTYPE
iswprint # LC_CTYPE
iswpunct # LC_CTYPE
iswspace # LC_CTYPE
iswupper # LC_CTYPE
iswxdigit # LC_CTYPE
isxdigit # LC_CTYPE
localeconv # LC_NUMERIC + LC_MONETARY
mblen # LC_CTYPE
mbrlen
mbrtowc
mbsinit
mbsnrtowcs
mbsrtowcs
mbstowcs # LC_CTYPE
mbtowc # LC_CTYPE
mktime
normalize # boost::locale::normalize
# printf # LC_NUMERIC
putwc
putwchar
scanf # LC_NUMERIC
setlocale
snprintf
sprintf
sscanf
stod
stof
stoi
stol
stold
stoll
stoul
stoull
strcasecmp
strcasestr
strcoll # LC_COLLATE
# strerror
strfmon
strftime # LC_TIME
strncasecmp
strptime
strtod # LC_NUMERIC
strtof
strtoimax
strtol # LC_NUMERIC
strtold
strtoll
strtoq
strtoul # LC_NUMERIC
strtoull
strtoumax
strtouq
strxfrm # LC_COLLATE
swprintf
to_lower # boost::locale::to_lower
to_title # boost::locale::to_title
to_upper # boost::locale::to_upper
tolower # LC_CTYPE
toupper # LC_CTYPE
towctrans
towlower # LC_CTYPE
towupper # LC_CTYPE
trim # boost::algorithm::trim
trim_left # boost::algorithm::trim_left
trim_right # boost::algorithm::trim_right
ungetwc
vasprintf
vdprintf
versionsort
vfprintf
vfscanf
vfwprintf
vprintf
vscanf
vsnprintf
vsprintf
vsscanf
vswprintf
vwprintf
wcrtomb
wcscasecmp
wcscoll # LC_COLLATE
wcsftime # LC_TIME
wcsncasecmp
wcsnrtombs
wcsrtombs
wcstod # LC_NUMERIC
wcstof
wcstoimax
wcstol # LC_NUMERIC
wcstold
wcstoll
wcstombs # LC_CTYPE
wcstoul # LC_NUMERIC
wcstoull
wcstoumax
wcswidth
wcsxfrm # LC_COLLATE
wctob
wctomb # LC_CTYPE
wctrans
wctype
wcwidth
wprintf
)
function join_array {
local IFS="$1"
shift
echo "$*"
}
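# Example (illustrative): join_array "|" "foo" "bar" prints "foo|bar".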
REGEXP_IGNORE_KNOWN_VIOLATIONS=$(join_array "|" "${KNOWN_VIOLATIONS[@]}")
# Invoke "git grep" only once in order to minimize run-time
REGEXP_LOCALE_DEPENDENT_FUNCTIONS=$(join_array "|" "${LOCALE_DEPENDENT_FUNCTIONS[@]}")
GIT_GREP_OUTPUT=$(git grep -E "[^a-zA-Z0-9_\`'\"<>](${REGEXP_LOCALE_DEPENDENT_FUNCTIONS}(|_r|_s))[^a-zA-Z0-9_\`'\"<>]" -- "*.cpp" "*.h")
EXIT_CODE=0
for LOCALE_DEPENDENT_FUNCTION in "${LOCALE_DEPENDENT_FUNCTIONS[@]}"; do
MATCHES=$(grep -E "[^a-zA-Z0-9_\`'\"<>]${LOCALE_DEPENDENT_FUNCTION}(|_r|_s)[^a-zA-Z0-9_\`'\"<>]" <<< "${GIT_GREP_OUTPUT}" | \
grep -vE "\.(c|cpp|h):\s*(//|\*|/\*|\").*${LOCALE_DEPENDENT_FUNCTION}" | \
grep -vE 'fprintf\(.*(stdout|stderr)')
if [[ ${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES} != "" ]]; then
MATCHES=$(grep -vE "${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES}" <<< "${MATCHES}")
fi
if [[ ${REGEXP_IGNORE_KNOWN_VIOLATIONS} != "" ]]; then
MATCHES=$(grep -vE "${REGEXP_IGNORE_KNOWN_VIOLATIONS}" <<< "${MATCHES}")
fi
if [[ ${MATCHES} != "" ]]; then
echo "The locale dependent function ${LOCALE_DEPENDENT_FUNCTION}(...) appears to be used:"
echo "${MATCHES}"
echo
EXIT_CODE=1
fi
done
if [[ ${EXIT_CODE} != 0 ]]; then
echo "Unnecessary locale dependence can cause bugs that are very"
echo "tricky to isolate and fix. Please avoid using locale dependent"
echo "functions if possible."
echo
echo "Advice not applicable in this specific case? Add an exception"
echo "by updating the ignore list in $0"
fi
exit ${EXIT_CODE}
|
#!/bin/bash
### AWS CLI code for the EC2 with S3 lab from the acloud.guru AWS
### Certified Developer Associate course
# turn off history expansion
set +H
# Go home
region="us-east-1"
aws configure set default.region $region
# Bucket names must be globally unique, change accordingly
bucketName='acloudguru1234-jmm'
# The ARN of the desired policy
s3FullAccessArn=arn:aws:iam::aws:policy/AmazonS3FullAccess
## Create Role
# This Lab has a hard dependency on the EC2 instance created in the EC2101 lab:
# https://gist.github.com/JohnMichaelMiller/fe2c0a4d743f6f6c02fe6a5b28169b54
echo $publicDNS $instanceId
# Trust policy allowing EC2 to assume the role
ec2AssumeRolePolicy=$( cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
)
testScript=$( cat <<EOF
aws s3 ls
echo "Hello Cloud Gurus 2" > hello2.txt
aws s3 mb s3://$bucketName
aws s3 cp hello2.txt s3://$bucketName
aws s3 ls s3://$bucketName
EOF
)
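# Note: the heredoc above is unquoted, so $bucketName is expanded when testScript is
# defined; the commands later run on the remote instance with the literal bucket name.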
# MyS3AdminAccess Role
aws iam create-role --role-name MyS3AdminAccess --assume-role-policy-document "$ec2AssumeRolePolicy"
# Show that S3 cannot be accessed from instance
# SSH into EC2 Instance
ssh -i "~/.ssh/$keyName.pem" "ec2-user@$publicDNS"
aws s3 ls # Expected Error: Unable to locate credentials. You can configure credentials by running "aws configure".
exit
# Apply Role to EC2 Instance
# Attach IAM Role MyS3AdminAccess
# Attach policy to role
aws iam attach-role-policy --role-name MyS3AdminAccess --policy-arn $s3FullAccessArn
# Create an instance profile
aws iam create-instance-profile --instance-profile-name MyS3AdminAccess-Instance-Profile
# Add the role to the instance profile
aws iam add-role-to-instance-profile --role-name MyS3AdminAccess --instance-profile-name MyS3AdminAccess-Instance-Profile
# Associate the iam instance profile with the instance
iip=$(aws ec2 associate-iam-instance-profile --instance-id $instanceId --iam-instance-profile Name=MyS3AdminAccess-Instance-Profile | jq -r ".IamInstanceProfileAssociation.AssociationId")
aws ec2 describe-iam-instance-profile-associations
# Show access to S3
# SSH into EC2 Instance
ssh -i "~/.ssh/$keyName.pem" "ec2-user@$publicDNS" "$testScript"
# Teardown
# aws ec2 associate-iam-instance-profile
aws ec2 disassociate-iam-instance-profile --association-id $iip
# aws iam add-role-to-instance-profile
aws iam remove-role-from-instance-profile --role-name MyS3AdminAccess --instance-profile-name MyS3AdminAccess-Instance-Profile
# aws iam create-instance-profile
aws iam delete-instance-profile --instance-profile-name MyS3AdminAccess-Instance-Profile
# aws iam attach-role-policy
aws iam detach-role-policy --role-name MyS3AdminAccess --policy-arn $s3FullAccessArn
# aws iam create-role
aws iam delete-role --role-name MyS3AdminAccess
# Include cleanup from the EC2-101 lab:
# https://gist.github.com/JohnMichaelMiller/fe2c0a4d743f6f6c02fe6a5b28169b54
#See: https://aws.amazon.com/blogs/security/new-attach-an-aws-iam-role-to-an-existing-amazon-ec2-instance-by-using-the-aws-cli/
# This code is not idempotent. It assumes that none of these
# resources exists in the default vpc. It does try and clean up
# after itself. It is also not intended to be run as a command.
# The intent is to run each section or snippet in conjunction
# with the appropriate section of the lab. However, it should
# run unattended, but this hasn't been tested. This script assumes
# that none of the requisite AWS resources exist. To use existing
# resources, assign the AWS resource identifiers to the appropriate
# vars and comment out the related code.
# MIT License
# Copyright (c) 2018 John Michael Miller
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
#!/bin/sh
# ubuntu
# check that owner group exists
if [ -z `getent group spinnaker` ]; then
groupadd spinnaker
fi
# check that user exists
if [ -z `getent passwd spinnaker` ]; then
useradd --gid spinnaker spinnaker -m --home-dir /home/spinnaker
fi
install --mode=755 --owner=spinnaker --group=spinnaker --directory /var/log/spinnaker/rush
|
for executable in `cat commands.txt`; do
echo $executable
rm $executable
done
rm -rf activate*
rm -rf deactivate*
rm commands.txt
rm *.sif
|
#!/bin/sh
# Check if the container has already been started
cscli machines list | grep 127.0.0.1
if [ $? == 1 ]; then
cscli machines add --force --auto -f /etc/crowdsec/local_api_credentials.yaml
fi
# registration to online API for signal push
if [ "$DISABLE_ONLINE_API" == "" ] && [ "$CONFIG_FILE" == "" ] ; then
CONFIG_EXIST=$(yq eval '.api.server.online_client | has("credentials_path")' /etc/crowdsec/config.yaml)
if [ "$CONFIG_EXIST" != "true" ]; then
yq eval '.api.server.online_client = {"credentials_path": "/etc/crowdsec/online_api_credentials.yaml"}' /etc/crowdsec/config.yaml > /etc/crowdsec/config2.yaml
mv /etc/crowdsec/config2.yaml /etc/crowdsec/config.yaml
cscli capi register > /etc/crowdsec/online_api_credentials.yaml
fi
fi
# crowdsec sqlite database permissions
if [ "$GID" != "" ]; then
IS_SQLITE=$(yq eval '.db_config.type == "sqlite"' /etc/crowdsec/config.yaml)
DB_PATH=$(yq eval '.db_config.db_path' /etc/crowdsec/config.yaml)
if [ "$IS_SQLITE" == "true" ]; then
chown :$GID $DB_PATH
fi
fi
## Install collections, parsers & scenarios
cscli hub update
cscli collections upgrade crowdsecurity/linux
if [ "$COLLECTIONS" != "" ]; then
cscli collections install $COLLECTIONS
fi
if [ "$PARSERS" != "" ]; then
cscli parsers install $PARSERS
fi
if [ "$SCENARIOS" != "" ]; then
cscli scenarios install $SCENARIOS
fi
if [ "$POSTOVERFLOWS" != "" ]; then
cscli postoverflows install $POSTOVERFLOWS
fi
ARGS=""
if [ "$CONFIG_FILE" != "" ]; then
ARGS="-c $CONFIG_FILE"
fi
if [ "$FILE_PATH" != "" ]; then
ARGS="$ARGS -file $FILE"
fi
if [ "$JOURNALCTL_FILTER" != "" ]; then
ARGS="$ARGS -jfilter $JOURNALCTL_FILTER"
fi
if [ "$TYPE" != "" ]; then
ARGS="$ARGS -type $TYPE"
fi
if [ "$TEST_MODE" == "true" ] || [ "$TEST_MODE" == "TRUE" ]; then
ARGS="$ARGS -t"
fi
if [ "$DISABLE_AGENT" == "true" ] || [ "$DISABLE_AGENT" == "TRUE" ]; then
ARGS="$ARGS -no-cs"
fi
if [ "$DISABLE_API" == "true" ] || [ "$DISABLE_API" == "TRUE" ]; then
ARGS="$ARGS -no-api"
fi
if [ "$LEVEL_TRACE" == "true" ] || [ "$LEVEL_TRACE" == "TRUE" ]; then
ARGS="$ARGS -trace"
fi
if [ "$LEVEL_DEBUG" == "true" ] || [ "$LEVEL_DEBUG" == "TRUE" ]; then
ARGS="$ARGS -debug"
fi
if [ "$LEVEL_INFO" == "true" ] || [ "$LEVEL_INFO" == "TRUE" ]; then
ARGS="$ARGS -info"
fi
exec crowdsec $ARGS
|
#!/bin/bash
###############################################################################
# Build docker image. #
###############################################################################
# ---------------------------------------------------------------------------#
# --- PREAMBLE --------------------------------------------------------------#
# ---------------------------------------------------------------------------#
## sanity check -- list all required tools here
read -r -d '' tool_reqs <<- EOM
dirname
docker
id
whoami
EOM
while read tool; do
if [ ! -x "$(command -v $tool)" ]; then
## print error using the shell builtin echo command
echo "Required tool '${tool}' not found or not executable!" >&2
exit 2
fi
done < <(echo "$tool_reqs")
# ---------------------------------------------------------------------------#
# --- MAIN ------------------------------------------------------------------#
# ---------------------------------------------------------------------------#
## change directory to the one this script is placed in
cd "$(dirname "$0")"
## go up one directory
cd ../
## variables
docker_image_config_file='./docker/docker-image.config'
. "$docker_image_config_file"
## sanity checks
for cfg_opt in \
'DOCKER_IMAGE_VENDOR' \
'DOCKER_IMAGE_NAME' \
'DOCKER_IMAGE_VERSION'
do
cfg_opt_val=$(eval "echo \${${cfg_opt}}")
if [ -z "${cfg_opt_val}" ]; then
echo "** Please set the '$cfg_opt' option in file '$docker_image_config_file'." >&2
exit 1
fi
done
## construct Docker image name
docker_image_fullname="${DOCKER_IMAGE_VENDOR}/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_VERSION}"
## set variables
container_user="$(whoami)"
container_uid="$(id -u)"
container_gid="$(id -g)"
## build container
docker build \
`#--build-arg "user=${container_user}"` \
--build-arg "uid=${container_uid}" \
--build-arg "gid=${container_gid}" \
-t "$docker_image_fullname" \
.
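## illustrative follow-up: run the freshly built image interactively
# docker run --rm -it "$docker_image_fullname"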
|
#!/bin/sh
#BSUB -q hpc
#BSUB -J train_wifi
#BSUB -n 16
#BSUB -W 8:00
#BSUB -B
#BSUB -N
#BSUB -R span[hosts=1]
#BSUB -R "rusage[mem=4GB]"
#BSUB -o logs/%J.out
#BSUB -e logs/%J.err
module load python3/3.8.4
python3 src/train_model.py --n-epochs=500 wifi wifi-model-cpu
|
#!/usr/bin/env bash
set -Eeuo pipefail
declare -A external_network=(
['openstack-kuryr']='external'
['openstack-vexxhost']='public'
['openstack-vh-mecha']='external'
['openstack']='external'
)
declare -A compute_flavor=(
['openstack-kuryr']='m1.xlarge'
['openstack-vexxhost']='ci.m1.xlarge'
['openstack-vh-mecha']='m1.xlarge'
['openstack']='m1.s2.xlarge'
)
if [[ -z "${OPENSTACK_EXTERNAL_NETWORK:-}" ]]; then
if [[ -z "${CLUSTER_TYPE:-}" ]]; then
echo 'Set CLUSTER_TYPE or OPENSTACK_EXTERNAL_NETWORK'
exit 1
fi
if ! [[ -v external_network[$CLUSTER_TYPE] ]]; then
echo "OPENSTACK_EXTERNAL_NETWORK value for CLUSTER_TYPE '$CLUSTER_TYPE' not known."
exit 1
fi
OPENSTACK_EXTERNAL_NETWORK="${external_network[$CLUSTER_TYPE]}"
fi
if [[ -z "${OPENSTACK_COMPUTE_FLAVOR:-}" ]]; then
if [[ -z "${CLUSTER_TYPE:-}" ]]; then
echo 'Set CLUSTER_TYPE or OPENSTACK_COMPUTE_FLAVOR'
exit 1
fi
if ! [[ -v compute_flavor[$CLUSTER_TYPE] ]]; then
echo "OPENSTACK_COMPUTE_FLAVOR value for CLUSTER_TYPE '$CLUSTER_TYPE' not known."
exit 1
fi
OPENSTACK_COMPUTE_FLAVOR="${compute_flavor[$CLUSTER_TYPE]}"
fi
cat <<< "$OPENSTACK_EXTERNAL_NETWORK" > "${SHARED_DIR}/OPENSTACK_EXTERNAL_NETWORK"
cat <<< "$OPENSTACK_COMPUTE_FLAVOR" > "${SHARED_DIR}/OPENSTACK_COMPUTE_FLAVOR"
# We have to truncate the cluster name to 14 chars because of a limitation in the install-config.
# Now it looks like "ci-op-rl6z646h-65230".
# We remove the "ci-op-" prefix to keep just the last 14 characters, and the name cannot start with a "-".
UNSAFE_CLUSTER_NAME="${NAMESPACE}-${JOB_NAME_HASH}"
cat <<< "${UNSAFE_CLUSTER_NAME#"ci-op-"}" > "${SHARED_DIR}/CLUSTER_NAME"
|
#!/bin/bash
set -euo pipefail
# checksum file
scheme='Branch-xcframework-noidfa'
checksum_file=checksum_noidfa.txt
zip_file=Branch_noidfa.zip
scriptname=$(basename "${BASH_SOURCE[0]}")
scriptpath="${BASH_SOURCE[0]}"
scriptpath=$(cd "$(dirname "${scriptpath}")" && pwd)
cd ${scriptpath}/../carthage-files
# Build
echo "Building Branch.xcframework"
xcodebuild -scheme $scheme
# Move to build folder
cd ${scriptpath}/../carthage-files/build
# Zip the SDK files
echo "Zipping Branch.xcframework"
zip -rqy $zip_file Branch.xcframework/
# Checksum the zip file
echo "Creating Branch checksum"
echo '#checksum for Branch on Github' > "$checksum_file"
shasum $zip_file >> $checksum_file
# Move zip file and checksum
mv $zip_file ..
mv $checksum_file ..
# Remove source frameworks
echo "Cleaning up"
rm -rf Branch.xcframework
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2012:1221
#
# Security announcement date: 2012-09-03 13:10:57 UTC
# Script generation date: 2017-01-13 21:16:42 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - java-1.6.0-openjdk.x86_64:1.6.0.0-1.49.1.11.4.el6_3
# - java-1.6.0-openjdk-debuginfo.x86_64:1.6.0.0-1.49.1.11.4.el6_3
# - java-1.6.0-openjdk-demo.x86_64:1.6.0.0-1.49.1.11.4.el6_3
# - java-1.6.0-openjdk-devel.x86_64:1.6.0.0-1.49.1.11.4.el6_3
# - java-1.6.0-openjdk-javadoc.x86_64:1.6.0.0-1.49.1.11.4.el6_3
# - java-1.6.0-openjdk-src.x86_64:1.6.0.0-1.49.1.11.4.el6_3
#
# Last versions recommended by security team:
# - java-1.6.0-openjdk.x86_64:1.6.0.41-1.13.13.1.el6_8
# - java-1.6.0-openjdk-debuginfo.x86_64:1.6.0.41-1.13.13.1.el6_8
# - java-1.6.0-openjdk-demo.x86_64:1.6.0.41-1.13.13.1.el6_8
# - java-1.6.0-openjdk-devel.x86_64:1.6.0.41-1.13.13.1.el6_8
# - java-1.6.0-openjdk-javadoc.x86_64:1.6.0.41-1.13.13.1.el6_8
# - java-1.6.0-openjdk-src.x86_64:1.6.0.41-1.13.13.1.el6_8
#
# CVE List:
# - CVE-2012-0547
# - CVE-2012-1682
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install java-1.6.0-openjdk.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-debuginfo.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-demo.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-devel.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-javadoc.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-src.x86_64-1.6.0.41 -y
|
#!/bin/bash
set -o pipefail
# Run the Flask server, using the pact_provider.py as the app to be able to
# inject the provider_states endpoint
FLASK_APP=tests/pact_provider.py python -m flask run -p 5001 &
FLASK_PID=$!
# Make sure the Flask server is stopped when finished to avoid blocking the port
function teardown {
echo "Tearing down Flask server: ${FLASK_PID}"
kill -9 $FLASK_PID
}
trap teardown EXIT
# Wait a little in case Flask isn't quite ready
sleep 1
VERSION=$1
if [ -z "$VERSION" ];
then
echo "Validating provider locally"
pact-verifier \
--provider-base-url=http://localhost:5001 \
--provider-states-setup-url=http://localhost:5001/_pact/provider_states \
../pacts/userserviceclient-userservice.json
else
echo "Validating against Pact Broker"
pact-verifier \
--provider-base-url=http://localhost:5001 \
--provider-app-version $VERSION \
--pact-url="http://127.0.0.1/pacts/provider/UserService/consumer/UserServiceClient/latest" \
--pact-broker-username pactbroker \
--pact-broker-password pactbroker \
--publish-verification-results \
--provider-states-setup-url=http://localhost:5001/_pact/provider_states
fi
|
#!/bin/bash
set -eo pipefail
pod trunk push --allow-warnings MQTTClientGJ.podspec
pod trunk push --allow-warnings CourierCore.podspec
pod trunk push --allow-warnings --synchronous CourierMQTT.podspec
pod trunk push --allow-warnings --synchronous CourierProtobuf.podspec |
# in theory, this command should work:
# allennlp train experiments/venue_classifier.json -s tmp/venue/out_dir --include-package text_mortality
# the reason is that `import text_mortality` works
python -c "import sys; print(sys.path)"
|
#!/usr/bin/env sh
set -x
export success=true
find . -iname '*.rs' | while read -r file; do
result=$(grep '.\{79\}' "${file}" | grep --invert 'http')
if [ "${result}" = "" ]
then
:
else
echo "file \"${file}\": $result"
exit 1
fi
done
|
#!/bin/bash
docker build . -t jhoppensteadt/castnow:$(date +%F) -t jhoppensteadt/castnow:latest
|
#!/usr/bin/env bash
set -u
ENV=$1
GRAPH_CONFIG=$2
SAVE_FILE="$(dirname $GRAPH_CONFIG)"
python -m topological_nav.tools.build_nav_graph \
--env="${ENV}" \
--graph_config="${GRAPH_CONFIG}" \
--save_file="${SAVE_FILE}/graph.pickle" \
#| tee "$(dirname $GRAPH_CONFIG)/build_log.txt"
|
#!/bin/bash
REPOS=" \
baremetal-operator \
cluster-api-provider-metal3 \
hardware-classification-controller \
ip-address-manager \
ironic-agent-image \
ironic-client \
ironic-image \
ironic-hardware-inventory-recorder-image \
ironic-ipa-downloader \
metal3-io.github.io \
metal3-dev-env \
metal3-docs \
metal3-helm-chart \
project-infra \
static-ip-manager-image \
"
all_owners_raw() {
for repo in $REPOS; do
if [ "$repo" = "metal3-io.github.io" ]; then
filter='.filters.".*".approvers'
else
filter='.approvers'
fi
curl -s "https://raw.githubusercontent.com/metal3-io/$repo/main/OWNERS" | \
yq -y $filter | \
grep -v "null" | \
grep -v "\.\.\."
done
}
echo "# All approvers from all top-level OWNERS files"
echo "# See metal3-docs/maintainers/all-owners.sh"
echo
echo "approvers:"
all_owners_raw | \
tr '[:upper:]' '[:lower:]' | \
sort -u
|
#! /bin/sh -e
###############################################################################
#
# Licensed Materials - Property of IBM
#
# (C) Copyright IBM Corp. 2022. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
###############################################################################
## Load default configuration values
. ./config.sh
## Initial banner
echo
echo Installing and configuring Logging Stack...
## Create Namespace for the Elasticsearch Operator.
echo
echo Creating Namespace for the Elasticsearch Operator
cp eo-namespace.template.yaml eo-namespace.yaml
oc apply -f eo-namespace.yaml
## Create Namespace for the Cluster Logging Operator
echo
echo Creating Namespace for the Cluster Logging Operator
cp clo-namespace.template.yaml clo-namespace.yaml
oc apply -f clo-namespace.yaml
## Create Operator Group object
echo
echo Creating Operator Group object for Elasticsearch Operator
cp eo-operatorgroup.template.yaml eo-operatorgroup.yaml
oc apply -f eo-operatorgroup.yaml
## Create Subscription object
echo
echo Creating Subscription object for Elasticsearch Operator
cp eo-subscription.template.yaml eo-subscription.yaml
oc apply -f eo-subscription.yaml
## Create Operator Group object
echo
echo Creating Operator Group object for Cluster Logging Operator
cp clo-operatorgroup.template.yaml clo-operatorgroup.yaml
oc apply -f clo-operatorgroup.yaml
## Create Subscription object
echo
echo Creating Subscription object for Cluster Logging Operator
cp clo-subscription.template.yaml clo-subscription.yaml
oc apply -f clo-subscription.yaml
## Wait for cluster resource definition to be created before continuing
WAIT_TIME=10
TRIES=0
# The next line is a little obscure: we store the number of matches found by grep.
# Zero means it could not find anything, and not finding anything means the custom resource has not been
# defined by the operator just yet. Using || true after the grep command ensures that the script will not exit,
# since we are using the -e flag and grep returns a non-zero code if the string we are grepping for is not found.
CRD=$(oc get crd | grep -c clusterloggings || true)
echo
echo Waiting up to five minutes for CRD clusterloggings.logging.openshift.io to be available
date
while [ $CRD -eq 0 ] && [ $TRIES -lt 30 ]
do
sleep $WAIT_TIME
TRIES=$(( $TRIES + 1 ))
CRD=$(oc get crd | grep -c clusterloggings || true )
done
if [ $CRD -eq 0 ]
then
echo Could not find custom resource clusterloggings.logging.openshift.io. Please check the state of the ClusterLogging operator.
exit 1
else
echo Found custom resource clusterloggings.logging.openshift.io.
fi
## Replacing template values for Cluster Logging Instance
cp clo-instance.template.yaml clo-instance.yaml
sed -i.bak "s|paramElasticSearchNodeCount|$paramElasticSearchNodeCount|g" clo-instance.yaml
sed -i.bak "s|paramClusterLoggingStorageClass|$paramClusterLoggingStorageClass|g" clo-instance.yaml
sed -i.bak "s|paramFluentdBufferTotalLimitSize|$paramFluentdBufferTotalLimitSize|g" clo-instance.yaml
sed -i.bak "s|paramFluentDBufferFlushThreadCount|$paramFluentDBufferFlushThreadCount|g" clo-instance.yaml
## Create a Cluster Logging instance.
echo
echo Creating Cluster Logging Instance
oc apply -f clo-instance.yaml
## Wait for ElasticSearch PODs to be Running
echo
echo Waiting up to 10 minutes for all $paramElasticSearchNodeCount elasticsearch PODs to be in Running state.
date
WAIT_TIME=10
TRIES=0
ES_PODS_RUNNING=$(oc get pods -l component=elasticsearch --no-headers -n openshift-logging 2>/dev/null | grep -c Running || true)
while [ $ES_PODS_RUNNING -ne $paramElasticSearchNodeCount ] && [ $TRIES -lt 60 ]
do
sleep 10
TRIES=$(( $TRIES + 1 ))
ES_PODS_RUNNING=$(oc get pods -l component=elasticsearch --no-headers -n openshift-logging 2>/dev/null | grep -c Running || true)
done
if [ $ES_PODS_RUNNING -eq $paramElasticSearchNodeCount ]
then
echo All $paramElasticSearchNodeCount elastic search PODs are now Running.
else
echo Timed out waiting for elastic search PODs to be in Running state. Exiting...
exit 1
fi
## Wait for ElasticSearch Cluster to be Ready
echo
echo Waiting up to 10 minutes for elastic search cluster to be ready.
date
WAIT_TIME=10
TRIES=0
# Find ES pod needed to run command
ES_POD=$(oc get pods -l component=elasticsearch --no-headers -n openshift-logging | cut -d " " -f1 | head -1)
# Looking for ES cluster to report green. Using internal es_cluster_health command found in ES pod to find out status
ES_CLUSTER_UP=$(oc exec -n openshift-logging -c elasticsearch $ES_POD -- es_cluster_health | grep status | grep green || true)
while [ -z "$ES_CLUSTER_UP" ] && [ $TRIES -lt 60 ]
do
sleep 10
TRIES=$(( $TRIES + 1 ))
ES_CLUSTER_UP=$(oc exec -n openshift-logging -c elasticsearch $ES_POD -- es_cluster_health 2>/dev/null | grep status | grep green || true)
done
if [ -z "$ES_CLUSTER_UP" ]
then
echo Timed out waiting for elastic search cluster to be ready. Exiting...
exit 1
else
echo Elastic search cluster is ready.
fi
## Clean up elastic search jobs that may have been triggered before ES cluster was ready.
echo
echo Cleaning failed premature cron job instances
for failedJob in $(oc get pods -l component=indexManagement --no-headers -n openshift-logging | grep Error | cut -d " " -f1)
do
oc delete pod $failedJob -n openshift-logging
done
## Wait for kibana route to be created
echo
echo Waiting up to 5 minutes for Kibana route to be created
date
KIBANA_ROUTE_NAME=kibana
EXIST=$(oc get route $KIBANA_ROUTE_NAME -n openshift-logging 2>/dev/null | grep -c $KIBANA_ROUTE_NAME || true)
WAIT_TIME=10
TRIES=0
while [ $EXIST -eq 0 ] && [ $TRIES -lt 30 ]
do
echo Could not find route resource for Kibana. Waiting for $WAIT_TIME seconds before trying again
sleep $WAIT_TIME
TRIES=$(( $TRIES + 1 ))
EXIST=$(oc get route $KIBANA_ROUTE_NAME -n openshift-logging 2>/dev/null | grep -c $KIBANA_ROUTE_NAME || true)
done
if [ $EXIST -eq 0 ]
then
echo Could not find route resource for Kibana. Please check status of ClusterLogging instance.
exit 1
else
echo Found route resource for Kibana.
fi
## Get Kibana service endpoint
KIBANA=$(oc get route $KIBANA_ROUTE_NAME -n openshift-logging -o jsonpath='{.spec.host}')
# If we got to this point things have been configured properly.
echo
echo "*********************************************************************************"
echo "********* Logging Stack configured successfully! *********"
echo "********* Please follow manual steps listed below *********"
echo "*********************************************************************************"
echo
## Remaining setup is currently manual
cat << EOF
MANUAL STEPS TO FOLLOW:
- Go to your Kibana instance found here:
https://$KIBANA
- Define the following index patterns. Use @timestamp as the time field for each pattern.
app-*
infra-*
EOF
|
#!/usr/bin/env bash
if [ -n "$(gofmt -l .)" ]; then
echo "Go code is not formatted:"
gofmt -d .
exit 1
fi |
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Groundpluscoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 20056, but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packets matching these criteria with the number "2"
# these packets are filtered by the tc filter with "handle 2"
# this filter sends the packets into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 20056 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 20056 ! -d ${LOCALNET} -j MARK --set-mark 0x2
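# To verify the shaping rules took effect (illustrative):
# tc -s class show dev ${IF}
# iptables -t mangle -L OUTPUT -v -n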
|
# Calls ./mvnw if found, otherwise execute the original mvn
mvn-or-mvnw() {
if [ -x ./mvnw ]; then
echo "executing mvnw instead of mvn"
./mvnw "$@"
else
command mvn "$@"
fi
}
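# Illustrative: in a project with an executable ./mvnw, running `mvn clean verify`
# (through the alias defined below) transparently executes `./mvnw clean verify` instead.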
# Wrapper function for Maven's mvn command. Based on https://gist.github.com/1027800
mvn-color() {
local BOLD=$(echoti bold)
local TEXT_RED=$(echoti setaf 1)
local TEXT_GREEN=$(echoti setaf 2)
local TEXT_YELLOW=$(echoti setaf 3)
local TEXT_BLUE=$(echoti setaf 4)
local TEXT_WHITE=$(echoti setaf 7)
local RESET_FORMATTING=$(echoti sgr0)
(
# Filter mvn output using sed. Before filtering set the locale to C, so invalid characters won't break some sed implementations
unset LANG
LC_CTYPE=C mvn "$@" | sed \
-e "s/\(\[INFO\]\)\(.*\)/${TEXT_BLUE}${BOLD}\1${RESET_FORMATTING}\2/g" \
-e "s/\(\[DEBUG\]\)\(.*\)/${TEXT_WHITE}${BOLD}\1${RESET_FORMATTING}\2/g" \
-e "s/\(\[INFO\]\ BUILD SUCCESSFUL\)/${BOLD}${TEXT_GREEN}\1${RESET_FORMATTING}/g" \
-e "s/\(\[WARNING\]\)\(.*\)/${BOLD}${TEXT_YELLOW}\1${RESET_FORMATTING}\2/g" \
-e "s/\(\[ERROR\]\)\(.*\)/${BOLD}${TEXT_RED}\1${RESET_FORMATTING}\2/g" \
-e "s/Tests run: \([^,]*\), Failures: \([^,]*\), Errors: \([^,]*\), Skipped: \([^,]*\)/${BOLD}${TEXT_GREEN}Tests run: \1${RESET_FORMATTING}, Failures: ${BOLD}${TEXT_RED}\2${RESET_FORMATTING}, Errors: ${BOLD}${TEXT_RED}\3${RESET_FORMATTING}, Skipped: ${BOLD}${TEXT_YELLOW}\4${RESET_FORMATTING}/g"
# Make sure formatting is reset
echo -ne "${RESET_FORMATTING}"
)
}
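# Illustrative usage: `mvn-color clean install` runs the same build with colorized
# [INFO]/[WARNING]/[ERROR] prefixes and test-summary lines.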
# either use original mvn or the mvn wrapper
alias mvn="mvn-or-mvnw"
# Run mvn against the pom found in a project's root directory (assumes a git repo)
alias 'mvn!'='mvn -f $(git rev-parse --show-toplevel 2>/dev/null || echo ".")/pom.xml'
# aliases
alias mvnag='mvn archetype:generate'
alias mvnboot='mvn spring-boot:run'
alias mvnc='mvn clean'
alias mvncd='mvn clean deploy'
alias mvnce='mvn clean eclipse:clean eclipse:eclipse'
alias mvnci='mvn clean install'
alias mvncie='mvn clean install eclipse:eclipse'
alias mvncini='mvn clean initialize'
alias mvncist='mvn clean install -DskipTests'
alias mvncisto='mvn clean install -DskipTests --offline'
alias mvncom='mvn compile'
alias mvncp='mvn clean package'
alias mvnct='mvn clean test'
alias mvncv='mvn clean verify'
alias mvncvst='mvn clean verify -DskipTests'
alias mvnd='mvn deploy'
alias mvndocs='mvn dependency:resolve -Dclassifier=javadoc'
alias mvndt='mvn dependency:tree'
alias mvne='mvn eclipse:eclipse'
alias mvnfmt='mvn fmt:format'
alias mvnjetty='mvn jetty:run'
alias mvnp='mvn package'
alias mvnqdev='mvn quarkus:dev'
alias mvns='mvn site'
alias mvnsrc='mvn dependency:sources'
alias mvnt='mvn test'
alias mvntc='mvn tomcat:run'
alias mvntc7='mvn tomcat7:run'
alias mvn-updates='mvn versions:display-dependency-updates'
alias mci='mvn clean install -DskipTests'
alias mcit='mvn clean install -DskipITs'
alias mciit='mvn clean install'
function listMavenCompletions {
local file new_file
local -a profiles POM_FILES modules
# Root POM
POM_FILES=(~/.m2/settings.xml)
# POM in the current directory
if [[ -f pom.xml ]]; then
local file=pom.xml
POM_FILES+=("${file:A}")
fi
# Look for POM files in parent directories
while [[ -n "$file" ]] && grep -q "<parent>" "$file"; do
# look for a new relativePath for parent pom.xml
new_file=$(grep -e "<relativePath>.*</relativePath>" "$file" | sed -e 's/.*<relativePath>\(.*\)<\/relativePath>.*/\1/')
# if <parent> is present but not defined, assume ../pom.xml
if [[ -z "$new_file" ]]; then
new_file="../pom.xml"
fi
# if file doesn't exist break
file="${file:h}/${new_file}"
if ! [[ -e "$file" ]]; then
break
fi
POM_FILES+=("${file:A}")
done
# Get profiles from found files
for file in $POM_FILES; do
[[ -e $file ]] || continue
profiles+=($(sed 's/<!--.*-->//' "$file" | sed '/<!--/,/-->/d' | grep -e "<profile>" -A 1 | grep -e "<id>.*</id>" | sed 's?.*<id>\(.*\)<\/id>.*?-P\1?'))
done
# List modules
modules=($(find **/pom.xml -type f | grep -v '/target/classes/META-INF/' | grep '/pom.xml' |sed 's|\(.*\)/pom\.xml|\1|'))
reply=(
# common lifecycle
clean initialize process-resources compile process-test-resources test-compile test package verify install deploy site
# integration testing
pre-integration-test integration-test
# common plugins
deploy failsafe install site surefire checkstyle javadoc jxr pmd ant antrun archetype assembly dependency enforcer gpg help release repository source eclipse idea jetty cargo jboss tomcat tomcat6 tomcat7 exec versions war ear ejb android scm buildnumber nexus repository sonar license hibernate3 liquibase flyway gwt
# deploy
deploy:deploy-file
# failsafe
failsafe:integration-test failsafe:verify
# install
install:install-file install:help
# site
site:site site:deploy site:run site:stage site:stage-deploy site:attach-descriptor site:jar site:effective-site
# surefire
surefire:test
# checkstyle
checkstyle:checkstyle checkstyle:check checkstyle:checkstyle-aggregate
# javadoc
javadoc:javadoc javadoc:test-javadoc javadoc:javadoc-no-fork javadoc:test-javadoc-no-fork javadoc:aggregate javadoc:test-aggregate javadoc:jar javadoc:test-jar javadoc:aggregate-jar javadoc:test-aggregate-jar javadoc:fix javadoc:test-fix javadoc:resource-bundle javadoc:test-resource-bundle
# jxr
jxr:jxr jxr:aggregate jxr:test-jxr jxr:test-aggregate
# pmd
pmd:pmd pmd:cpd pmd:check pmd:cpd-check
# ant
ant:ant ant:clean
# antrun
antrun:run
# archetype
archetype:generate archetype:create-from-project archetype:crawl
# assembly
assembly:single assembly:assembly
# dependency
dependency:analyze dependency:analyze-dep-mgt dependency:analyze-only dependency:analyze-report dependency:analyze-duplicate dependency:build-classpath dependency:copy dependency:copy-dependencies dependency:display-ancestors dependency:get dependency:go-offline dependency:list dependency:list-repositories dependency:properties dependency:purge-local-repository dependency:resolve dependency:resolve-plugins dependency:sources dependency:tree dependency:unpack dependency:unpack-dependencies
# enforcer
enforcer:enforce enforcer:display-info
# gpg
gpg:sign gpg:sign-and-deploy-file
# help
help:active-profiles help:all-profiles help:describe help:effective-pom help:effective-settings help:evaluate help:expressions help:system
# release
release:clean release:prepare release:prepare-with-pom release:rollback release:perform release:stage release:branch release:update-versions
# jgitflow
jgitflow:feature-start jgitflow:feature-finish jgitflow:release-start jgitflow:release-finish jgitflow:hotfix-start jgitflow:hotfix-finish jgitflow:build-number
# repository
repository:bundle-create repository:bundle-pack
# source
source:aggregate source:jar source:jar-no-fork source:test-jar source:test-jar-no-fork
# eclipse
eclipse:clean eclipse:eclipse
# idea
idea:clean idea:idea
# jetty
jetty:run jetty:run-exploded
# cargo
cargo:start cargo:run cargo:stop cargo:deploy cargo:undeploy cargo:help
# jboss
jboss:start jboss:stop jboss:deploy jboss:undeploy jboss:redeploy
# tomcat
tomcat:start tomcat:stop tomcat:deploy tomcat:undeploy tomcat:redeploy
# tomcat6
tomcat6:run tomcat6:run-war tomcat6:run-war-only tomcat6:stop tomcat6:deploy tomcat6:undeploy
# tomcat7
tomcat7:run tomcat7:run-war tomcat7:run-war-only tomcat7:deploy
# tomee
tomee:run tomee:run-war tomee:run-war-only tomee:stop tomee:deploy tomee:undeploy
# spring-boot
spring-boot:run spring-boot:repackage
# quarkus
quarkus:dev quarkus:list-extensions quarkus:add-extension quarkus:add-extensions quarkus:generate-config quarkus:help
# exec
exec:exec exec:java
# versions
versions:display-dependency-updates versions:display-plugin-updates versions:display-property-updates versions:update-parent versions:update-properties versions:update-child-modules versions:lock-snapshots versions:unlock-snapshots versions:resolve-ranges versions:set versions:use-releases versions:use-next-releases versions:use-latest-releases versions:use-next-snapshots versions:use-latest-snapshots versions:use-next-versions versions:use-latest-versions versions:commit versions:revert
# scm
scm:add scm:bootstrap scm:branch scm:changelog scm:check-local-modification scm:checkin scm:checkout scm:diff scm:edit scm:export scm:list scm:remove scm:status scm:tag scm:unedit scm:update scm:update-subprojects scm:validate
# buildnumber
buildnumber:create buildnumber:create-timestamp buildnumber:help buildnumber:hgchangeset
# war
war:war war:exploded war:inplace war:manifest
# ear
ear:ear ear:generate-application-xml
# ejb
ejb:ejb
# android
android:apk android:apklib android:deploy android:deploy-dependencies android:dex android:emulator-start android:emulator-stop android:emulator-stop-all android:generate-sources android:help android:instrument android:manifest-update android:pull android:push android:redeploy android:run android:undeploy android:unpack android:version-update android:zipalign android:devices
# nexus
nexus:staging-list nexus:staging-close nexus:staging-drop nexus:staging-release nexus:staging-build-promotion nexus:staging-profiles-list nexus:settings-download
# repository
repository:bundle-create repository:bundle-pack repository:help
# sonar
sonar:sonar
# license
license:format license:check
# hibernate3
hibernate3:hbm2ddl hibernate3:help
# liquibase
liquibase:changelogSync liquibase:changelogSyncSQL liquibase:clearCheckSums liquibase:dbDoc liquibase:diff liquibase:dropAll liquibase:help liquibase:migrate liquibase:listLocks liquibase:migrateSQL liquibase:releaseLocks liquibase:rollback liquibase:rollbackSQL liquibase:status liquibase:tag liquibase:update liquibase:updateSQL liquibase:updateTestingRollback
# flyway
flyway:clean flyway:history flyway:init flyway:migrate flyway:status flyway:validate
# gwt
gwt:browser gwt:clean gwt:compile gwt:compile-report gwt:css gwt:debug gwt:eclipse gwt:eclipseTest gwt:generateAsync gwt:help gwt:i18n gwt:mergewebxml gwt:resources gwt:run gwt:sdkInstall gwt:source-jar gwt:soyc gwt:test
# asciidoctor
asciidoctor:process-asciidoc asciidoctor:auto-refresh asciidoctor:http asciidoctor:zip
# compiler
compiler:compile compiler:testCompile
# resources
resources:resources resources:testResources resources:copy-resources
# verifier
verifier:verify
# jar
jar:jar jar:test-jar
# rar
rar:rar
# acr
acr:acr
# shade
shade:shade
# changelog
changelog:changelog changelog:dev-activity changelog:file-activity
# changes
changes:announcement-mail changes:announcement-generate changes:changes-check changes:changes-validate changes:changes-report changes:jira-report changes:trac-report changes:github-report
# doap
doap:generate
# docck
docck:check
# jdeps
jdeps:jdkinternals jdeps:test-jdkinternals
# linkcheck
linkcheck:linkcheck
# project-info-reports
project-info-reports:cim project-info-reports:dependencies project-info-reports:dependency-convergence project-info-reports:dependency-info project-info-reports:dependency-management project-info-reports:distribution-management project-info-reports:help project-info-reports:index project-info-reports:issue-tracking project-info-reports:license project-info-reports:mailing-list project-info-reports:modules project-info-reports:plugin-management project-info-reports:plugins project-info-reports:project-team project-info-reports:scm project-info-reports:summary
# surefire-report
surefire-report:failsafe-report-only surefire-report:report surefire-report:report-only
# invoker
invoker:install invoker:integration-test invoker:verify invoker:run
# jarsigner
jarsigner:sign jarsigner:verify
# patch
patch:apply
# pdf
pdf:pdf
# plugin
plugin:descriptor plugin:report plugin:updateRegistry plugin:addPluginArtifactMetadata plugin:helpmojo
# remote-resources
remote-resources:bundle remote-resources:process
# scm-publish
scm-publish:help scm-publish:publish-scm scm-publish:scmpublish
# stage
stage:copy
# toolchain
toolchain:toolchain
#liberty
liberty:clean-server liberty:compile-jsp liberty:configure-arquillian liberty:create-server liberty:debug liberty:debug-server liberty:deploy liberty:dev liberty:display-url liberty:dump-server liberty:install-apps liberty:install-feature liberty:install-server liberty:java-dump-server liberty:package-server liberty:run liberty:run-server liberty:server-status liberty:start liberty:start-server liberty:status liberty:stop liberty:stop-server liberty:test-start-server liberty:test-stop-server liberty:undeploy liberty:uninstall-feature
# options
"-Dmaven.test.skip=true" -DskipTests -DskipITs -Dmaven.surefire.debug -DenableCiProfile "-Dpmd.skip=true" "-Dcheckstyle.skip=true" "-Dtycho.mode=maven" "-Dmaven.test.failure.ignore=true" "-DgroupId=" "-DartifactId=" "-Dversion=" "-Dpackaging=jar" "-Dfile="
# arguments
-am --also-make
-amd --also-make-dependents
-B --batch-mode
-b --builder
-C --strict-checksums
-c --lax-checksums
-cpu --check-plugin-updates
-D --define
-e --errors
-emp --encrypt-master-password
-ep --encrypt-password
-f --file
-fae --fail-at-end
-ff --fail-fast
-fn --fail-never
-gs --global-settings
-gt --global-toolchains
-h --help
-l --log-file
-llr --legacy-local-repository
-N --non-recursive
-npr --no-plugin-registry
-npu --no-plugin-updates
-nsu --no-snapshot-updates
-o --offline
-P --activate-profiles
-pl --projects
-q --quiet
-rf --resume-from
-s --settings
-t --toolchains
-T --threads
-U --update-snapshots
-up --update-plugins
-v --version
-V --show-version
-X --debug
cli:execute cli:execute-phase
archetype:generate generate-sources
cobertura:cobertura
-Dtest=$(if [ -d ./src/test/java ] ; then find ./src/test/java -type f -name '*.java' | grep -v svn | sed 's?.*/\([^/]*\)\..*?-Dtest=\1?' ; fi)
-Dit.test=$(if [ -d ./src/test/java ] ; then find ./src/test/java -type f -name '*.java' | grep -v svn | sed 's?.*/\([^/]*\)\..*?-Dit.test=\1?' ; fi)
$profiles
$modules
)
}
compctl -K listMavenCompletions mvn mvnw
compctl -K listMavenCompletions mvn-color
compctl -K listMavenCompletions mvn-or-mvnw
|
#!/bin/sh
set -e
./get-source.sh v2
# TODO: Fix conflicts with emulator patches; revert this patch for now
patch -d work -R <Oberon2013Modifications/FontConversion/RemoveGlyphWidthLimit.patch
patch -d work <../ProposedPatches/log-allocation-failures.patch -F 3
patch -d work <../ProposedPatches/better-display-compatibility.patch
patch -d work <../ProposedPatches/filesystem-encapsulation.patch
./apply-emulator-patches.sh
cp OberonFromScratch.Tool.JavaScript.txt work/OberonFromScratch.Tool.txt
patch -d work <paravirtualized-keyboard.patch
patch -d work -F 3 <paravirtualized-disk.patch
patch -d work <power-management.patch
patch -d work <power-management-keyboard-unresponsive.patch
patch -d work <reduce-filesystem-offset.patch
patch -d work <js-bigmem-display.patch
./derive-files.sh v2
patch -d work <fix-js-start-offset.patch
[ -z "$1" ] && exit 0
./compile-image.sh "$1" FullDiskImage 'MB=?' MinimalDiskImage
|
pkg_origin=habitat
pkg_name=builder-datastore
pkg_internal_version=11.2
pkg_internal_name=postgresql11
pkg_description="Datastore service for a Habitat Builder service"
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_license=("PostgreSQL")
pkg_source="https://ftp.postgresql.org/pub/source/v${pkg_internal_version}/postgresql-${pkg_internal_version}.tar.bz2"
pkg_shasum="2676b9ce09c21978032070b6794696e0aa5a476e3d21d60afc036dc0a9c09405"
pkg_dirname="postgresql-${pkg_internal_version}"
pkg_deps=(
core/bash
core/glibc
core/openssl
core/perl
core/readline
core/zlib
core/libossp-uuid
)
pkg_build_deps=(
core/coreutils
core/gcc
core/make
core/git
)
pkg_bin_dirs=(bin)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib)
pkg_exports=(
[port]=port
)
pkg_exposes=(port)
pkg_version() {
# TED: After migrating the builder repo we needed to add to
# the rev-count to keep version sorting working
echo "$(($(git rev-list master --count) + 5000))"
}
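# Illustrative: with 1234 commits on master this yields a pkg_version of 6234.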
ext_semver_version=0.17.0
ext_semver_source=https://github.com/theory/pg-semver/archive/v${ext_semver_version}.tar.gz
ext_semver_filename=pg-semver-${ext_semver_version}.tar.gz
ext_semver_shasum=031046695b143eb545a2856c5d139ebf61ae4e2f68cccb1f21b700ce65d0cd60
do_before() {
update_pkg_version
ext_semver_dirname="pg-semver-${ext_semver_version}"
ext_semver_cache_path="$HAB_CACHE_SRC_PATH/${ext_semver_dirname}"
}
do_download() {
do_default_download
download_file $ext_semver_source $ext_semver_filename $ext_semver_shasum
}
do_verify() {
do_default_verify
verify_file $ext_semver_filename $ext_semver_shasum
}
do_clean() {
do_default_clean
rm -rf "$ext_semver_cache_path"
}
do_unpack() {
do_default_unpack
unpack_file $ext_semver_filename
}
do_build() {
# shellcheck disable=2154
# ld manpage: "If -rpath is not used when linking an ELF
# executable, the contents of the environment variable LD_RUN_PATH
# will be used if it is defined"
./configure --disable-rpath \
--with-openssl \
--prefix="$pkg_prefix" \
--with-uuid=ossp \
--with-includes="$LD_INCLUDE_PATH" \
--with-libraries="$LD_LIBRARY_PATH" \
--sysconfdir="$pkg_svc_config_path" \
--localstatedir="$pkg_svc_var_path"
make world
# semver can't be built until after postgresql is installed to $pkg_prefix
}
do_install() {
make install-world
# make and install semver extension
export PATH="${PATH}:${pkg_prefix}/bin"
build_line "Added postgresql binaries to PATH: ${pkg_prefix}/bin"
pushd "$ext_semver_cache_path" > /dev/null || exit
build_line "Building ${ext_semver_dirname}"
make
build_line "Installing ${ext_semver_dirname}"
make install
popd > /dev/null || exit
}
|
#!/bin/ksh
# +----------------------------------------------------------------------------+
# | Jeffrey M. Hunter |
# | [email protected] |
# | www.idevelopment.info |
# |----------------------------------------------------------------------------|
# | Copyright (c) 1998-2015 Jeffrey M. Hunter. All rights reserved. |
# |----------------------------------------------------------------------------|
# | DATABASE : Oracle |
# | FILE : iscsi-ls-map.ksh |
# | CLASS : UNIX Shell Scripts |
# | PURPOSE : Script used to generate a full mapping of iSCSI target names |
# | to local SCSI device names. |
# | PARAMETERS : None. |
# | |
# | EXAMPLE |
# | OUTPUT : Host / SCSI ID SCSI Device Name iSCSI Target Name |
# | ---------------- ------------------------ ----------------- |
# | 0 /dev/sda1 asm4 |
# | 1 /dev/sdb1 asm3 |
# | 2 /dev/sdd1 asm2 |
# | 3 /dev/sdc1 asm1 |
# | 4 /dev/sde1 crs |
# | |
# | NOTE : As with any code, ensure to test this script in a development |
# | environment before attempting to run it in production. |
# +----------------------------------------------------------------------------+
RUN_USERID=root
export RUN_USERID
RUID=`id | awk -F\( '{print $2}'|awk -F\) '{print $1}'`
if [[ ${RUID} != "$RUN_USERID" ]];then
echo " "
echo "You must be logged in as $RUN_USERID to run this script."
echo "Exiting script."
echo " "
exit 1
fi
dmesg | grep "^Attach" \
| awk -F" " '{ print "/dev/"$4 "1 " $6 }' \
| sed -e 's/,//' | sed -e 's/scsi//' \
| sort -n -k2 \
| sed -e '/disk1/d' > /tmp/tmp_scsi_dev
iscsi-ls | egrep -e "TARGET NAME" -e "HOST ID" \
| awk -F" " '{ if ($0 ~ /^TARGET.*/) printf $4; if ( $0 ~ /^HOST/) printf " %s\n",$4}' \
| sort -n -k2 \
| cut -d':' -f2- \
| cut -d'.' -f2- > /tmp/tmp_scsi_targets
join -t" " -1 2 -2 2 /tmp/tmp_scsi_dev /tmp/tmp_scsi_targets > MAP
echo "Host / SCSI ID SCSI Device Name iSCSI Target Name"
echo "---------------- ------------------------ -----------------"
cat MAP | sed -e 's/ / /g'
rm -f MAP
|
export [email protected]
export MAIL_PASSWORD=51201003@77
export SECRET_KEY='pitch'
python3.6 manage.py server |
#!/bin/bash
set -e
set -x
apt-get -qq update
apt-get install -y dwz wget make git gcc curl jq
dwz --version
version=$1
arch=$2
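# Informational only: print the newest patch release matching the requested Go version
# (the result of this pipeline is not captured or used below).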
curl 'https://golang.org/dl/?mode=json&include=all' | jq '.[].version' --raw-output | egrep ^go$version'($|\.|beta|rc)' | head -1 | cut -c3-
echo "Go $version on $arch"
export GOROOT=/usr/local/go/"$version"
if [ ! -d "$GOROOT" ]; then
wget -q https://dl.google.com/go/"${version}".linux-"${arch}".tar.gz
mkdir -p /usr/local/go
tar -C /usr/local/go -xzf "${version}".linux-"${arch}".tar.gz
mv -f /usr/local/go/go "$GOROOT"
fi
GOPATH=$(pwd)/go
export GOPATH
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin
go version
uname -a
echo "$PATH"
echo "$GOROOT"
echo "$GOPATH"
cd delve
make test
|
#!/bin/sh
# micro-integrator.sh
# ----------------------------------------------------------------------------
# Copyright 2018 WSO2, Inc. http://www.wso2.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cygwin=false;
darwin=false;
os400=false;
mingw=false;
case "`uname`" in
CYGWIN*) cygwin=true;;
MINGW*) mingw=true;;
OS400*) os400=true;;
Darwin*) darwin=true
if [ -z "$JAVA_VERSION" ] ; then
JAVA_VERSION="CurrentJDK"
else
echo "Using Java version: $JAVA_VERSION"
fi
if [ -z "$JAVA_HOME" ] ; then
JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home
fi
;;
esac
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
# Get standard environment variables
PRGDIR=`dirname "$PRG"`
# Only set CARBON_HOME if not already set
[ -z "$CARBON_HOME" ] && CARBON_HOME=`cd "$PRGDIR/.." ; pwd`
# Set AXIS2_HOME. Needed for One Click JAR Download
AXIS2_HOME="$CARBON_HOME"
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CARBON_HOME" ] && CARBON_HOME=`cygpath --unix "$CARBON_HOME"`
[ -n "$AXIS2_HOME" ] && CARBON_HOME=`cygpath --unix "$CARBON_HOME"`
fi
# For OS400
if $os400; then
# Set job priority to standard for interactive (interactive - 6) by using
# the interactive priority - 6, the helper threads that respond to requests
# will be running at the same priority as interactive jobs.
COMMAND='chgjob job('$JOBNAME') runpty(6)'
system $COMMAND
# Enable multi threading
QIBM_MULTI_THREADED=Y
export QIBM_MULTI_THREADED
fi
# For MinGW, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$CARBON_HOME" ] &&
CARBON_HOME="`(cd "$CARBON_HOME"; pwd)`"
[ -n "$JAVA_HOME" ] &&
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
[ -n "$AXIS2_HOME" ] &&
CARBON_HOME="`(cd "$CARBON_HOME"; pwd)`"
# TODO classpath?
fi
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD=java
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly."
echo " CARBON cannot execute $JAVACMD"
exit 1
fi
# if JAVA_HOME is not set we're not happy
if [ -z "$JAVA_HOME" ]; then
echo "You must set the JAVA_HOME variable before running CARBON."
exit 1
fi
if [ -e "$CARBON_HOME/wso2carbon.pid" ]; then
PID=`cat "$CARBON_HOME"/wso2carbon.pid`
fi
# ----- Process the input command ----------------------------------------------
args=""
for c in $*
do
if [ "$c" = "--debug" ] || [ "$c" = "-debug" ] || [ "$c" = "debug" ]; then
CMD="--debug"
continue
elif [ "$CMD" = "--debug" ]; then
if [ -z "$PORT" ]; then
PORT=$c
fi
elif [ "$c" = "--stop" ] || [ "$c" = "-stop" ] || [ "$c" = "stop" ]; then
CMD="stop"
elif [ "$c" = "--start" ] || [ "$c" = "-start" ] || [ "$c" = "start" ]; then
CMD="start"
elif [ "$c" = "--version" ] || [ "$c" = "-version" ] || [ "$c" = "version" ]; then
CMD="version"
elif [ "$c" = "--restart" ] || [ "$c" = "-restart" ] || [ "$c" = "restart" ]; then
CMD="restart"
else
args="$args $c"
fi
done
if [ "$CMD" = "--debug" ]; then
if [ "$PORT" = "" ]; then
echo " Please specify the debug port after the --debug option"
exit 1
fi
if [ -n "$JAVA_OPTS" ]; then
echo "Warning !!!. User specified JAVA_OPTS will be ignored, once you give the --debug option."
fi
CMD="RUN"
JAVA_OPTS="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=$PORT"
echo "Please start the remote debugging client to continue..."
elif [ "$CMD" = "start" ]; then
if [ -e "$CARBON_HOME/wso2carbon.pid" ]; then
if ps -p $PID > /dev/null ; then
echo "Process is already running"
exit 0
fi
fi
export CARBON_HOME="$CARBON_HOME"
# using nohup sh to avoid errors in Solaris OS. TODO
nohup sh "$CARBON_HOME"/bin/micro-integrator.sh $args > /dev/null 2>&1 &
exit 0
elif [ "$CMD" = "stop" ]; then
export CARBON_HOME="$CARBON_HOME"
kill -term `cat "$CARBON_HOME"/wso2carbon.pid`
exit 0
elif [ "$CMD" = "restart" ]; then
export CARBON_HOME="$CARBON_HOME"
kill -term `cat "$CARBON_HOME"/wso2carbon.pid`
process_status=0
pid=`cat "$CARBON_HOME"/wso2carbon.pid`
while [ "$process_status" -eq "0" ]
do
sleep 1;
ps -p$pid 2>&1 > /dev/null
process_status=$?
done
# using nohup sh to avoid errors in Solaris OS. TODO
nohup sh "$CARBON_HOME"/bin/micro-integrator.sh $args > /dev/null 2>&1 &
exit 0
elif [ "$CMD" = "version" ]; then
cat "$CARBON_HOME"/bin/version.txt
cat "$CARBON_HOME"/bin/wso2carbon-version.txt
exit 0
fi
# ---------- Handle the SSL Issue with proper JDK version --------------------
java_version=$("$JAVACMD" -version 2>&1 | awk -F '"' '/version/ {print $2}')
java_version_formatted=$(echo "$java_version" | awk -F. '{printf("%02d%02d",$1,$2);}')
if [ $java_version_formatted -lt 0107 ] || [ $java_version_formatted -gt 1100 ]; then
echo " Starting WSO2 MI (in unsupported JDK)"
echo " [ERROR] MI is supported only on JDK 1.8, 9, 10 and 11"
fi
CARBON_XBOOTCLASSPATH=""
for f in "$CARBON_HOME"/wso2/lib/xboot/*.jar
do
if [ "$f" != "$CARBON_HOME/wso2/lib/xboot/*.jar" ];then
CARBON_XBOOTCLASSPATH="$CARBON_XBOOTCLASSPATH":$f
fi
done
CARBON_CLASSPATH=""
if [ -e "$JAVA_HOME/lib/tools.jar" ]; then
CARBON_CLASSPATH="$JAVA_HOME/../lib/tools.jar"
fi
for f in "$CARBON_HOME"/bin/*.jar
do
if [ "$f" != "$CARBON_HOME/bin/*.jar" ];then
CARBON_CLASSPATH="$CARBON_CLASSPATH":$f
fi
done
for t in "$CARBON_HOME"/wso2/lib/*.jar
do
CARBON_CLASSPATH="$CARBON_CLASSPATH":$t
done
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
JAVA_HOME=`cygpath --absolute --windows "$JAVA_HOME"`
CARBON_HOME=`cygpath --absolute --windows "$CARBON_HOME"`
AXIS2_HOME=`cygpath --absolute --windows "$AXIS2_HOME"`
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
CARBON_CLASSPATH=`cygpath --path --windows "$CARBON_CLASSPATH"`
CARBON_XBOOTCLASSPATH=`cygpath --path --windows "$CARBON_XBOOTCLASSPATH"`
fi
# ----- Execute The Requested Command -----------------------------------------
# echo JAVA_HOME environment variable is set to $JAVA_HOME
# echo CARBON_HOME environment variable is set to "$CARBON_HOME"
cd "$CARBON_HOME"
TMP_DIR="$CARBON_HOME"/tmp
if [ -d "$TMP_DIR" ]; then
rm -rf "$TMP_DIR"/*
fi
START_EXIT_STATUS=121
status=$START_EXIT_STATUS
if [ -z "$JVM_MEM_OPTS" ]; then
java_version=$("$JAVACMD" -version 2>&1 | awk -F '"' '/version/ {print $2}')
JVM_MEM_OPTS="-Xms256m -Xmx1024m"
if [ "$java_version" \< "1.8" ]; then
JVM_MEM_OPTS="$JVM_MEM_OPTS"
fi
fi
# echo "Using Java memory options: $JVM_MEM_OPTS"
#setting up profile parameter for runtime in MB
PROFILE_SELECTED="false"
for i in "$@"; do
if echo "$i" | grep -q "Dprofile"; then
PROFILE_SELECTED="true"
fi
done
if [ "$PROFILE_SELECTED" = false ] ; then
NODE_PARAMS="$NODE_PARAMS -Dprofile=micro-integrator-default"
fi
#To monitor a Carbon server in remote JMX mode on linux host machines, set the below system property.
# -Djava.rmi.server.hostname="your.IP.goes.here"
JAVA_VER_BASED_OPTS=""
if [ $java_version_formatted -ge 1100 ]; then
JAVA_VER_BASED_OPTS="--add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens java.rmi/sun.rmi.transport=ALL-UNNAMED"
fi
while [ "$status" = "$START_EXIT_STATUS" ]
do
$JAVACMD \
-Xbootclasspath/a:"$CARBON_XBOOTCLASSPATH" \
$JVM_MEM_OPTS \
-XX:+HeapDumpOnOutOfMemoryError \
-XX:HeapDumpPath="$CARBON_HOME/repository/logs/heap-dump.hprof" \
$JAVA_OPTS \
-Dcom.sun.management.jmxremote \
-classpath "$CARBON_CLASSPATH" \
-Djava.io.tmpdir="$CARBON_HOME/tmp" \
-Dcatalina.base="$CARBON_HOME/wso2/lib/tomcat" \
-Dwso2.server.standalone=true \
-Dcarbon.registry.root=/ \
-Djava.command="$JAVACMD" \
-Dqpid.conf="/conf/advanced/" \
$JAVA_VER_BASED_OPTS \
-Dcarbon.home="$CARBON_HOME" \
-Dlogger.server.name="micro-integrator" \
-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager \
-Dcarbon.config.dir.path="$CARBON_HOME/conf" \
-Dcarbon.repository.dir.path="$CARBON_HOME/repository" \
-Dcarbon.components.dir.path="$CARBON_HOME/wso2/components" \
-Dcarbon.dropins.dir.path="$CARBON_HOME/dropins" \
-Dcarbon.external.lib.dir.path="$CARBON_HOME/lib" \
-Dcarbon.patches.dir.path="$CARBON_HOME/patches" \
-Dcarbon.internal.lib.dir.path="$CARBON_HOME/wso2/lib" \
-Dei.extendedURIBasedDispatcher=org.wso2.micro.integrator.core.handlers.IntegratorStatefulHandler \
-Djava.util.logging.config.file="$CARBON_HOME/conf/etc/logging-bridge.properties" \
-Dcomponents.repo="$CARBON_HOME/wso2/components/plugins" \
-Dconf.location="$CARBON_HOME/conf" \
-Dcom.atomikos.icatch.file="$CARBON_HOME/wso2/lib/transactions.properties" \
-Dcom.atomikos.icatch.hide_init_file_path=true \
-Dorg.apache.jasper.compiler.Parser.STRICT_QUOTE_ESCAPING=false \
-Dorg.apache.jasper.runtime.BodyContentImpl.LIMIT_BUFFER=true \
-Dcom.sun.jndi.ldap.connect.pool.authentication=simple \
-Dcom.sun.jndi.ldap.connect.pool.timeout=3000 \
-Dorg.terracotta.quartz.skipUpdateCheck=true \
-Djava.security.egd=file:/dev/./urandom \
-Dfile.encoding=UTF8 \
-Djava.net.preferIPv4Stack=true \
-DNonRegistryMode=true \
-DNonUserCoreMode=true \
-Dcom.ibm.cacheLocalHost=true \
-Dcarbon.use.registry.repo=false \
-DworkerNode=false \
-Dorg.apache.cxf.io.CachedOutputStream.Threshold=104857600 \
-Djavax.xml.xpath.XPathFactory:http://java.sun.com/jaxp/xpath/dom=net.sf.saxon.xpath.XPathFactoryImpl \
-DavoidConfigHashRead=true \
-Dproperties.file.path=default \
-DenableReadinessProbe=true \
-DenablePrometheusApi=true \
$NODE_PARAMS \
-Dorg.apache.activemq.SERIALIZABLE_PACKAGES="*" \
org.wso2.micro.integrator.bootstrap.Bootstrap $*
status=$?
done
|
#! /bin/bash
lnav_test="${top_builddir}/src/lnav-test"
export HOME="./meta-sessions"
export XDG_CONFIG_HOME="./meta-sessions/.config"
rm -rf "./meta-sessions"
mkdir -p $HOME/.config
run_test ${lnav_test} -n -dln.dbg \
-c ":comment Hello, World!" \
-c ":tag foo" \
-c ":save-session" \
-c ":write-screen-to -" \
${test_dir}/logfile_access_log.0
check_output ":tag did not work?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
// Hello, World!
-- #foo
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
ls -lha meta-sessions
find meta-sessions
cat ln.dbg
if test ! -d meta-sessions/.config/lnav; then
echo "error: configuration not stored in .config/lnav?"
exit 1
fi
if test -d meta-sessions/.lnav; then
echo "error: configuration stored in .lnav?"
exit 1
fi
run_test ${lnav_test} -n \
-c ":load-session" \
-c ";UPDATE access_log SET log_mark = 1" \
-c ":write-to -" \
${test_dir}/logfile_access_log.0
check_output "tag was not saved in session?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
// Hello, World!
-- #foo
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ":load-session" \
-c ":untag #foo" \
${test_dir}/logfile_access_log.0
check_output ":untag did not work?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
+ Hello, World!
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ":load-session" \
-c ":clear-comment" \
${test_dir}/logfile_access_log.0
check_output ":clear-comment did not work?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
+ #foo
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ":goto 2" \
-c "/foo" \
-c ":tag #foo" \
-c ":goto 0" \
-c ":next-mark search" \
${test_dir}/logfile_access_log.0
check_output "searching for a tag did not work?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
+ #foo
EOF
run_test ${lnav_test} -n \
-c ":load-session" \
-c ";SELECT log_line, log_comment, log_tags FROM access_log" \
${test_dir}/logfile_access_log.0
check_output "metadata columns are not working?" <<EOF
log_line log_comment log_tags
0 Hello, World! ["#foo"]
1 <NULL> <NULL>
2 <NULL> <NULL>
EOF
run_test ${lnav_test} -n \
-c ";UPDATE access_log SET log_tags = json_array('#foo', '#foo') WHERE log_line = 1" \
-c ":save-session" \
${test_dir}/logfile_access_log.0
check_output "updating log_tags is not working?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
+ #foo
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ";UPDATE access_log SET log_comment = 'Hello, World!' WHERE log_line = 1" \
${test_dir}/logfile_access_log.0
check_output "updating log_comment is not working?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
+ Hello, World!
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ";UPDATE access_log SET log_tags = 1 WHERE log_line = 1" \
${test_dir}/logfile_access_log.0
check_error_output "updating log_tags is not working?" <<EOF
command-option:1: error: command-option:line 1
unexpected JSON value
accepted paths --
<tag> -- A tag for the log line
EOF
run_test ${lnav_test} -n \
-c ";UPDATE access_log SET log_tags = json_array('foo') WHERE log_line = 1" \
-c ":save-session" \
${test_dir}/logfile_access_log.0
check_error_output "updating log_tags is not working?" <<EOF
command-option:1: error: Value does not match pattern: ^#[^\s]+$
EOF
run_test ${lnav_test} -n \
-c ":load-session" \
-c ";SELECT log_tags FROM access_log WHERE log_line = 1" \
${test_dir}/logfile_access_log.0
check_output "log_tags was updated?" <<EOF
log_tags
["#foo"]
EOF
run_test ${lnav_test} -n \
-c ":tag foo" \
-c ":delete-tags #foo" \
${test_dir}/logfile_access_log.0
check_output ":delete-tags does not work?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ":tag foo" \
-c ";UPDATE access_log SET log_tags = null" \
${test_dir}/logfile_access_log.0
check_output "clearing log_tags is not working?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
run_test ${lnav_test} -n \
-c ":comment foo" \
-c ";UPDATE access_log SET log_comment = null" \
${test_dir}/logfile_access_log.0
check_output "clearing log_tags is not working?" <<EOF
192.168.202.254 - - [20/Jul/2009:22:59:26 +0000] "GET /vmw/cgi/tramp HTTP/1.0" 200 134 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkboot.gz HTTP/1.0" 404 46210 "-" "gPXE/0.9.7"
192.168.202.254 - - [20/Jul/2009:22:59:29 +0000] "GET /vmw/vSphere/default/vmkernel.gz HTTP/1.0" 200 78929 "-" "gPXE/0.9.7"
EOF
|
# setup default editor
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='nvim'
fi
|
#!/bin/bash
serverExe="${1}"
deploymentDir=${2}
port=${3}
logFilePrefix="${deploymentDir}/logs/${serverExe}"
datePrefix=$(date +"%Y.%m.%d_%H.%M.%S")
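# Assumed usage (inferred from the positional parameters above; the script name is illustrative):
#   ./make_start_script.sh <serverExe> <deploymentDir> <port>
# The unquoted heredoc below expands these variables now and emits a ready-to-run start script on stdout.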
cat << EOF
echo "Checking log dir"
if [ ! -d "${deploymentDir}/logs" ]; then
mkdir "${deploymentDir}/logs"
fi
if [ -f "${logFilePrefix}.log" ] ; then
mv "${logFilePrefix}.log" "${logFilePrefix}_${datePrefix}.log"
fi
touch "${logFilePrefix}.log"
nohup "./${serverExe}" serve --port ${port} > "${logFilePrefix}.log" 2>&1 &
sleep 5s
#Check if server has started
netstat -ntpl | grep "[0-9]*:${port:-8000}"
if lsof -i:${port:-8000}
then
echo "${serverExe} started, processID: "
ps cax | grep "${serverExe}" | grep -o '^[ ]*[0-9]*'
else
echo "Failed to start ${serverExe}"
fi
EOF
|
#!/bin/sh
# Portions Copyright (c) 2020 Microsoft Corporation
# Overview:
# The verity-mount module is responsible for mounting a dm-verity protected read-only
# root file system. (see https://gitlab.com/cryptsetup/cryptsetup/-/wikis/DMVerity)
# To load a dm-verity disk both a hash tree and root hash must be available. The
# verity-mount module may load the hash tree from a device or as a file inside the
# initramfs. The root hash is expected as a file in the initramfs.
#
# Error Correction:
# Optionally forward error correction (FEC) may also be used. dm-verity will use the
# FEC to patch any corrupted data at run time (but will not repair the underlying data).
# Error correction normally happens only as required (when blocks are read). The
# rd.verityroot.validateonboot argument will force a full validation of all blocks
# at boot and print any issues as dracut warnings (This can take several minutes if
# the disk is degraded)
#
# Signing:
# The expectation is that the initramfs (and its enclosed root hash) will be signed.
# The root hash can then be trusted because the initramfs was validated during boot.
# dm-verity also supports cryptographically signing the root hash, the signature file is
# expected to be part of the initramfs and will be validated against the kernel key-ring.
#
# Overlays:
# Many packages expect to be able to write files to disk during day-to-day operations. To
# accommodate these programs the verity-mount module can create tmpfs overlays in targeted
# locations. These overlays are not persistent and will be created fresh on every boot.
#
# Debugging:
# The verity-mount module will mount a read-only view of the tmpfs overlays into
# rd.verityroot.overlays_debug_mount=/path/to/mount if set. This is an easy way to see
# what files are being modified during runtime.
# Parameters:
# Required:
# rd.verityroot.devicename=desired_device_mapper_name
# rd.verityroot.hashtree=/path/to/hashtree | <DEVICE_TYPE>=<DEVICE_ID>
# rd.verityroot.roothash=<SHA256_HASH>
# or
# rd.verityroot.roothashfile=/path/to/roothash
# Optional
# rd.verityroot.roothashsig=/path/to/file
# rd.verityroot.verityerrorhandling=ignore|restart|panic
# rd.verityroot.validateonboot=true/false
# rd.verityroot.fecdata=/path/to/fecdata | <DEVICE_TYPE>=<DEVICE_ID>
# rd.verityroot.fecroots=#
# rd.verityroot.overlays="/path/to/overlay/directory /other/path"
# rd.verityroot.overlays_debug_mount=/path/to/mount/debug/info
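# Example kernel command line (hypothetical values, for illustration only):
#   root=verityroot:PARTUUID=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee \
#   rd.verityroot.devicename=verity_root rd.verityroot.hashtree=/verity/hashtree \
#   rd.verityroot.roothashfile=/verity/roothash rd.verityroot.overlays="/var /etc" \
#   rd.verityroot.overlaysize=20%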
# Make sure we have dracut-lib loaded
type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
# Look for a root device parameter of the form: root=verityroot:<DEVICE_TYPE>=<DEVICE_ID>
[ -z "$root" ] && root=$(getarg root=)
if [ "${root%%:*}" = "verityroot" ] ; then
verityroot=$root
fi
# Bail early if no 'verityroot' root is found
[ "${verityroot%%:*}" = "verityroot" ] || exit 0
# Get all other required parameters
[ -z "${veritydevicename}" ] && veritydevicename=$(getarg rd.verityroot.devicename=)
[ -n "${veritydevicename}" ] || veritydevicename="verity_root"
[ -z "${verityhashtree}" ] && verityhashtree=$(getarg rd.verityroot.hashtree=)
[ -z "${verityroothash}" ] && verityroothash=$(getarg rd.verityroot.roothash=)
[ -z "${verityroothashfile}" ] && verityroothashfile=$(getarg rd.verityroot.roothashfile=)
# Get the optional parameters
[ -z "${verityroothashsig}" ] && verityroothashsig=$(getarg rd.verityroot.roothashsig=)
[ -z "${verityerrorhandling}" ] && verityerrorhandling=$(getarg rd.verityroot.verityerrorhandling=)
[ -z "${validateonboot}" ] && validateonboot=$(getarg rd.verityroot.validateonboot=)
[ -z "${verityfecdata}" ] && verityfecdata=$(getarg rd.verityroot.fecdata=)
[ -z "${verityfecroots}" ] && verityfecroots=$(getarg rd.verityroot.fecroots=)
[ -z "${verityoverlays}" ] && overlays=$(getarg rd.verityroot.overlays=)
[ -z "${verityoverlaysize}" ] && overlaysize=$(getarg rd.verityroot.overlaysize=)
[ -z "${overlays_debug_mount}" ] && overlays_debug_mount=$(getarg rd.verityroot.overlays_debug_mount=)
# Check the required parameters are present
[ -n "${veritydevicename}" ] || die "verityroot requires rd.verityroot.devicename="
[ -n "${verityhashtree}" ] || die "verityroot requires rd.verityroot.hashtree="
[ -n "${verityroothash}" ] || [ -n "${verityroothashfile}" ] || die "verityroot requires rd.verityroot.roothash= or rd.verityroot.roothashfile="
[ -n "${verityroothash}" -a -n "${verityroothashfile}" ] && die "verityroot does not support using both rd.verityroot.roothash= and rd.verityroot.roothashfile= at the same time"
# Validate the optional parameters
# Make sure we have either both or neither FEC arguments (xor)
[ -n "${verityfecdata}" -a -z "${verityfecroots}" ] && die "verityroot FEC requires both rd.verityroot.fecdata= and rd.verityroot.fecroots="
[ -z "${verityfecdata}" -a -n "${verityfecroots}" ] && die "verityroot FEC requires both rd.verityroot.fecdata= and rd.verityroot.fecroots="
# Make sure we have set an overlay size if we are using overlays
if [ -n "${verityoverlays}" ]; then
[ -z "${verityoverlaysize}" ] && die "verityroot rd.verityroot.overlaysize= must be set if using rd.verityroot.overlays="
fi
# Check we have a valid error handling option
if [ -n "${verityerrorhandling}" ]; then
[ "${verityerrorhandling}" == "ignore" -o \
"${verityerrorhandling}" == "restart" -o \
"${verityerrorhandling}" == "panic" ] || die "verityroot rd.verityroot.verityerrorhandling= must be one of [ignore,restart,panic]"
fi
# Same for full validation during boot option
if [ -n "${validateonboot}" ]; then
[ "${validateonboot}" == "true" -o \
"${validateonboot}" == "false" ] || die "verityroot rd.verityroot.validateonboot= must be one of [true,false]"
fi
# dracut-functions.sh is only available during initramfs creation,
# keep a copy of this function here.
expand_persistent_dev() {
local _dev=$1
case "$_dev" in
LABEL=*)
_dev="/dev/disk/by-label/${_dev#LABEL=}"
;;
UUID=*)
_dev="${_dev#UUID=}"
_dev="${_dev,,}"
_dev="/dev/disk/by-uuid/${_dev}"
;;
PARTUUID=*)
_dev="${_dev#PARTUUID=}"
_dev="${_dev,,}"
_dev="/dev/disk/by-partuuid/${_dev}"
;;
PARTLABEL=*)
_dev="/dev/disk/by-partlabel/${_dev#PARTLABEL=}"
;;
esac
printf "%s" "$_dev"
}
# Get paths to the various devices/files we might need to wait for.
veritydisk=$(expand_persistent_dev "${verityroot#verityroot:}")
verityhashtree=$(expand_persistent_dev "${verityhashtree}")
verityroothashfile=$(expand_persistent_dev "${verityroothashfile}")
verityfecdata=$(expand_persistent_dev "${verityfecdata}")
info "Going to try to mount '$verityroot' with '$verityhashtree' and '$verityroothash$verityroothashfile'"
rootok=1
unset root
root="${verityroot}"
# Queue up a wait for each device/file
if [ "${root%%:*}" = "verityroot" ]; then
for _dev in ${veritydisk} ${verityhashtree} ${verityroothashfile} ${verityfecdata}; do
wait_for_dev "${_dev}"
done
fi
|
#!/bin/bash
#
# University of Luxembourg
# Laboratory of Algorithmics, Cryptology and Security (LACS)
#
# FELICS - Fair Evaluation of Lightweight Cryptographic Systems
#
# Copyright (C) 2015 University of Luxembourg
#
# Written in 2015 by Daniel Dinu <[email protected]>
#
# This file is part of FELICS.
#
# FELICS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# FELICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Display help information
function display_help()
{
echo ""
echo " Call this script to get the results"
echo " ./collect_ciphers_metrics.sh [{-h|--help}] [--version] [{-f|--format}=[0|1|2|3|4|5]] [{-a|--architectures}=['PC AVR MSP ARM']] [{-s|--scenarios}=['0 1 2']] [{-c|--ciphers}=['Cipher1 Cipher2 ...']] [{-p|--prefix}='...'] [{-co|--compiler_options}='...'] [{-i|incremental}=[0|1]]"
echo ""
echo " Options:"
echo " -h, --help"
echo " Display help information"
echo " --version"
echo " Display version information"
echo " -f, --format"
echo " Specifies which output format to use"
echo " 0 - use all output formats below"
echo " 1 - raw table"
echo " 2 - MediaWiki table"
echo " 3 - XML table"
echo " 4 - LaTeX table"
echo " 5 - CSV table"
echo " Default: 0"
echo " -a, --architectures"
echo " Specifies for which archiectures to get the results"
echo " List of values: 'PC AVR MSP ARM'"
echo " Default: all architectures"
echo " -s, --scenarios"
echo " Specifies for which scenarios to get the results"
echo " List of values: '0 1 2'"
echo " Default: all scenarios"
echo " -c, --ciphers"
echo " Specifies for which ciphers to get the results"
echo " List of values: 'CipherName_BlockSizeInBits_KeySizeInBits_v01 ...'"
echo " Default: all ciphers"
echo " -p, --prefix"
echo " Specifies the results file prefix"
echo " Default: current date in 'YYYY_mm_dd' format"
echo " -co,--compiler_options"
echo " Specifies the compiler options"
echo " List of values: '-O3 --param max-unroll-times=5 --param max-unrolled-insns=100 ...'"
echo " Default: all compiler options"
echo " -i, --incremental"
echo " Specifies if script should use an incremntal strategy (collect results just for new and modified implementations since the last run)"
echo " 0 - do not use incrmental strategy"
echo " 1 - use incremental strategy"
echo " Default: 1"
echo ""
echo " Examples:"
echo " ./get_results.sh -f=0"
echo " ./get_results.sh --format=1"
echo " ./get_results.sh -a='PC AVR' --scenarios=\"1 2\""
echo ""
exit
}
|
#! /bin/bash
i=0
KEY=truv
echo "Putting key of: ${KEY}-${i}"
aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key ${KEY}-${i} --body ./64KiB_object.bin
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2583-1
#
# Security announcement date: 2012-12-08 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:28 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - iceweasel:3.5.16-20
#
# Last versions recommended by security team:
# - iceweasel:3.5.16-20
#
# CVE List:
# - CVE-2012-4201
# - CVE-2012-4207
# - CVE-2012-4216
# - CVE-2012-5829
# - CVE-2012-5842
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade iceweasel=3.5.16-20 -y
|
#!/bin/bash
FN="STexampleData_1.2.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/experiment/src/contrib/STexampleData_1.2.0.tar.gz"
"https://bioarchive.galaxyproject.org/STexampleData_1.2.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-stexampledata/bioconductor-stexampledata_1.2.0_src_all.tar.gz"
)
MD5="45f1bb4d91fa2163420cebc576474ac7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
#!/bin/bash
#########################################################################################
# License information
#########################################################################################
# Copyright 2018 Jamf Professional Services
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#########################################################################################
# General Information
#########################################################################################
# This script is designed to make implementation of DEPNotify very easy with limited
# scripting knowledge. The section below has variables that may be modified to customize
# the end user experience. DO NOT modify things in or below the CORE LOGIC area unless
# major testing and validation is performed.
# More information at: https://github.com/jamfprofessionalservices/DEP-Notify
#########################################################################################
# Variables to Modify
#########################################################################################
# Testing flag will enable the following things to change:
# - Auto removal of BOM files to reduce errors
# - Sleep commands instead of policies being called
# - Quit Key set to command + control + x
TESTING_MODE=false # Set variable to true or false
# Flag the app to open fullscreen or as a window
FULLSCREEN=true # Set variable to true or false
# Banner image can be 600px wide by 100px high. Images will be scaled to fit
# If this variable is left blank, the generic image will appear
BANNER_IMAGE_PATH="/Applications/Self Service.app/Contents/Resources/AppIcon.icns"
# Flag for using the custom branding icon from Self Service and Jamf Pro
# This will override the banner image specified above
SELF_SERVICE_CUSTOM_BRANDING=true # Set variable to true or false
# Main heading that will be displayed under the image
# If this variable is left blank, the generic banner will appear
BANNER_TITLE="Welcome to Tenenz, Inc."
# Paragraph text that will display under the main heading. For a new line, use \n
# If this variable is left blank, the generic message will appear. Leave single
# quotes below as double quotes will break the new line.
MAIN_TEXT='Thanks for joining us at Tenenz! We want you to have a few applications and settings configured before you get started with your new Mac. This process should take 10 to 20 minutes to complete. \n \n If you need additional software or help, please visit the Self Service app in your Applications folder or on your Dock.'
# URL for support or help that will open when the ? is clicked
# If this variable is left blank, the ? will not appear
# If using fullscreen mode, Safari will be launched behind the DEP Notify window
SUPPORT_URL=
# Initial Start Status text that shows as things are firing up
INITAL_START_STATUS="Initial Configuration Starting..."
# EULA configuration
# CURRENTLY BROKEN - seeing issues with the EULA and continue buttons
EULA_ENABLED=false # Set variable to true or false
# The policy array must be formatted "Progress Bar text,customTrigger". These will be
# run in order as they appear below.
POLICY_ARRAY=(
"Setting Timezone...,set_timezone"
"Naming Mac...,name_mac"
"Installing Utilities...,dockutil"
"Installing Utilities...,desktoppr"
"Installing Java for Mac...,java"
"Installing FileMaker Pro Advanced 18...,filemaker18"
"Installing Slack...,slack"
"Installing Microsoft Teams...,teams"
"Adding Printers,install_printers"
"Swabbing Deck...,set_dock_items_whs"
"Polishing Apple...,set_tenenz_wallpaper"
"Updating Inventory...,dep_update_inventory"
)
# Text that will display in the progress bar
INSTALL_COMPLETE_TEXT="Setup Complete!"
# Script designed to automatically logout user to start FileVault process if
# deferred enablement is detected. Text displayed if deferred status is on.
FV_LOGOUT_TEXT="Your Mac must logout to start the encryption process. You will be asked to enter your password and click OK or Contiune a few times. Your Mac will be usable while encryption takes place."
# Text that will display inside the alert once policies have finished
COMPLETE_ALERT_TEXT="Your Mac is now finished with initial setup and configuration. Press Quit to get started!"
#########################################################################################
# Core Script Logic - Don't Change Without Major Testing
#########################################################################################
# Variables for File Paths
JAMF_BINARY="/usr/local/bin/jamf"
FDE_SETUP_BINARY="/usr/bin/fdesetup"
DEP_NOTIFY_APP="/Applications/Utilities/DEPNotify.app"
DEP_NOTIFY_CONFIG="/var/tmp/depnotify.log"
DEP_NOTIFY_DONE="/var/tmp/com.depnotify.provisioning.done"
DEP_NOTIFY_EULA="/var/tmp/com.depnotify.agreement.done"
TMP_DEBUG_LOG="/var/tmp/depNotifyDebug.log"
# Validating true/false flags
if [ "$TESTING_MODE" != true ] && [ "$TESTING_MODE" != false ]; then
echo "$(date "+%a %h %d %H:%M:%S"): Testing configuration not set properly. Currently set to '$TESTING_MODE'. Please update to true or false." >> "$TMP_DEBUG_LOG"
exit 1
fi
if [ "$FULLSCREEN" != true ] && [ "$FULLSCREEN" != false ]; then
echo "$(date "+%a %h %d %H:%M:%S"): Fullscreen configuration not set properly. Currently set to '$FULLSCREEN'. Please update to true or false." >> "$TMP_DEBUG_LOG"
exit 1
fi
if [ "$EULA_ENABLED" != true ] && [ "$EULA_ENABLED" != false ]; then
echo "$(date "+%a %h %d %H:%M:%S"): EULA configuration not set properly. Currently set to '$EULA_ENABLED'. Please update to true or false." >> "$TMP_DEBUG_LOG"
exit 1
fi
# DEP Notify will run after the Apple Setup Assistant and must be run as the end user.
SETUP_ASSISTANT_PROCESS=$(pgrep -l "Setup Assistant")
until [ "$SETUP_ASSISTANT_PROCESS" = "" ]; do
echo "$(date "+%a %h %d %H:%M:%S"): Setup Assistant Still Running. PID $SETUP_ASSISTANT_PROCESS." >> "$TMP_DEBUG_LOG"
sleep 1
SETUP_ASSISTANT_PROCESS=$(pgrep -l "Setup Assistant")
done
# Checking to see if the Finder is running now before continuing. This can help
# in scenarios where an end user is not configuring the device.
FINDER_PROCESS=$(pgrep -l "Finder")
until [ "$FINDER_PROCESS" != "" ]; do
echo "$(date "+%a %h %d %H:%M:%S"): Finder process not found. Assuming device is at login screen." >> "$TMP_DEBUG_LOG"
sleep 1
FINDER_PROCESS=$(pgrep -l "Finder")
done
# After the Apple Setup Assistant has completed, it is now safe to grab the current user.
CURRENT_USER=$(stat -f "%Su" "/dev/console")
echo "$(date "+%a %h %d %H:%M:%S"): Current user set to $CURRENT_USER." >> "$TMP_DEBUG_LOG"
# If SELF_SERVICE_CUSTOM_BRANDING is set to true, load the updated icon
if [ "$SELF_SERVICE_CUSTOM_BRANDING" = true ]; then
open -a "/Applications/Self Service.app" --hide
# Loop waiting on the branding image to properly show in the users library
CUSTOM_BRANDING_PNG="/Users/$CURRENT_USER/Library/Application Support/com.jamfsoftware.selfservice.mac/Documents/Images/brandingimage.png"
until [ -f "$CUSTOM_BRANDING_PNG" ]; do
echo "$(date "+%a %h %d %H:%M:%S"): Waiting for branding image from Jamf Pro." >> "$TMP_DEBUG_LOG"
sleep 1
done
# Setting Banner Image for DEP Notify to Self Service Custom Branding
BANNER_IMAGE_PATH="$CUSTOM_BRANDING_PNG"
fi
# Testing Mode Enhancements
if [ "$TESTING_MODE" = true ]; then
  # Removing old config and state files if present (Testing Mode Only)
  if [ -f "$DEP_NOTIFY_CONFIG" ]; then
    rm "$DEP_NOTIFY_CONFIG"
  fi
  if [ -f "$DEP_NOTIFY_DONE" ]; then
    rm "$DEP_NOTIFY_DONE"
  fi
  if [ -f "$DEP_NOTIFY_EULA" ]; then
    rm "$DEP_NOTIFY_EULA"
  fi
  # Setting Quit Key to command + control + x after the old config is removed (Testing Mode Only)
  echo "Command: QuitKey: x" >> "$DEP_NOTIFY_CONFIG"
fi
# Setting custom image if specified
if [ "$BANNER_IMAGE_PATH" != "" ]; then
echo "Command: Image: $BANNER_IMAGE_PATH" >> "$DEP_NOTIFY_CONFIG"
fi
# Setting custom title if specified
if [ "$BANNER_TITLE" != "" ]; then
echo "Command: MainTitle: $BANNER_TITLE" >> "$DEP_NOTIFY_CONFIG"
fi
# Setting custom main text if specified
if [ "$MAIN_TEXT" != "" ]; then
echo "Command: MainText: $MAIN_TEXT" >> "$DEP_NOTIFY_CONFIG"
fi
# Adding help url and button if specified
if [ "$SUPPORT_URL" != "" ]; then
echo "Command: Help: $SUPPORT_URL" >> "$DEP_NOTIFY_CONFIG"
fi
# Opening the app after initial configuration
if [ "$FULLSCREEN" = true ]; then
sudo -u "$CURRENT_USER" "$DEP_NOTIFY_APP"/Contents/MacOS/DEPNotify -path "$DEP_NOTIFY_CONFIG" -fullScreen&
elif [ "$FULLSCREEN" = false ]; then
sudo -u "$CURRENT_USER" "$DEP_NOTIFY_APP"/Contents/MacOS/DEPNotify -path "$DEP_NOTIFY_CONFIG"&
fi
# Adding nice text and a brief pause for prettiness
echo "Status: $INITAL_START_STATUS" >> "$DEP_NOTIFY_CONFIG"
sleep 5
# Setting the status bar
# Counter is for making the determinate look nice. Starts at one and adds
# more based on EULA or register options.
ADDITIONAL_OPTIONS_COUNTER=1
if [ "$EULA_ENABLED" = true ]; then
((ADDITIONAL_OPTIONS_COUNTER++))
fi
# Checking policy array and adding the count from the additional options above.
ARRAY_LENGTH="$((${#POLICY_ARRAY[@]}+ADDITIONAL_OPTIONS_COUNTER))"
echo "Command: Determinate: $ARRAY_LENGTH" >> "$DEP_NOTIFY_CONFIG"
# EULA prompt prior to configuration
if [ "$EULA_ENABLED" = true ]; then
echo "Status: Waiting on EULA Acceptance" >> "$DEP_NOTIFY_CONFIG"
echo "Command: ContinueButtonEULA: EULA" >> "$DEP_NOTIFY_CONFIG"
while [ ! -f "$DEP_NOTIFY_EULA" ]; do
sleep 1
done
fi
# Loop to run policies
for POLICY in "${POLICY_ARRAY[@]}"; do
echo "Status: $(echo "$POLICY" | cut -d ',' -f1)" >> "$DEP_NOTIFY_CONFIG"
if [ "$TESTING_MODE" = true ]; then
sleep 10
elif [ "$TESTING_MODE" = false ]; then
"$JAMF_BINARY" policy -event "$(echo "$POLICY" | cut -d ',' -f2)"
fi
done
# Check to see if FileVault Deferred enablement is active
FV_DEFERRED_STATUS=$($FDE_SETUP_BINARY status | grep "Deferred" | cut -d ' ' -f6)
# Exit gracefully after things are finished
echo "Status: $INSTALL_COMPLETE_TEXT" >> "$DEP_NOTIFY_CONFIG"
if [ "$FV_DEFERRED_STATUS" = "active" ]; then
echo "Command: Logout: $FV_LOGOUT_TEXT" >> "$DEP_NOTIFY_CONFIG"
else
echo "Command: Quit: $COMPLETE_ALERT_TEXT" >> "$DEP_NOTIFY_CONFIG"
fi
exit 0
|
echo "=== Acquiring datasets ==="
echo "---"
mkdir -p ../data
cd ../data
echo "- Downloading enwik8 (Character)"
if [[ ! -d 'enwik8' ]]; then
mkdir -p enwik8
cd enwik8
wget --continue http://mattmahoney.net/dc/enwik8.zip
wget https://raw.githubusercontent.com/salesforce/awd-lstm-lm/master/data/enwik8/prep_enwik8.py
python3 prep_enwik8.py
cd ..
fi
echo "---"
echo "Happy language modeling :)"
|
#!/bin/sh
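# Assumed usage (inferred from the ffmpeg flags below; the script name is illustrative):
#   ./cut.sh <start_time> <input_file> <end_time> <output_filename>
# Copies the stream between the start and end timestamps into output/<output_filename> without re-encoding.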
mkdir -p -- "output"
ffmpeg -ss $1 -i "$2" -to $3 -c copy -copyts output/"$4" |
#!/bin/bash
docker-compose up -d mutagen
mutagen project start -f mutagen/config.yml
docker-compose up -d
|
#!/bin/bash
apt-get install python-pip
sudo pip install apache-beam[gcp] oauth2client==3.0.0
sudo pip install -U pip |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source ${DIR}/../common.sh
writeKratosLog "DEBUG" "Collecting Close Marketstack data"
python3 ${PYTHONDIR}/collect_marketstack_close.py >>${LOG}
|
#!/usr/bin/env ../../lib/tests/bats/bin/bats
load ../../lib/tests/bats-support/load
load ../../lib/tests/bats-assert/load
load ../../lib/tests/helpers
load ../../lib/git/commit
setup() {
ORGINAL_DIR=$(pwd)
cd $(create_test_repo)
COMMIT_MSG="[BATS] initial commit"
TEST_FILE="test.bats"
create_test_file ${TEST_FILE}
git_commit "${COMMIT_MSG}" | sed -e 's/^/# /g' >&3 2>&1
}
teardown() {
cd ${ORGINAL_DIR}
}
@test "actions > files_changed > files only > should have a test file" {
run ${ORGINAL_DIR}/actions/files_changed.sh
assert_success
assert_output --regexp ".*::set-output name=files::.*${TEST_FILE}.*"
refute_output --partial "::set-output name=created_files::"
refute_output --partial "::set-output name=modified_files::"
refute_output --partial "::set-output name=deleted_files::"
}
@test "actions > files_changed > files, created_files > should have a test file" {
run ${ORGINAL_DIR}/actions/files_changed.sh --created
assert_success
assert_output --regexp "::set-output name=files::.*${TEST_FILE}.*"
assert_output --regexp "::set-output name=created_files::.*${TEST_FILE}.*"
refute_output --partial "::set-output name=modified_files::"
refute_output --partial "::set-output name=deleted_files::"
}
@test "actions > files_changed > files, modified_files > should have a test file" {
echo "change #1" > ${TEST_FILE}
git_commit "[BATS] Modified ${TEST_FILE}" | sed -e 's/^/# /g' >&3 2>&1
run ${ORGINAL_DIR}/actions/files_changed.sh --modified
assert_success
assert_output --regexp "::set-output name=files::.*${TEST_FILE}.*"
refute_output --partial "::set-output name=created_files::"
assert_output --regexp "::set-output name=modified_files::.*${TEST_FILE}.*"
refute_output --partial "::set-output name=deleted_files::"
}
@test "actions > files_changed > files, deleted_files > should have a test file" {
rm ${TEST_FILE}
git_commit "[BATS] Deleted ${TEST_FILE}" | sed -e 's/^/# /g' >&3 2>&1
run ${ORGINAL_DIR}/actions/files_changed.sh --deleted
assert_success
assert_output --partial "::set-output name=files::"
refute_output --partial "::set-output name=created_files::"
refute_output --partial "::set-output name=modified_files::"
assert_output --partial "::set-output name=deleted_files::"
}
@test "actions > files_changed > multiple files, should show all files created, or modified" {
TEST_FILE2="test2.bats"
echo "change #1" >${TEST_FILE}
touch ${TEST_FILE2}
git_commit "${COMMIT_MSG}; added ${TEST_FILE2}"
run ${ORGINAL_DIR}/actions/files_changed.sh --created --modified
assert_output --regexp ".*::set-output name=files::.*${TEST_FILE}.*"
assert_output --regexp ".*::set-output name=files::.*${TEST_FILE2}.*"
assert_output --regexp ".*::set-output name=modified_files::.*${TEST_FILE}.*"
assert_output --regexp ".*::set-output name=created_files::.*${TEST_FILE2}.*"
}
@test "actions > files_changed > multiple files, filter to .*test.bats.*" {
TEST_FILE2="test2.bats"
echo "change #1" >${TEST_FILE}
touch ${TEST_FILE2}
git_commit "${COMMIT_MSG}; added ${TEST_FILE2}"
export INPUT_GIT_FILTER=".*${TEST_FILE}.*"
run ${ORGINAL_DIR}/actions/files_changed.sh --created --modified
assert_output --regexp ".*::set-output name=files::.*${TEST_FILE}.*"
refute_output --regexp ".*::set-output name=files::.*${TEST_FILE2}.*"
assert_output --regexp ".*::set-output name=modified_files::.*${TEST_FILE}.*"
refute_output --regexp ".*::set-output name=created_files::.*${TEST_FILE2}.*"
}
|
#!/bin/bash
#
# arch-bootstrap: Bootstrap a base Arch Linux system using any GNU distribution.
#
# Dependencies: bash >= 4, coreutils, wget, sed, gawk, tar, gzip, chroot, xz.
# Project: https://github.com/tokland/arch-bootstrap
#
# Install:
#
# # install -m 755 arch-bootstrap.sh /usr/local/bin/arch-bootstrap
#
# Usage:
#
# # arch-bootstrap destination
# # arch-bootstrap -a x86_64 -r ftp://ftp.archlinux.org destination-64
#
# And then you can chroot to the destination directory (user: root, password: root):
#
# # chroot destination
set -e -u -o pipefail
# Packages needed by pacman (see get-pacman-dependencies.sh)
PACMAN_PACKAGES=(
acl archlinux-keyring attr bzip2 curl expat glibc gpgme libarchive
libassuan libgpg-error libnghttp2 libssh2 lzo openssl pacman pacman-mirrorlist xz zlib
krb5 e2fsprogs keyutils libidn2 libunistring gcc-libs lz4 libpsl icu libunistring zstd
ca-certificates ca-certificates-utils
)
BASIC_PACKAGES=(${PACMAN_PACKAGES[*]} filesystem)
EXTRA_PACKAGES=(coreutils bash grep gawk file tar systemd sed)
DEFAULT_REPO_URL="http://mirrors.kernel.org/archlinux"
DEFAULT_ARM_REPO_URL="http://mirror.archlinuxarm.org"
DEFAULT_I686_REPO_URL="https://mirror.archlinux32.org"
stderr() {
echo "$@" >&2
}
debug() {
stderr "--- $@"
}
extract_href() {
sed -n '/<a / s/^.*<a [^>]*href="\([^\"]*\)".*$/\1/p'
}
fetch() {
curl -L -s "$@"
}
fetch_file() {
local FILEPATH=$1
shift
if [[ -e "$FILEPATH" ]]; then
curl -L -z "$FILEPATH" -o "$FILEPATH" "$@"
else
curl -L -o "$FILEPATH" "$@"
fi
}
uncompress() {
local FILEPATH=$1 DEST=$2
case "$FILEPATH" in
*.gz)
tar xzf "$FILEPATH" -C "$DEST";;
*.xz)
xz -dc "$FILEPATH" | tar x -C "$DEST";;
*)
debug "Error: unknown package format: $FILEPATH"
return 1;;
esac
}
###
get_default_repo() {
local ARCH=$1
if [[ "$ARCH" == arm* || "$ARCH" == aarch64 ]]; then
echo $DEFAULT_ARM_REPO_URL
elif [[ "$ARCH" == i686 ]]; then
echo $DEFAULT_I686_REPO_URL
else
echo $DEFAULT_REPO_URL
fi
}
get_core_repo_url() {
local REPO_URL=$1 ARCH=$2
if [[ "$ARCH" == arm* || "$ARCH" == aarch64 || "$ARCH" == i686 ]]; then
echo "${REPO_URL%/}/$ARCH/core"
else
echo "${REPO_URL%/}/core/os/$ARCH"
fi
}
get_template_repo_url() {
local REPO_URL=$1 ARCH=$2
if [[ "$ARCH" == arm* || "$ARCH" == aarch64 || "$ARCH" == i686 ]]; then
echo "${REPO_URL%/}/$ARCH/\$repo"
else
echo "${REPO_URL%/}/\$repo/os/$ARCH"
fi
}
configure_pacman() {
local DEST=$1 ARCH=$2
debug "configure DNS and pacman"
cp "/etc/resolv.conf" "$DEST/etc/resolv.conf"
SERVER=$(get_template_repo_url "$REPO_URL" "$ARCH")
echo "Server = $SERVER" > "$DEST/etc/pacman.d/mirrorlist"
}
configure_minimal_system() {
local DEST=$1
mkdir -p "$DEST/dev"
sed -ie 's/^root:.*$/root:$1$GT9AUpJe$oXANVIjIzcnmOpY07iaGi\/:14657::::::/' "$DEST/etc/shadow"
touch "$DEST/etc/group"
echo "bootstrap" > "$DEST/etc/hostname"
sed -i "s/^[[:space:]]*\(CheckSpace\)/# \1/" "$DEST/etc/pacman.conf"
sed -i "s/^[[:space:]]*SigLevel[[:space:]]*=.*$/SigLevel = Never/" "$DEST/etc/pacman.conf"
}
fetch_packages_list() {
local REPO=$1
debug "fetch packages list: $REPO/"
fetch "$REPO/" | extract_href | awk -F"/" '{print $NF}' | sort -rn ||
{ debug "Error: cannot fetch packages list: $REPO"; return 1; }
}
install_pacman_packages() {
local BASIC_PACKAGES=$1 DEST=$2 LIST=$3 DOWNLOAD_DIR=$4
debug "pacman package and dependencies: $BASIC_PACKAGES"
for PACKAGE in $BASIC_PACKAGES; do
local FILE=$(echo "$LIST" | grep -m1 "^$PACKAGE-[[:digit:]].*\(\.gz\|\.xz\)$")
test "$FILE" || { debug "Error: cannot find package: $PACKAGE"; return 1; }
local FILEPATH="$DOWNLOAD_DIR/$FILE"
debug "download package: $REPO/$FILE"
fetch_file "$FILEPATH" "$REPO/$FILE"
debug "uncompress package: $FILEPATH"
uncompress "$FILEPATH" "$DEST"
done
}
configure_static_qemu() {
local ARCH=$1 DEST=$2
[[ "$ARCH" == arm* ]] && ARCH=arm
QEMU_STATIC_BIN=$(which qemu-$ARCH-static || echo )
[[ -e "$QEMU_STATIC_BIN" ]] ||\
{ debug "no static qemu for $ARCH, ignoring"; return 0; }
cp "$QEMU_STATIC_BIN" "$DEST/usr/bin"
}
install_packages() {
local ARCH=$1 DEST=$2 PACKAGES=$3
debug "install packages: $PACKAGES"
LC_ALL=C chroot "$DEST" /usr/bin/pacman \
--noconfirm --arch $ARCH -Sy --force $PACKAGES
}
show_usage() {
stderr "Usage: $(basename "$0") [-q] [-a i686|x86_64|arm] [-r REPO_URL] [-d DOWNLOAD_DIR] DESTDIR"
}
main() {
# Process arguments and options
test $# -eq 0 && set -- "-h"
local ARCH=
local REPO_URL=
local USE_QEMU=
local DOWNLOAD_DIR=
local PRESERVE_DOWNLOAD_DIR=
while getopts "qa:r:d:h" ARG; do
case "$ARG" in
a) ARCH=$OPTARG;;
r) REPO_URL=$OPTARG;;
q) USE_QEMU=true;;
d) DOWNLOAD_DIR=$OPTARG
PRESERVE_DOWNLOAD_DIR=true;;
*) show_usage; return 1;;
esac
done
shift $(($OPTIND-1))
test $# -eq 1 || { show_usage; return 1; }
[[ -z "$ARCH" ]] && ARCH=$(uname -m)
[[ -z "$REPO_URL" ]] &&REPO_URL=$(get_default_repo "$ARCH")
local DEST=$1
local REPO=$(get_core_repo_url "$REPO_URL" "$ARCH")
[[ -z "$DOWNLOAD_DIR" ]] && DOWNLOAD_DIR=$(mktemp -d)
mkdir -p "$DOWNLOAD_DIR"
[[ -z "$PRESERVE_DOWNLOAD_DIR" ]] && trap "rm -rf '$DOWNLOAD_DIR'" KILL TERM EXIT
debug "destination directory: $DEST"
debug "core repository: $REPO"
debug "temporary directory: $DOWNLOAD_DIR"
# Fetch packages, install system and do a minimal configuration
mkdir -p "$DEST"
local LIST=$(fetch_packages_list $REPO)
install_pacman_packages "${BASIC_PACKAGES[*]}" "$DEST" "$LIST" "$DOWNLOAD_DIR"
configure_pacman "$DEST" "$ARCH"
configure_minimal_system "$DEST"
[[ -n "$USE_QEMU" ]] && configure_static_qemu "$ARCH" "$DEST"
install_packages "$ARCH" "$DEST" "${BASIC_PACKAGES[*]} ${EXTRA_PACKAGES[*]}"
configure_pacman "$DEST" "$ARCH" # Pacman must be re-configured
[[ -z "$PRESERVE_DOWNLOAD_DIR" ]] && rm -rf "$DOWNLOAD_DIR"
debug "Done!"
debug
debug "You may now chroot or arch-chroot from package arch-install-scripts:"
debug "$ sudo arch-chroot $DEST"
}
main "$@"
|
#! /bin/bash
echo ------------------------------------------------------------------- calibrate
echo $1
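# Invoke the 'sample' cloud function on the device passed as $1 via the Spark/Particle CLI
# (assumption: $1 is a device name or ID already registered with the CLI).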
spark call $1 sample "null"
|
#!/bin/bash
# --------------------------------------------------------
# Extract final accuracy.
# classification, imagenet ILSVRC2012 5w images,
# object detection, VOC2012 1w images.
# --------------------------------------------------------
current_dir=`pwd`
exe=${current_dir}/../../build/examples/clas_offline_multicore/clas_offline_multicore
# final acc logs
# ------------------------
# densenet121 dense int8
#net=densenet121
#batch=1
#dp=4
#mp=1
#tn=8
#${exe} -offlinemodel ${current_dir}/offline-${net}-int8/offline-${net}-int8-dense-${batch}batch-${mp}.cambricon -images ${current_dir}/val-5w.txt -labels ${current_dir}/synset_words.txt -int8 1 -fifosize 2 -threads ${tn} -dataparallel ${dp} &>> ${current_dir}/${net}.log-int8-dense-${batch}batch-${dp}-${mp}-${tn}
# ------------------------
# densenet121 dense fp16
#net=densenet121
#batch=1
#dp=4
#mp=1
#tn=8
#offline_model_file=/home/Cambricon-Test/Cambricon-MLU100/models/caffe/densenet121/offline-densenet121-fp16-dense-1batch-1.cambricon
#${exe} -offlinemodel ${offline_model_file} -images ${current_dir}/val-5w.txt -labels ${current_dir}/synset_words.txt -fifosize 2 -threads ${tn} -dataparallel ${dp} &>> ${current_dir}/${net}.log-fp16-dense-${batch}batch-${dp}-${mp}-${tn}
# ------------------------
#net=densenet121
#batch=1
#dp=4
#mp=1
#tn=8
#for round in `seq 1 3`
#do
#for sparsity in `seq 0.01 0.01 0.90`
#do
#echo "net: ${net}, sparsity: ${sparsity}, batch: ${batch}, dp: ${dp}, mp: ${mp}, tn: ${tn}"
#offline_model_file=/home/Cambricon-Test/Cambricon-MLU100/models/caffe/${net}/offline-${net}-fp16-sparse-${sparsity}-1batch-1.cambricon
#log_file=${net}.log-fp16-sparse-${sparsity}-${batch}batch-${dp}-${mp}-${tn}-round-${round}
#${exe} -offlinemodel ${offline_model_file} -images ${current_dir}/val-5w.txt -labels ${current_dir}/synset_words.txt -fifosize 2 -threads ${tn} -dataparallel ${dp} &>> ${log_file}
#done
#done
net=resnet50
batch=1
dp=4
mp=1
tn=8
for round in `seq 1 3`
do
for sparsity in `seq 0.10 0.10 0.90`
do
echo "net: ${net}, sparsity: ${sparsity}, batch: ${batch}, dp: ${dp}, mp: ${mp}, tn: ${tn}"
offline_model_file=/home/Cambricon-Test/Cambricon-MLU100/models/caffe/${net}/offline-${net}-int8-sparse-${sparsity}-1batch-1.cambricon
log_file=${net}.log-int8-sparse-${sparsity}-${batch}batch-${dp}-${mp}-${tn}-round-${round}
${exe} -int8 1 -offlinemodel ${offline_model_file} -images ${current_dir}/val-5w.txt -labels ${current_dir}/synset_words.txt -fifosize 2 -threads ${tn} -dataparallel ${dp} &>> ${log_file}
done
done
|
#!/usr/bin/env bash
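# Derive version parts from package.json without a jq dependency:
#   PACKAGE_JSON_VERSION  -> "major.minor" (patch component stripped)
#   PACKAGE_PATCH_VERSION -> patch number only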
export PACKAGE_JSON_VERSION=`cat package.json | grep version | head -1 | awk -F: '{ print $2 }' | sed 's/[",]//g' | sed -E 's/\.[0-9]+$//g' | xargs`
export PACKAGE_PATCH_VERSION=`cat package.json | grep version | head -1 | awk -F: '{ print $2 }' | sed 's/[",]//g' | sed -E 's/[0-9]+\.[0-9]+\.//g' | xargs` |
#!/bin/bash
OUT=$1
LABEL=$2
PCAP=$3
RATES=$4
if [[ $LABEL == "" ]]; then
echo "usage $0 OUT LABEL PCAP RATES "
exit -1
fi
if [[ $RATES == "" ]]; then
RATES="1.00 10.00 30.00 50.00 70.00 90.00";
fi
SHR=../../Shremote/shremote.py
CFG=cfgs/fpga_fec_encoder.yml
mkdir -p $OUT/$LABEL
run_booster () {
python3 $SHR $CFG ${LABEL}_$1 --out $OUT/$LABEL --args="rate:$1;dataplane_flags:-f;pcap_file:$PCAP" ;
RTN=$?
RETRIES=1
while [[ $RTN != 0 ]]; do
echo "Trying again... $RETRIES"
sleep 5
python3 $SHR $CFG ${LABEL}_$1 --out $OUT/$LABEL --args="rate:$1;dataplane_flags:-f;pcap_file:$PCAP" ;
RTN=$?
RETRIES=$(( $RETRIES + 1 ))
done
echo "SUCCESS!";
sleep 5
}
for rate in $RATES; do
run_booster $rate;
done
|
#!/usr/bin/env bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
# Load the helpers.
. helpers.bash
function execute() {
>&2 echo "++ $@"
eval "$@"
}
# Tests to run. Defaults to all.
TESTS=${@:-. compose discovery api mesos/api mesos/compose}
# Generate a temporary binary for the tests.
export SWARM_BINARY=`mktemp`
# Download docker-compose
execute time curl -L --silent https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
execute chmod +x /usr/local/bin/docker-compose
# Build Swarm.
execute time go build -o "$SWARM_BINARY" ../..
# Start the docker engine.
execute docker daemon --log-level=panic \
--storage-driver="$STORAGE_DRIVER" &
DOCKER_PID=$!
# Wait for it to become reachable.
tries=10
until docker version &> /dev/null; do
(( tries-- ))
if [ $tries -le 0 ]; then
echo >&2 "error: daemon failed to start"
exit 1
fi
sleep 1
done
# Pre-fetch the test image.
execute time docker pull ${DOCKER_IMAGE}:${DOCKER_VERSION} > /dev/null
# Run the tests using the same client provided by the test image.
id=`execute docker create ${DOCKER_IMAGE}:${DOCKER_VERSION}`
tmp=`mktemp -d`
execute docker cp "${id}:/usr/local/bin/docker" "$tmp"
execute docker rm -f "$id" > /dev/null
export DOCKER_BINARY="${tmp}/docker"
# Run the tests.
execute time bats --tap $TESTS
|
#!/bin/bash
set -x
# command line parameters
export GENCONFIG_LAYOUT="${GENCONFIG_LAYOUT:-redhat6-64ma-debian6-64a-windows2008r2-64a}"
export BEAKER_TESTSUITE="${BEAKER_TESTSUITE:-acceptance/suites/puppet3_tests}"
export BEAKER_PRESUITE="acceptance/suites/pre_suite/puppet3_compat"
bash ./acceptance/scripts/generic/testrun-full.sh
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+0+512-pad/13-model --tokenizer_name model-configs/1024-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+0+512-pad/13-512+0+512-shuffled-N-VB-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_remove_all_but_nouns_and_verbs_first_half_full --eval_function last_element_eval |
sudo mongod --dbpath /mnt/d/sub/data/CLcheckerDB --bind_ip 0.0.0.0
# http://localhost:27017
|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd ${DIR}/../common/coco_caption
printf "\nSetting up Stanford CoreNLP for SPICE ...\n"
bash get_stanford_models.sh
cd ${DIR}/../datasets/preprocessing
printf "\nRunning pre-processing script for MS-COCO ...\n"
python coco_prepro.py --dataset_dir ''
printf "\nRunning pre-processing script for InstaPIC-1.1M ...\n"
python insta_prepro.py --dataset_dir ''
cd ${DIR}/../common/scst
printf "\nRunning pre-processing script for SCST (MS-COCO) ...\n"
python prepro_ngrams.py --dataset_dir ''
printf "\nRunning pre-processing script for SCST (InstaPIC-1.1M) ...\n"
python prepro_ngrams.py --dataset_dir '' --dataset_file_pattern 'insta_{}_v25595_s15'
printf "\nSetup complete.\n"
|
#!/bin/sh
# Generate a new, self-signed root CA
openssl req -config openssl-custom.cnf -extensions v3_ca -new -x509 -days 36500 -nodes -subj "/CN=PushyTestRoot" -newkey rsa:2048 -sha512 -out ca.pem -keyout ca.key
# Generate a certificate/key for the server
openssl req -new -keyout server-key.pem -nodes -newkey rsa:2048 -subj "/CN=com.relayrides.pushy" | \
openssl x509 -extfile ./apns-extensions.cnf -extensions apns_server_extensions -req -CAkey ca.key -CA ca.pem -days 36500 -set_serial $RANDOM -sha512 -out server-certs.pem
# Generate certificates/keys for clients and sign them with the root CA certificate
openssl req -new -keyout single-topic-client.key -nodes -newkey rsa:2048 -subj "/CN=Apple Push Services: com.relayrides.pushy/UID=com.relayrides.pushy" | \
openssl x509 -extfile ./apns-extensions.cnf -extensions apns_single_topic_client_extensions -req -CAkey ca.key -CA ca.pem -days 36500 -set_serial $RANDOM -sha512 -out single-topic-client.pem
openssl req -new -keyout multi-topic-client.key -nodes -newkey rsa:2048 -subj "/CN=Apple Push Services: com.relayrides.pushy/UID=com.relayrides.pushy" | \
openssl x509 -extfile ./apns-extensions.cnf -extensions apns_multi_topic_client_extensions -req -CAkey ca.key -CA ca.pem -days 36500 -set_serial $RANDOM -sha512 -out multi-topic-client.pem
# For simplicity, squish everything down into PKCS#12 keystores
openssl pkcs12 -export -in server-certs.pem -inkey server-key.pem -out server.p12 -password pass:pushy-test
openssl pkcs12 -export -in single-topic-client.pem -inkey single-topic-client.key -out single-topic-client.p12 -password pass:pushy-test
openssl pkcs12 -export -in multi-topic-client.pem -inkey multi-topic-client.key -out multi-topic-client.p12 -password pass:pushy-test
# We'll also want one keystore with an unprotected key to make sure no-password constructors behave correctly
openssl pkcs12 -export -in single-topic-client.pem -inkey single-topic-client.key -out single-topic-client-unprotected.p12 -nodes -password pass:pushy-test
# Generate a PKCS#12 keystore with multiple keys
for i in `seq 1 4`;
do
# Couldn't find a way to get multiple keys into a PKCS#12 file using OpenSSL directly, so we'll take the long way
# around and construct a multi-key-pair keystore with keytool, then export to PKCS#12.
keytool -genkeypair -storepass pushy-test -keypass pushy-test -dname "CN=com.relayrides.pushy.{$i}" -keystore multiple-keys.jks -alias "pair${i}"
done
keytool -importkeystore -srckeystore multiple-keys.jks -destkeystore multiple-keys.p12 -srcstoretype JKS -deststoretype PKCS12 -srcstorepass pushy-test -deststorepass pushy-test
# Generate a PKCS#12 with a certificate, but no private key
openssl pkcs12 -export -in ca.pem -nokeys -out no-keys.p12 -password pass:pushy-test
# Generate a private key for token authentication testing
openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -nocrypt -out token-auth-private-key.p8
# Clean up intermediate files
rm ca.key server-key.pem multi-topic-client.key single-topic-client.key
rm multiple-keys.jks
# Generate an elliptic key pair for token authentication testing
openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -nocrypt -out token-auth-private-key.p8
openssl ec -in token-auth-private-key.p8 -pubout -out token-auth-public-key.p8
|
#!/bin/bash
export PYTHONPATH=$1/env
nohup x-terminal-emulator -e $1/env/bin/python $1/Web_Server/run/bemoss_server.py
#---------
|
#
# Script to Install
# Linux Tools
#
# The Python Quants GmbH
#
apt-get update
apt-get upgrade -y
# Linux System Tools
apt-get install -y pandoc
apt-get install -y wget screen htop
apt-get install -y tree vim git man less
# Python3 via Linux
apt-get install -y python3 python3-pip
apt-get install -y python-is-python3
pip3 install pip --upgrade
# pip install numpy pandas scipy
# pip install matplotlib xarray
pip install sphinx nbsphinx
pip install twine q
pip install setuptools wheel
pip install ipython jupyterlab
# Configuration
wget https://certificate.tpq.io/.vimrc -O ~/.vimrc
mkdir /root/.jupyter
mkdir -p /root/.jupyter/lab/user-settings/@jupyterlab/shortcuts-extension/
cp jupyter_shortcuts.json /root/.jupyter/lab/user-settings/@jupyterlab/shortcuts-extension/shortcuts.jupyterlab-settings
# JupyterLab
jupyter server password
# jupyter lab --allow-root --ip 0.0.0.0 --port 9999
|
#!/bin/bash
#
gfortran -c analemma.f
if [ $? -ne 0 ]; then
echo "Errors compiling analemma.f"
exit
fi
#
gfortran analemma.o
if [ $? -ne 0 ]; then
echo "Errors linking and loading analemma.o"
exit
fi
#
rm analemma.o
#
chmod ugo+x a.out
mv a.out ~/binf77/analemma
#
echo "Executable installed as ~/binf77/analemma"
|
#!/bin/sh
#### STEP 0: Setup paths
#
KAIJU_HOME=$HOME/proj/SDK/sdk_modules/kb_kaiju/bin/kaiju
KAIJU_BINDIR=$KAIJU_HOME/bin
KRONA_HOME=$HOME/proj/SDK/sdk_modules/kb_kaiju/bin/Krona
KRONA_BINDIR=$KRONA_HOME/bin/bin
KAIJU_BIN=$KAIJU_BINDIR/kaiju
KAIJU_REPORT_BIN=$KAIJU_BINDIR/kaiju2table
KAIJU2KRONA_BIN=$KAIJU_BINDIR/kaiju2krona
KRONAIMPORT_BIN=$KRONA_BINDIR/ktImportText
KAIJU_DBDIR=$HOME/proj/SDK/sdk_modules/kb_kaiju/data/kaijudb
KAIJU_DBTYPE=refseq
#KAIJU_DBTYPE=progenomes
#KAIJU_DBTYPE=nr
#KAIJU_DBTYPE=nr_euk
KAIJU_NODES=$KAIJU_DBDIR/$KAIJU_DBTYPE/nodes.dmp
KAIJU_NAMES=$KAIJU_DBDIR/$KAIJU_DBTYPE/names.dmp
if [ $KAIJU_DBTYPE = "refseq" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_refseq.fmi
elif [ $KAIJU_DBTYPE = "progenomes" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_progenomes.fmi
elif [ $KAIJU_DBTYPE = "nr" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_nr.fmi
elif [ $KAIJU_DBTYPE = "nr_euk" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_nr_euk.fmi
elif [ $KAIJU_DBTYPE = "viruses" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_viruses.fmi
elif [ $KAIJU_DBTYPE = "plasmids" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_plasmids.fmi
elif [ $KAIJU_DBTYPE = "rvdb" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_rvdb.fmi
elif [ $KAIJU_DBTYPE = "plasmids" ] ; then
KAIJU_DBPATH=$KAIJU_DBDIR/$KAIJU_DBTYPE/kaiju_db_plasmids.fmi
fi
RUN_DATA_DIR=$HOME/proj/SDK/sdk_modules/kb_kaiju/test/data
fwd_reads=$RUN_DATA_DIR/seven_species_nonuniform_10K.PE_reads_fwd-0.FASTQ.gz
#fwd_reads=$RUN_DATA_DIR/seven_species_nonuniform_100K.PE_reads_fwd-0.FASTQ.gz
rev_reads=$RUN_DATA_DIR/seven_species_nonuniform_10K.PE_reads_rev-0.FASTQ.gz
#rev_reads=$RUN_DATA_DIR/seven_species_nonuniform_100K.PE_reads_rev-0.FASTQ.gz
out_basename=test2_multi_tax
kaiju_out_file=$RUN_DATA_DIR/$out_basename.kaiju.out
kaiju_summary_out_file=$kaiju_out_file.summary
krona_in_file=$kaiju_out_file.krona
krona_out_html_file=$kaiju_out_file.krona.html
#### STEP 1: kaiju run and summary
#
threads="-z 4"
minlength="-m 11"
minscore="-s 65"
mismatches="-e 5"
e_value="-E 0.05"
verbose="-v"
greedy="-a greedy"
SEG_filter="-x"
fwd_reads_arg="-i $fwd_reads"
if [ -s "$rev_reads" ] ; then
rev_reads_arg="-j $rev_reads"
else
rev_reads_arg=""
fi
cmd="$KAIJU_BIN -t $KAIJU_NODES -f $KAIJU_DBPATH -i $fwd_reads_arg -j $rev_reads_arg -o $kaiju_out_file $SEG_filter $minlength $minscore $greedy $mismatches $e_value $threads $verbose"
if [ ! -s $kaiju_out_file ] ; then
echo $cmd
exec $cmd
fi
# kaiju summary run
#tax_levels="phylum,class,order,family,genus,species"
tax_level="family"
#taxon_fullpath_arg="-p"
taxon_fullpath_arg=""
filter_perc=1
filter_unclassified="-u"
#filter_unclassified=""
if [ $filter_perc -gt 0 ] ; then
filter_arg="-m $filter_perc $filter_unclassified"
else
filter_arg=$filter_unclassified
fi
cmd="$KAIJU_REPORT_BIN -t $KAIJU_NODES -n $KAIJU_NAMES -r $tax_level $filter_arg $taxon_fullpath_arg -o $kaiju_summary_out_file $kaiju_out_file"
if [ ! -s $kaiju_summary_out_file ] ; then
echo
echo $cmd
  eval $cmd
fi
#### STEP 2: krona prep and html gen run
#
cmd="$KAIJU2KRONA_BIN -t $KAIJU_NODES -n $KAIJU_NAMES -i $kaiju_out_file -o $krona_in_file"
if [ ! -s $krona_in_file ] ; then
echo
echo $cmd
  eval $cmd
fi
cmd="$KRONAIMPORT_BIN -o $krona_out_html_file $krona_in_file"
if [ ! -s $krona_out_html_file ] ; then
echo
echo $cmd
  eval $cmd
fi
#### STEP 3: Finalize
#
echo "DONE"
exit 0
|
#!/bin/bash
ln -s /usr/local/bin/clang-format /usr/bin/clang-format
ln -s /usr/local/bin/rbt /usr/bin/rbt |
#!/bin/sh -e
# Copied w/ love from the excellent hypnoglow/helm-s3
if [ -n "${HELM_PUSH_PLUGIN_NO_INSTALL_HOOK}" ]; then
echo "Development mode: not downloading versioned release."
exit 0
fi
version="$(cat plugin.yaml | grep "version" | cut -d '"' -f 2)"
echo "Coping and installing helm-push v${version} ..."
#
#helm-push_0.9.0_linux_arm64.tar.gz
mkdir -p "bin"
mkdir -p "releases/v${version}/bin"
cp ./bin/linux/arm64/helmpush releases/v${version}/bin/
cp ./LICENSE releases/v${version}/
cp ./plugin.yaml releases/v${version}/
mv "releases/v${version}/bin/helmpush" "bin/helmpush"
|
#!/usr/bin/env sh
# canme.sh
set -e
echo 'comter1.github.io/document/' > document/.vuepress/dist/CNAME |
#!/usr/bin/env bash
menu_main_parse_args ()
{
local choice
read -p "$(text_blue " Please enter your choice: ")" choice
case "$choice" in
i|I)
core_install
press_to_continue
menu_main
;;
a|A)
menu_manage_core
;;
r|R)
menu_manage_relay
;;
f|F)
menu_manage_forger
;;
e|E)
menu_manage_explorer
;;
c|C)
menu_manage_commander
;;
m|M)
menu_miscellaneous
;;
p|P)
process_monitor
;;
l|L)
utils_log
if [[ $? -eq 130 ]]; then
press_to_continue
else
wait_to_continue
fi
;;
h|H)
menu_main_help
;;
x|X)
exit 0
;;
*)
echo -e "$(text_yellow " Invalid option chosen, please select a valid option and try again.")"
wait_to_continue
menu_main
;;
esac
}
|
#!/bin/bash
echo `date` ===============================
echo "Size of $1"
echo "============================================================="
du -sh "$1"
du -s "$1"
echo "============================================================="
echo "Number of Files in $1"
echo "============================================================="
find "$1" -type f | wc -l
echo `date` ===============================
|
#!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
#=================================================
# System Required: CentOS/Debian/Ubuntu
# Description: MTProxy
# Version: 1.0.8
# Author: Toyo
# Blog: https://doub.io/shell-jc7/
#=================================================
sh_ver="1.0.8"
filepath=$(cd "$(dirname "$0")"; pwd)
file_1=$(echo -e "${filepath}"|awk -F "$0" '{print $1}')
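# file_1 appears to be the directory holding this script (the path with "$0" stripped off);
# it is used below when writing "$file_1/mtproxy.sh ..." crontab entries.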
file="/usr/local/mtproxy"
mtproxy_file="/usr/local/mtproxy/mtproto-proxy"
mtproxy_conf="/usr/local/mtproxy/mtproxy.conf"
mtproxy_log="/usr/local/mtproxy/mtproxy.log"
mtproxy_secret="/usr/local/mtproxy/proxy-secret"
mtproxy_multi="/usr/local/mtproxy/proxy-multi.conf"
Crontab_file="/usr/bin/crontab"
Green_font_prefix="\033[32m" && Red_font_prefix="\033[31m" && Green_background_prefix="\033[42;37m" && Red_background_prefix="\033[41;37m" && Font_color_suffix="\033[0m"
Info="${Green_font_prefix}[信息]${Font_color_suffix}"
Error="${Red_font_prefix}[错误]${Font_color_suffix}"
Tip="${Green_font_prefix}[注意]${Font_color_suffix}"
check_root(){
[[ $EUID != 0 ]] && echo -e "${Error} 当前非ROOT账号(或没有ROOT权限),无法继续操作,请更换ROOT账号或使用 ${Green_background_prefix}sudo su${Font_color_suffix} 命令获取临时ROOT权限(执行后可能会提示输入当前账号的密码)。" && exit 1
}
# Detect the operating system
check_sys(){
if [[ -f /etc/redhat-release ]]; then
release="centos"
elif cat /etc/issue | grep -q -E -i "debian"; then
release="debian"
elif cat /etc/issue | grep -q -E -i "ubuntu"; then
release="ubuntu"
elif cat /etc/issue | grep -q -E -i "centos|red hat|redhat"; then
release="centos"
elif cat /proc/version | grep -q -E -i "debian"; then
release="debian"
elif cat /proc/version | grep -q -E -i "ubuntu"; then
release="ubuntu"
elif cat /proc/version | grep -q -E -i "centos|red hat|redhat"; then
release="centos"
fi
#bit=`uname -m`
}
check_installed_status(){
[[ ! -e ${mtproxy_file} ]] && echo -e "${Error} MTProxy 没有安装,请检查 !" && exit 1
}
check_crontab_installed_status(){
if [[ ! -e ${Crontab_file} ]]; then
echo -e "${Error} Crontab 没有安装,开始安装..."
if [[ ${release} == "centos" ]]; then
yum install crond -y
else
apt-get install cron -y
fi
if [[ ! -e ${Crontab_file} ]]; then
echo -e "${Error} Crontab 安装失败,请检查!" && exit 1
else
echo -e "${Info} Crontab 安装成功!"
fi
fi
}
check_pid(){
PID=`ps -ef| grep "./mtproto-proxy "| grep -v "grep" | grep -v "init.d" |grep -v "service" |awk '{print $2}'`
}
Download_mtproxy(){
mkdir '/tmp/mtproxy'
cd '/tmp/mtproxy'
# wget -N --no-check-certificate "https://github.com/TelegramMessenger/MTProxy/archive/master.zip"
git clone https://github.com/TelegramMessenger/MTProxy.git
[[ ! -e "MTProxy/" ]] && echo -e "${Error} MTProxy 下载失败!" && cd '/tmp' && rm -rf '/tmp/mtproxy' && exit 1
cd MTProxy
make
[[ ! -e "objs/bin/mtproto-proxy" ]] && echo -e "${Error} MTProxy 编译失败!" && echo -e "另外,如果在上面几行看到 ${Green_font_prefix}xxxxx option \"-std=gnu11\"${Font_color_suffix} 字样,说明是系统版本过低,请尝试更换系统重试!" && make clean && cd '/tmp' && rm -rf '/tmp/mtproxy' && exit 1
[[ ! -e "${file}" ]] && mkdir "${file}"
\cp -f objs/bin/mtproto-proxy "${file}"
chmod +x "${mtproxy_file}"
cd '/tmp'
rm -rf '/tmp/mtproxy'
}
Download_secret(){
[[ -e "${mtproxy_secret}" ]] && rm -rf "${mtproxy_secret}"
wget --no-check-certificate -q "https://core.telegram.org/getProxySecret" -O "${mtproxy_secret}"
[[ ! -e "${mtproxy_secret}" ]] && echo -e "${Error} MTProxy Secret下载失败! 脚本将会继续安装但会启动失败,请尝试手动下载:${Green_font_prefix}wget --no-check-certificate -q \"https://core.telegram.org/getProxySecret\" -O \"${mtproxy_secret}\"${Font_color_suffix}"
echo -e "${Info} MTProxy Secret下载成功!"
}
Download_multi(){
[[ -e "${mtproxy_multi}" ]] && rm -rf "${mtproxy_multi}"
wget --no-check-certificate -q "https://core.telegram.org/getProxyConfig" -O "${mtproxy_multi}"
[[ ! -e "${mtproxy_multi}" ]] && echo -e "${Error} MTProxy Multi下载失败!脚本将会继续安装但会启动失败,请尝试手动下载:${Green_font_prefix}wget --no-check-certificate -q \"https://core.telegram.org/getProxyConfig\" -O \"${mtproxy_multi}\"${Font_color_suffix}"
echo -e "${Info} MTProxy Secret下载成功!"
}
Service_mtproxy(){
if [[ ${release} = "centos" ]]; then
if ! wget --no-check-certificate "https://raw.githubusercontent.com/ToyoDAdoubiBackup/doubi/master/service/mtproxy_centos" -O /etc/init.d/mtproxy; then
echo -e "${Error} MTProxy服务 管理脚本下载失败 !"
rm -rf "${file}"
exit 1
fi
chmod +x "/etc/init.d/mtproxy"
chkconfig --add mtproxy
chkconfig mtproxy on
else
if ! wget --no-check-certificate "https://raw.githubusercontent.com/ToyoDAdoubiBackup/doubi/master/service/mtproxy_debian" -O /etc/init.d/mtproxy; then
echo -e "${Error} MTProxy服务 管理脚本下载失败 !"
rm -rf "${file}"
exit 1
fi
chmod +x "/etc/init.d/mtproxy"
update-rc.d -f mtproxy defaults
fi
echo -e "${Info} MTProxy服务 管理脚本下载完成 !"
}
Installation_dependency(){
if [[ ${release} == "centos" ]]; then
Centos_yum
else
Debian_apt
fi
\cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
}
Centos_yum(){
cat /etc/redhat-release |grep 7\..*|grep -i centos>/dev/null
yum update
if [[ $? = 0 ]]; then
yum install -y openssl-devel zlib-devel git
else
yum install -y openssl-devel zlib-devel git
fi
yum groupinstall "Development Tools" -y
}
Debian_apt(){
cat /etc/issue |grep 9\..*>/dev/null
apt-get update
if [[ $? = 0 ]]; then
apt-get install -y build-essential libssl-dev zlib1g-dev git
else
apt-get install -y build-essential libssl-dev zlib1g-dev git
fi
}
Write_config(){
cat > ${mtproxy_conf}<<-EOF
PORT = ${mtp_port}
PASSWORD = ${mtp_passwd}
TAG = ${mtp_tag}
NAT = ${mtp_nat}
EOF
}
Read_config(){
[[ ! -e ${mtproxy_conf} ]] && echo -e "${Error} MTProxy 配置文件不存在 !" && exit 1
port=$(cat ${mtproxy_conf}|grep 'PORT = '|awk -F 'PORT = ' '{print $NF}')
passwd=$(cat ${mtproxy_conf}|grep 'PASSWORD = '|awk -F 'PASSWORD = ' '{print $NF}')
tag=$(cat ${mtproxy_conf}|grep 'TAG = '|awk -F 'TAG = ' '{print $NF}')
nat=$(cat ${mtproxy_conf}|grep 'NAT = '|awk -F 'NAT = ' '{print $NF}')
}
Set_port(){
while true
do
echo -e "请输入 MTProxy 端口 [1-65535]"
read -e -p "(默认: 443):" mtp_port
[[ -z "${mtp_port}" ]] && mtp_port="443"
echo $((${mtp_port}+0)) &>/dev/null
if [[ $? -eq 0 ]]; then
if [[ ${mtp_port} -ge 1 ]] && [[ ${mtp_port} -le 65535 ]]; then
echo && echo "========================"
echo -e " 端口 : ${Red_background_prefix} ${mtp_port} ${Font_color_suffix}"
echo "========================" && echo
break
else
echo "输入错误, 请输入正确的端口。"
fi
else
echo "输入错误, 请输入正确的端口。"
fi
done
}
Set_passwd(){
while true
do
echo "请输入 MTProxy 密匙(手动输入必须为32位,[0-9][a-z][A-Z],建议随机生成)"
read -e -p "(避免出错,强烈推荐随机生成,直接回车):" mtp_passwd
if [[ -z "${mtp_passwd}" ]]; then
mtp_passwd=$(date +%s%N | md5sum | head -c 32)
else
[[ ${#mtp_passwd} != 32 ]] && echo -e "${Error} 请输入正确的密匙(32位字符)。" && continue
fi
echo && echo "========================"
echo -e " 密码 : ${Red_background_prefix} dd${mtp_passwd} ${Font_color_suffix}"
echo "========================" && echo
break
done
}
Set_tag(){
echo "请输入 MTProxy 的 TAG标签(TAG标签必须是32位,TAG标签只有在通过官方机器人 @MTProxybot 分享代理账号后才会获得,不清楚请留空回车)"
read -e -p "(默认:回车跳过):" mtp_tag
if [[ ! -z "${mtp_tag}" ]]; then
echo && echo "========================"
echo -e " TAG : ${Red_background_prefix} ${mtp_tag} ${Font_color_suffix}"
echo "========================" && echo
fi
}
Set_nat(){
echo -e "\n=== 当前服务器所有网卡信息:\n"
if [[ -e "/sbin/ip" ]]; then
ip addr show
else
ifconfig
fi
echo -e "\n== 解释:网卡名 lo 指的是本机,请无视。
== 解释:一般情况下,主网卡名为 eth0,Debian9系统为 ens3,CentOS Ubuntu最新系统可能为 enpXsX(X代表数字或字母)。OpenVZ 虚拟化为 venet0\n"
echo -e "如果本机是NAT服务器(谷歌云、微软云、阿里云等,网卡绑定的IP为 10.xx.xx.xx 开头的),则请输入你的服务器内网IP,否则会导致无法使用。如果不是请直接回车!"
read -e -p "(默认:回车跳过):" mtp_nat
if [[ -z "${mtp_nat}" ]]; then
mtp_nat=""
else
getip
mtp_nat="${mtp_nat}:${ip}"
echo && echo "========================"
echo -e " NAT : ${Red_background_prefix} ${mtp_nat} ${Font_color_suffix}"
echo "========================" && echo
fi
}
Set_mtproxy(){
check_installed_status
echo && echo -e "你要做什么?
${Green_font_prefix} 1.${Font_color_suffix} 修改 端口配置
${Green_font_prefix} 2.${Font_color_suffix} 修改 密码配置
${Green_font_prefix} 3.${Font_color_suffix} 修改 TAG 配置
${Green_font_prefix} 4.${Font_color_suffix} 修改 NAT 配置
${Green_font_prefix} 5.${Font_color_suffix} 修改 全部配置
————————————————
${Green_font_prefix} 6.${Font_color_suffix} 更新 Telegram IP段(无需频繁更新)
${Green_font_prefix} 7.${Font_color_suffix} 更新 Telegram 密匙(一般不用管)
————————————————
${Green_font_prefix} 8.${Font_color_suffix} 定时 更新 Telegram IP段
${Green_font_prefix} 9.${Font_color_suffix} 监控 运行状态
${Green_font_prefix}10.${Font_color_suffix} 监控 外网IP变更" && echo
read -e -p "(默认: 取消):" mtp_modify
[[ -z "${mtp_modify}" ]] && echo "已取消..." && exit 1
if [[ "${mtp_modify}" == "1" ]]; then
Read_config
Set_port
mtp_passwd=${passwd}
mtp_tag=${tag}
mtp_nat=${nat}
Write_config
Del_iptables
Add_iptables
Restart_mtproxy
elif [[ "${mtp_modify}" == "2" ]]; then
Read_config
Set_passwd
mtp_port=${port}
mtp_tag=${tag}
mtp_nat=${nat}
Write_config
Restart_mtproxy
elif [[ "${mtp_modify}" == "3" ]]; then
Read_config
Set_tag
mtp_port=${port}
mtp_passwd=${passwd}
mtp_nat=${nat}
Write_config
Restart_mtproxy
elif [[ "${mtp_modify}" == "4" ]]; then
Read_config
Set_nat
mtp_port=${port}
mtp_passwd=${passwd}
mtp_tag=${tag}
Write_config
Restart_mtproxy
elif [[ "${mtp_modify}" == "5" ]]; then
Read_config
Set_port
Set_passwd
Set_tag
Set_nat
Write_config
Restart_mtproxy
elif [[ "${mtp_modify}" == "6" ]]; then
Update_multi
elif [[ "${mtp_modify}" == "7" ]]; then
Update_secret
elif [[ "${mtp_modify}" == "8" ]]; then
Set_crontab_update_multi
elif [[ "${mtp_modify}" == "9" ]]; then
Set_crontab_monitor_mtproxy
elif [[ "${mtp_modify}" == "10" ]]; then
Set_crontab_monitorip
else
echo -e "${Error} 请输入正确的数字(1-10)" && exit 1
fi
}
Install_mtproxy(){
check_root
[[ -e ${mtproxy_file} ]] && echo -e "${Error} 检测到 MTProxy 已安装 !" && exit 1
echo -e "${Info} 开始设置 用户配置..."
Set_port
Set_passwd
Set_tag
Set_nat
echo -e "${Info} 开始安装/配置 依赖..."
Installation_dependency
echo -e "${Info} 开始下载/安装..."
Download_mtproxy
Download_secret
Download_multi
echo -e "${Info} 开始下载/安装 服务脚本(init)..."
Service_mtproxy
echo -e "${Info} 开始写入 配置文件..."
Write_config
echo -e "${Info} 开始设置 iptables防火墙..."
Set_iptables
echo -e "${Info} 开始添加 iptables防火墙规则..."
Add_iptables
echo -e "${Info} 开始保存 iptables防火墙规则..."
Save_iptables
echo -e "${Info} 所有步骤 安装完毕,开始启动..."
Start_mtproxy
}
Start_mtproxy(){
check_installed_status
check_pid
[[ ! -z ${PID} ]] && echo -e "${Error} MTProxy 正在运行,请检查 !" && exit 1
/etc/init.d/mtproxy start
sleep 1s
check_pid
[[ ! -z ${PID} ]] && View_mtproxy
}
Stop_mtproxy(){
check_installed_status
check_pid
[[ -z ${PID} ]] && echo -e "${Error} MTProxy 没有运行,请检查 !" && exit 1
/etc/init.d/mtproxy stop
}
Restart_mtproxy(){
check_installed_status
check_pid
[[ ! -z ${PID} ]] && /etc/init.d/mtproxy stop
/etc/init.d/mtproxy start
sleep 1s
check_pid
[[ ! -z ${PID} ]] && View_mtproxy
}
Update_mtproxy(){
echo -e "${Tip} 因为官方无最新版本号,所以无法对比版本,请自行判断是否需要更新。是否更新?[Y/n]"
read -e -p "(默认: y):" yn
[[ -z "${yn}" ]] && yn="y"
if [[ ${yn} == [Yy] ]]; then
check_installed_status
check_pid
[[ ! -z $PID ]] && /etc/init.d/mtproxy stop
rm -rf ${mtproxy_file}
Download_mtproxy
echo -e "${Info} MTProxy 更新完成..."
Start_mtproxy
fi
}
Uninstall_mtproxy(){
check_installed_status
echo "确定要卸载 MTProxy ? (y/N)"
echo
read -e -p "(默认: n):" unyn
[[ -z ${unyn} ]] && unyn="n"
if [[ ${unyn} == [Yy] ]]; then
check_pid
[[ ! -z $PID ]] && kill -9 ${PID}
if [[ -e ${mtproxy_conf} ]]; then
port=$(cat ${mtproxy_conf}|grep 'PORT = '|awk -F 'PORT = ' '{print $NF}')
Del_iptables
Save_iptables
fi
if [[ ! -z $(crontab -l | grep "mtproxy.sh monitor") ]]; then
crontab_monitor_mtproxy_cron_stop
fi
if [[ ! -z $(crontab -l | grep "mtproxy.sh update") ]]; then
crontab_update_mtproxy_cron_stop
fi
rm -rf "${file}"
if [[ ${release} = "centos" ]]; then
chkconfig --del mtproxy
else
update-rc.d -f mtproxy remove
fi
rm -rf "/etc/init.d/mtproxy"
echo && echo "MTProxy 卸载完成 !" && echo
else
echo && echo "卸载已取消..." && echo
fi
}
getip(){
ip=$(wget -qO- -t1 -T2 ipinfo.io/ip)
if [[ -z "${ip}" ]]; then
ip=$(wget -qO- -t1 -T2 api.ip.sb/ip)
if [[ -z "${ip}" ]]; then
ip=$(wget -qO- -t1 -T2 members.3322.org/dyndns/getip)
if [[ -z "${ip}" ]]; then
ip="VPS_IP"
fi
fi
fi
}
View_mtproxy(){
check_installed_status
Read_config
getip
clear && echo
echo -e "Mtproto Proxy 用户配置:"
echo -e "————————————————"
echo -e " 地址\t: ${Green_font_prefix}${ip}${Font_color_suffix}"
echo -e " 端口\t: ${Green_font_prefix}${port}${Font_color_suffix}"
echo -e " 密匙\t: ${Green_font_prefix}dd${passwd}${Font_color_suffix}"
[[ ! -z "${nat}" ]] && echo -e " NAT \t: ${Green_font_prefix}${nat}${Font_color_suffix}"
[[ ! -z "${tag}" ]] && echo -e " TAG \t: ${Green_font_prefix}${tag}${Font_color_suffix}"
echo -e " 链接\t: ${Red_font_prefix}tg://proxy?server=${ip}&port=${port}&secret=dd${passwd}${Font_color_suffix}"
echo -e " 链接\t: ${Red_font_prefix}https://t.me/proxy?server=${ip}&port=${port}&secret=dd${passwd}${Font_color_suffix}"
echo
echo -e " ${Red_font_prefix}注意\t:${Font_color_suffix} 密匙头部的 ${Green_font_prefix}dd${Font_color_suffix} 字符是代表客户端启用${Green_font_prefix}随机填充混淆模式${Font_color_suffix},如果不需要请手动删除。\n \t 另外,在官方机器人处分享账号获取TAG标签时记得删除,获取TAG标签后分享时可以再加上。"
}
View_Log(){
check_installed_status
[[ ! -e ${mtproxy_log} ]] && echo -e "${Error} MTProxy 日志文件不存在 !" && exit 1
echo && echo -e "${Tip} 按 ${Red_font_prefix}Ctrl+C${Font_color_suffix} 终止查看日志" && echo -e "如果需要查看完整日志内容,请用 ${Red_font_prefix}cat ${mtproxy_log}${Font_color_suffix} 命令。" && echo
tail -f ${mtproxy_log}
}
Update_secret(){
rm -rf "${mtproxy_secret}"
Download_secret
Restart_mtproxy
}
Update_multi(){
rm -rf "${mtproxy_multi}"
Download_multi
Restart_mtproxy
}
# Show connection info
debian_View_user_connection_info(){
format_1=$1
Read_config
user_IP=`netstat -anp |grep 'ESTABLISHED' |grep 'mtproto' |grep 'tcp' |grep ":${port} " |awk '{print $5}' |awk -F ":" '{print $1}' |sort -u |grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"`
if [[ -z ${user_IP} ]]; then
user_IP_total="0"
echo -e "端口: ${Green_font_prefix}"${port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: "
else
user_IP_total=`echo -e "${user_IP}"|wc -l`
if [[ ${format_1} == "IP_address" ]]; then
echo -e "端口: ${Green_font_prefix}"${port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: "
get_IP_address
echo
else
user_IP=$(echo -e "\n${user_IP}")
echo -e "端口: ${Green_font_prefix}"${user_port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: ${Green_font_prefix}${user_IP}${Font_color_suffix}\n"
fi
fi
user_IP=""
}
centos_View_user_connection_info(){
format_1=$1
Read_config
user_IP=`netstat -anp |grep 'ESTABLISHED' |grep 'mtproto' |grep 'tcp' |grep ":${port} "|awk '{print $5}' |awk -F ":" '{print $1}' |sort -u |grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"`
if [[ -z ${user_IP} ]]; then
user_IP_total="0"
echo -e "端口: ${Green_font_prefix}"${port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: "
else
user_IP_total=`echo -e "${user_IP}"|wc -l`
if [[ ${format_1} == "IP_address" ]]; then
echo -e "端口: ${Green_font_prefix}"${port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: "
get_IP_address
echo
else
user_IP=$(echo -e "\n${user_IP}")
echo -e "端口: ${Green_font_prefix}"${port}"${Font_color_suffix}\t 链接IP总数: ${Green_font_prefix}"${user_IP_total}"${Font_color_suffix}\t 当前链接IP: ${Green_font_prefix}${user_IP}${Font_color_suffix}\n"
fi
fi
user_IP=""
}
View_user_connection_info(){
check_installed_status
echo && echo -e "请选择要显示的格式:
${Green_font_prefix}1.${Font_color_suffix} 显示 IP 格式
${Green_font_prefix}2.${Font_color_suffix} 显示 IP+IP归属地 格式" && echo
read -e -p "(默认: 1):" mtproxy_connection_info
[[ -z "${mtproxy_connection_info}" ]] && mtproxy_connection_info="1"
if [[ "${mtproxy_connection_info}" == "1" ]]; then
View_user_connection_info_1 ""
elif [[ "${mtproxy_connection_info}" == "2" ]]; then
echo -e "${Tip} 检测IP归属地(ipip.net),如果IP较多,可能时间会比较长..."
View_user_connection_info_1 "IP_address"
else
echo -e "${Error} 请输入正确的数字(1-2)" && exit 1
fi
}
View_user_connection_info_1(){
format=$1
if [[ ${release} = "centos" ]]; then
cat /etc/redhat-release |grep 7\..*|grep -i centos>/dev/null
if [[ $? = 0 ]]; then
debian_View_user_connection_info "$format"
else
centos_View_user_connection_info "$format"
fi
else
debian_View_user_connection_info "$format"
fi
}
get_IP_address(){
if [[ ! -z ${user_IP} ]]; then
for((integer_1 = ${user_IP_total}; integer_1 >= 1; integer_1--))
do
IP=$(echo "${user_IP}" |sed -n "$integer_1"p)
IP_address=$(wget -qO- -t1 -T2 http://freeapi.ipip.net/${IP}|sed 's/\"//g;s/,//g;s/\[//g;s/\]//g')
echo -e "${Green_font_prefix}${IP}${Font_color_suffix} (${IP_address})"
sleep 1s
done
fi
}
Set_crontab_monitor_mtproxy(){
check_crontab_installed_status
crontab_monitor_mtproxy_status=$(crontab -l|grep "mtproxy.sh monitor")
if [[ -z "${crontab_monitor_mtproxy_status}" ]]; then
echo && echo -e "当前监控运行状态模式: ${Red_font_prefix}未开启${Font_color_suffix}" && echo
echo -e "确定要开启 ${Green_font_prefix}MTProxy 服务端运行状态监控${Font_color_suffix} 功能吗?(当进程关闭则自动启动 MTProxy 服务端)[Y/n]"
read -e -p "(默认: y):" crontab_monitor_mtproxy_status_ny
[[ -z "${crontab_monitor_mtproxy_status_ny}" ]] && crontab_monitor_mtproxy_status_ny="y"
if [[ ${crontab_monitor_mtproxy_status_ny} == [Yy] ]]; then
crontab_monitor_mtproxy_cron_start
else
echo && echo " 已取消..." && echo
fi
else
echo && echo -e "当前监控运行状态模式: ${Green_font_prefix}已开启${Font_color_suffix}" && echo
echo -e "确定要关闭 ${Red_font_prefix}MTProxy 服务端运行状态监控${Font_color_suffix} 功能吗?(当进程关闭则自动启动 MTProxy 服务端)[y/N]"
read -e -p "(默认: n):" crontab_monitor_mtproxy_status_ny
[[ -z "${crontab_monitor_mtproxy_status_ny}" ]] && crontab_monitor_mtproxy_status_ny="n"
if [[ ${crontab_monitor_mtproxy_status_ny} == [Yy] ]]; then
crontab_monitor_mtproxy_cron_stop
else
echo && echo " 已取消..." && echo
fi
fi
}
crontab_monitor_mtproxy_cron_start(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh monitor/d" "$file_1/crontab.bak"
echo -e "\n* * * * * /bin/bash $file_1/mtproxy.sh monitor" >> "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh monitor")
if [[ -z ${cron_config} ]]; then
echo -e "${Error} MTProxy 服务端运行状态监控功能 启动失败 !" && exit 1
else
echo -e "${Info} MTProxy 服务端运行状态监控功能 启动成功 !"
fi
}
crontab_monitor_mtproxy_cron_stop(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh monitor/d" "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh monitor")
if [[ ! -z ${cron_config} ]]; then
echo -e "${Error} MTProxy 服务端运行状态监控功能 停止失败 !" && exit 1
else
echo -e "${Info} MTProxy 服务端运行状态监控功能 停止成功 !"
fi
}
crontab_monitor_mtproxy(){
check_installed_status
check_pid
#echo "${PID}"
if [[ -z ${PID} ]]; then
echo -e "${Error} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] 检测到 MTProxy服务端 未运行 , 开始启动..." | tee -a ${mtproxy_log}
/etc/init.d/mtproxy start
sleep 1s
check_pid
if [[ -z ${PID} ]]; then
echo -e "${Error} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] MTProxy服务端 启动失败..." | tee -a ${mtproxy_log}
else
echo -e "${Info} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] MTProxy服务端 启动成功..." | tee -a ${mtproxy_log}
fi
else
echo -e "${Info} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] MTProxy服务端 进程运行正常..." | tee -a ${mtproxy_log}
fi
}
Set_crontab_update_multi(){
check_crontab_installed_status
crontab_update_mtproxy_status=$(crontab -l|grep "mtproxy.sh update")
if [[ -z "${crontab_update_mtproxy_status}" ]]; then
echo && echo -e "当前自动更新 Telegram IP段功能: ${Red_font_prefix}未开启${Font_color_suffix}" && echo
echo -e "确定要开启 ${Green_font_prefix}MTProxy 自动更新 Telegram IP段${Font_color_suffix} 功能吗?[Y/n]"
read -e -p "(默认: y):" crontab_update_mtproxy_status_ny
[[ -z "${crontab_update_mtproxy_status_ny}" ]] && crontab_update_mtproxy_status_ny="y"
if [[ ${crontab_update_mtproxy_status_ny} == [Yy] ]]; then
crontab_update_mtproxy_cron_start
else
echo && echo " 已取消..." && echo
fi
else
echo && echo -e "当前自动更新 Telegram IP段功能: ${Green_font_prefix}已开启${Font_color_suffix}" && echo
echo -e "确定要关闭 ${Red_font_prefix}MTProxy 自动更新 Telegram IP段${Font_color_suffix} 功能吗?[y/N]"
read -e -p "(默认: n):" crontab_update_mtproxy_status_ny
[[ -z "${crontab_update_mtproxy_status_ny}" ]] && crontab_update_mtproxy_status_ny="n"
if [[ ${crontab_update_mtproxy_status_ny} == [Yy] ]]; then
crontab_update_mtproxy_cron_stop
else
echo && echo " 已取消..." && echo
fi
fi
}
crontab_update_mtproxy_cron_start(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh update/d" "$file_1/crontab.bak"
echo -e "\n10 3 * * * /bin/bash $file_1/mtproxy.sh update" >> "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh update")
if [[ -z ${cron_config} ]]; then
echo -e "${Error} MTProxy 自动更新 Telegram IP段功能 启动失败 !" && exit 1
else
echo -e "${Info} MTProxy 自动更新 Telegram IP段功能 启动成功 !"
fi
}
crontab_update_mtproxy_cron_stop(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh update/d" "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh update")
if [[ ! -z ${cron_config} ]]; then
echo -e "${Error} MTProxy 自动更新 Telegram IP段功能 停止失败 !" && exit 1
else
echo -e "${Info} MTProxy 自动更新 Telegram IP段功能 停止成功 !"
fi
}
crontab_update_mtproxy(){
check_installed_status
check_pid
rm -rf "${mtproxy_multi}"
Download_multi
echo -e "${Info} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] Telegram IP段自动更新完成..." | tee -a ${mtproxy_log}
/etc/init.d/mtproxy restart
}
Set_crontab_monitorip(){
check_crontab_installed_status
crontab_monitor_status=$(crontab -l|grep "mtproxy.sh monitorip")
if [[ -z "${crontab_monitor_status}" ]]; then
echo && echo -e "当前监控外网IP模式: ${Red_font_prefix}未开启${Font_color_suffix}" && echo
echo -e "确定要开启 ${Green_font_prefix}服务器外网IP变更监控${Font_color_suffix} 功能吗?(当服务器外网IP变化后,自动重新配置并重启服务端)[Y/n]"
read -e -p "(默认: y):" crontab_monitor_status_ny
[[ -z "${crontab_monitor_status_ny}" ]] && crontab_monitor_status_ny="y"
if [[ ${crontab_monitor_status_ny} == [Yy] ]]; then
crontab_monitor_cron_start2
else
echo && echo " 已取消..." && echo
fi
else
echo && echo -e "当前监控外网IP模式: ${Green_font_prefix}已开启${Font_color_suffix}" && echo
echo -e "确定要关闭 ${Red_font_prefix}服务器外网IP变更监控${Font_color_suffix} 功能吗?(当服务器外网IP变化后,自动重新配置并重启服务端)[Y/n]"
read -e -p "(默认: n):" crontab_monitor_status_ny
[[ -z "${crontab_monitor_status_ny}" ]] && crontab_monitor_status_ny="n"
if [[ ${crontab_monitor_status_ny} == [Yy] ]]; then
crontab_monitor_cron_stop2
else
echo && echo " 已取消..." && echo
fi
fi
}
crontab_monitor_cron_start2(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh monitorip/d" "$file_1/crontab.bak"
echo -e "\n* * * * * /bin/bash $file_1/mtproxy.sh monitorip" >> "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh monitorip")
if [[ -z ${cron_config} ]]; then
echo -e "${Error} 服务器外网IP变更监控功能 启动失败 !" && exit 1
else
echo -e "${Info} 服务器外网IP变更监控功能 启动成功 !"
fi
}
crontab_monitor_cron_stop2(){
crontab -l > "$file_1/crontab.bak"
sed -i "/mtproxy.sh monitorip/d" "$file_1/crontab.bak"
crontab "$file_1/crontab.bak"
rm -r "$file_1/crontab.bak"
cron_config=$(crontab -l | grep "mtproxy.sh monitorip")
if [[ ! -z ${cron_config} ]]; then
echo -e "${Error} 服务器外网IP变更监控功能 停止失败 !" && exit 1
else
echo -e "${Info} 服务器外网IP变更监控功能 停止成功 !"
fi
}
crontab_monitorip(){
check_installed_status
Read_config
getip
ipv4=$(echo "${nat}"|awk -F ':' '{print $2}')
nat_ipv4=$(echo "${nat}"|awk -F ':' '{print $1}')
if [[ "${ip}" != "VPS_IP" ]]; then
if [[ "${ip}" != "${ipv4}" ]]; then
echo -e "${Info} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] 检测到 服务器外网IP变更[旧: ${ipv4},新: ${ip}], 开始重新配置并准备重启服务端..." | tee -a ${mtproxy_log}
mtp_nat="${nat_ipv4}:${ip}"
mtp_port=${port}
mtp_passwd=${passwd}
mtp_tag=${tag}
Write_config
Restart_mtproxy
fi
else
echo -e "${Error} [$(date "+%Y-%m-%d %H:%M:%S %u %Z")] 服务器外网IPv4获取失败..." | tee -a ${mtproxy_log}
fi
}
Add_iptables(){
iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport ${mtp_port} -j ACCEPT
#iptables -I INPUT -m state --state NEW -m udp -p udp --dport ${mtp_port} -j ACCEPT
}
Del_iptables(){
iptables -D INPUT -m state --state NEW -m tcp -p tcp --dport ${port} -j ACCEPT
#iptables -D INPUT -m state --state NEW -m udp -p udp --dport ${port} -j ACCEPT
}
Save_iptables(){
if [[ ${release} == "centos" ]]; then
service iptables save
else
iptables-save > /etc/iptables.up.rules
fi
}
Set_iptables(){
if [[ ${release} == "centos" ]]; then
service iptables save
chkconfig --level 2345 iptables on
else
iptables-save > /etc/iptables.up.rules
echo -e '#!/bin/bash\n/sbin/iptables-restore < /etc/iptables.up.rules' > /etc/network/if-pre-up.d/iptables
chmod +x /etc/network/if-pre-up.d/iptables
fi
}
Update_Shell(){
sh_new_ver=$(wget --no-check-certificate -qO- -t1 -T3 "https://raw.githubusercontent.com/ToyoDAdoubiBackup/doubi/master/mtproxy.sh"|grep 'sh_ver="'|awk -F "=" '{print $NF}'|sed 's/\"//g'|head -1) && sh_new_type="github"
[[ -z ${sh_new_ver} ]] && echo -e "${Error} 无法链接到 Github !" && exit 0
if [[ -e "/etc/init.d/mtproxy" ]]; then
rm -rf /etc/init.d/mtproxy
Service_mtproxy
fi
wget -N --no-check-certificate "https://raw.githubusercontent.com/ToyoDAdoubiBackup/doubi/master/mtproxy.sh" && chmod +x mtproxy.sh
echo -e "脚本已更新为最新版本[ ${sh_new_ver} ] !(注意:因为更新方式为直接覆盖当前运行的脚本,所以可能下面会提示一些报错,无视即可)" && exit 0
}
check_sys
action=$1
if [[ "${action}" == "monitor" ]]; then
crontab_monitor_mtproxy
elif [[ "${action}" == "update" ]]; then
crontab_update_mtproxy
elif [[ "${action}" == "monitorip" ]]; then
crontab_monitorip
else
echo && echo -e " MTProxy 一键管理脚本 ${Red_font_prefix}[v${sh_ver}]${Font_color_suffix}
---- Toyo | doub.io/shell-jc7 ----
${Green_font_prefix} 0.${Font_color_suffix} 升级脚本
————————————
${Green_font_prefix} 1.${Font_color_suffix} 安装 MTProxy
${Green_font_prefix} 2.${Font_color_suffix} 更新 MTProxy
${Green_font_prefix} 3.${Font_color_suffix} 卸载 MTProxy
————————————
${Green_font_prefix} 4.${Font_color_suffix} 启动 MTProxy
${Green_font_prefix} 5.${Font_color_suffix} 停止 MTProxy
${Green_font_prefix} 6.${Font_color_suffix} 重启 MTProxy
————————————
${Green_font_prefix} 7.${Font_color_suffix} 设置 账号配置
${Green_font_prefix} 8.${Font_color_suffix} 查看 账号信息
${Green_font_prefix} 9.${Font_color_suffix} 查看 日志信息
${Green_font_prefix}10.${Font_color_suffix} 查看 链接信息
————————————" && echo
if [[ -e ${mtproxy_file} ]]; then
check_pid
if [[ ! -z "${PID}" ]]; then
echo -e " 当前状态: ${Green_font_prefix}已安装${Font_color_suffix} 并 ${Green_font_prefix}已启动${Font_color_suffix}"
else
echo -e " 当前状态: ${Green_font_prefix}已安装${Font_color_suffix} 但 ${Red_font_prefix}未启动${Font_color_suffix}"
fi
else
echo -e " 当前状态: ${Red_font_prefix}未安装${Font_color_suffix}"
fi
echo
read -e -p " 请输入数字 [0-10]:" num
case "$num" in
0)
Update_Shell
;;
1)
Install_mtproxy
;;
2)
Update_mtproxy
;;
3)
Uninstall_mtproxy
;;
4)
Start_mtproxy
;;
5)
Stop_mtproxy
;;
6)
Restart_mtproxy
;;
7)
Set_mtproxy
;;
8)
View_mtproxy
;;
9)
View_Log
;;
10)
View_user_connection_info
;;
*)
echo "请输入正确数字 [0-10]"
;;
esac
fi |
#!/bin/bash
number=$RANDOM
time=$(date "+%Y-%m-%d %T")
if [ $(($number % 2)) -eq 0 ]
then
echo "[$(date "+%Y-%m-%d %T")] I rolled $number" >> results.txt
git add results.txt
git commit -m"I rolled $number!"
git push
fi
|
#!/bin/bash
#cd 'C:/Program Files/HDF_Group/HDF5/1.8.17/bin/'
#for dll in *.dll; do
# base_def=$(basename $dll .dll)
# def_file="${base_def}.def"
# lib_file="lib${base_def}.a"
# pexports $dll > $def_file
# dlltool -d $def_file -l $lib_file
#done
patch -p1 -d 'C:/Program Files/HDF_Group/HDF5/1.8.17/' < C:/projects/amici/scripts/hdf5-1.8.7-mingw.patch |
#!/bin/sh
set -ex
name="nccmp"
version=$1
software=$name-$version
compiler=${COMPILER:-"gnu-7.3.0"}
set +x
source $MODULESHOME/init/sh
module load $(echo $compiler | sed 's/-/\//g')
module load szip
module load hdf5
module load netcdf
module list
set -x
export CFLAGS="-fPIC"
export LDFLAGS="-L$NETCDF_ROOT/lib -L$HDF5_ROOT/lib -L$SZIP_ROOT/lib"
url="https://sourceforge.net/projects/nccmp/files/${software}.tar.gz"
cd ${PKGDIR:-"../pkg"}
# Enable header pad comparison, if netcdf-c src directory exists!
[[ -d "netcdf-c-$NETCDF_VERSION" ]] && extra_confs="--with-netcdf=$PWD/netcdf-c-$NETCDF_VERSION" || extra_confs=""
[[ -d $software ]] || ( wget $url; tar -xf $software.tar.gz )
[[ -d $software ]] && cd $software || ( echo "$software does not exist, ABORT!"; exit 1 )
[[ -d build ]] && rm -rf build
mkdir -p build && cd build
prefix="$PREFIX/$compiler/$name/$version"
[[ -d $prefix ]] && ( echo "$prefix exists, ABORT!"; exit 1 )
../configure --prefix=$prefix $extra_confs
make -j${NTHREADS:-4}
[[ "$CHECK" = "YES" ]] && make check
make install
$STACKROOT/ush/deploy_module.sh "compiler" $name $version
exit 0
|
#!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# measure on speed-perturbed data, but perturbed so slightly that the fbank length remains the same
# with pad_to_max_duration, this reduces cuDNN benchmark's burn-in period to a single step
: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}}
: ${OUTPUT_DIR:=${3:-"/results"}}
: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json"}
# run for a number of epochs, but don't finalize the training
: ${EPOCHS_THIS_JOB:=2}
: ${EPOCHS:=100000}
: ${RESUME:=false}
: ${SAVE_FREQUENCY:=100000}
: ${EVAL_FREQUENCY:=100000}
: ${GRAD_ACCUMULATION_STEPS:=1}
: ${AMP:=false}
: ${EMA:=0}
: ${DALI_DEVICE:="gpu"}
: ${NUM_GPUS_SEQ:="1 4 8"}
: ${BATCH_SIZE_SEQ:="32"}
# A probable range of batch lengths for LibriSpeech
# with BS=64 and continuous speed perturbation (0.85, 1.15)
: ${PRE_ALLOCATE:="1408 1920"}
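# All of the variables above are exported (via `set -a`) and are expected to be read by
# scripts/train.sh in the sweep below; LOG_FILE is likewise set per configuration.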
for NUM_GPUS in $NUM_GPUS_SEQ; do
for BATCH_SIZE in $BATCH_SIZE_SEQ; do
LOG_FILE="$OUTPUT_DIR/perf-train_dali-${DALI_DEVICE}_amp-${AMP}_ngpus${NUM_GPUS}_bs${BATCH_SIZE}.json"
bash ./scripts/train.sh "$@"
done
done
|
#!/bin/bash
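# On any command failure, the ERR trap below prints the script name, line number, exit status,
# user and enclosing function; the errtrace option set below makes it fire inside functions too,
# and errexit stops the script at that point.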
trap 'echo "${BASH_SOURCE[0]}: line ${LINENO}: status ${?}: user ${USER}: func ${FUNCNAME[0]}"' ERR
set -o errexit
set -o errtrace
NEXUS_URL="http://nexus.example.com"
# NEXUS_URL="https://nexus.example.com"
NEXUS_USER="admin"
NEXUS_PASSWORD="admin123"
REPOSITORY_NAME="${1}"
######################################################################
function delete_repository {
curl --insecure \
--user ${NEXUS_USER}:${NEXUS_PASSWORD} \
--request DELETE \
--header "Accept:application/json" \
${NEXUS_URL}/service/rest/beta/repositories/${REPOSITORY_NAME}
}
######################################################################
delete_repository
|
#!/bin/bash
BUILD_DIR="../../../../../../build/android/ndk/layers/glsl_shader/"
ndk-build NDK_LIBS_OUT=${BUILD_DIR}libs NDK_OUT=${BUILD_DIR}obj
adb push ${BUILD_DIR}libs/armeabi-v7a/libVkLayer_glsl_shader.so /data/local/debug/vulkan/libVkLayer_glsl_shader-armeabi-v7a.so
adb push ${BUILD_DIR}libs/arm64-v8a/libVkLayer_glsl_shader.so /data/local/debug/vulkan/libVkLayer_glsl_shader-arm64-v8a.so
adb push ${BUILD_DIR}libs/x86/libVkLayer_glsl_shader.so /data/local/debug/vulkan/libVkLayer_glsl_shader-x86.so
adb push ${BUILD_DIR}libs/x86_64/libVkLayer_glsl_shader.so /data/local/debug/vulkan/libVkLayer_glsl_shader-x86_64.so
|
#!/bin/bash
# Copyright 2017 Vimal Manohar
# Apache 2.0
# We assume run-1-main.sh has already been run (because we are using model directories
# like exp/tri4), and that run-4-anydecode.sh was later run to prepare
# data/dev10h.pem
lang=data/lang # Must match the one used to train the models
lang_test=data/lang # Lang directory for decoding.
data_dir=data/train
# Model directory used to align the $data_dir to get target labels for training
# SAD. This should typically be a speaker-adapted system.
sat_model_dir=exp/tri5_cleaned
# Model directory used to decode the whole-recording version of the $data_dir to
# get target labels for training SAD. This should typically be a
# speaker-independent system like an LDA+MLLT system.
model_dir=exp/tri4
graph_dir= # If not provided, a new one will be created using $lang_test
# Uniform segmentation options for decoding whole recordings. All values are in
# seconds.
max_segment_duration=10
overlap_duration=2.5
max_remaining_duration=5 # If the last remaining piece when splitting uniformly
# is smaller than this duration, then the last piece
# is merged with the previous.
# List of weights on labels obtained from alignment,
# labels obtained from decoding and default labels in out-of-segment regions
merge_weights=1.0,0.1,0.5
prepare_targets_stage=-10
nstage=-10
train_stage=-10
affix=_1a
stage=-1
nj=80
reco_nj=40
# test options
test_nj=32
test_stage=-10
# Babel specific configuration. These two lines can be removed when adapting to other corpora.
[ ! -f ./lang.conf ] && echo 'Language configuration does not exist! Use the configurations in conf/lang/* as a startup' && exit 1
. ./lang.conf || exit 1;
. ./path.sh
. ./cmd.sh
set -e -u -o pipefail
. utils/parse_options.sh
if [ $# -ne 0 ]; then
exit 1
fi
dir=exp/segmentation${affix}
mkdir -p $dir
# See $lang/phones.txt and decide which should be garbage
garbage_phones="<oov> <vns>"
silence_phones="<sss> SIL"
for p in $garbage_phones; do
for a in "" "_B" "_E" "_I" "_S"; do
echo "$p$a"
done
done > $dir/garbage_phones.txt
for p in $silence_phones; do
for a in "" "_B" "_E" "_I" "_S"; do
echo "$p$a"
done
done > $dir/silence_phones.txt
if ! cat $dir/garbage_phones.txt $dir/silence_phones.txt | \
steps/segmentation/internal/verify_phones_list.py $lang/phones.txt; then
echo "$0: Invalid $dir/{silence,garbage}_phones.txt"
exit 1
fi
whole_data_dir=${data_dir}_whole
whole_data_id=$(basename $whole_data_dir)
if [ $stage -le 0 ]; then
utils/data/convert_data_dir_to_whole.sh $data_dir $whole_data_dir
fi
###############################################################################
# Extract features for the whole data directory
###############################################################################
if [ $stage -le 1 ]; then
if $use_pitch; then
steps/make_plp_pitch.sh --cmd "$train_cmd" --nj $reco_nj --write-utt2num-frames true \
${whole_data_dir} || exit 1
else
steps/make_plp.sh --cmd "$train_cmd" --nj $reco_nj --write-utt2num-frames true \
${whole_data_dir} || exit 1
fi
steps/compute_cmvn_stats.sh $whole_data_dir
utils/fix_data_dir.sh $whole_data_dir
fi
###############################################################################
# Prepare SAD targets for recordings
###############################################################################
targets_dir=$dir/${whole_data_id}_combined_targets_sub3
if [ $stage -le 3 ]; then
steps/segmentation/prepare_targets_gmm.sh --stage $prepare_targets_stage \
--train-cmd "$train_cmd" --decode-cmd "$decode_cmd" \
--nj $nj --reco-nj $reco_nj --lang-test $lang_test \
--garbage-phones-list $dir/garbage_phones.txt \
--silence-phones-list $dir/silence_phones.txt \
--merge-weights "$merge_weights" \
--graph-dir "$graph_dir" \
$lang $data_dir $whole_data_dir $sat_model_dir $model_dir $dir
fi
if [ $stage -le 4 ]; then
utils/copy_data_dir.sh ${whole_data_dir} ${whole_data_dir}_hires_bp
steps/make_mfcc.sh --mfcc-config conf/mfcc_hires_bp.conf --nj $reco_nj \
${whole_data_dir}_hires_bp
steps/compute_cmvn_stats.sh ${whole_data_dir}_hires_bp
fi
if [ $stage -le 5 ]; then
# Train a TDNN-LSTM network for SAD
local/segmentation/tuning/train_lstm_asr_sad_1a.sh \
--stage $nstage --train-stage $train_stage \
--targets-dir $targets_dir \
--data-dir ${whole_data_dir}_hires_bp
fi
if [ $stage -le 6 ]; then
# The options to this script must match the options used in the
# nnet training script.
# e.g. extra-left-context is 70, because the model is an LSTM trained with a
# chunk-left-context of 60.
# Note: frames-per-chunk is 150 even though the model was trained with
# chunk-width of 20. This is just for speed.
# See the script for details of the options.
steps/segmentation/detect_speech_activity.sh \
--extra-left-context 70 --extra-right-context 0 --frames-per-chunk 150 \
--extra-left-context-initial 0 --extra-right-context-final 0 \
--nj $test_nj --acwt 0.3 --stage $test_stage \
data/dev10h.pem \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a \
mfcc_hires_bp \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/{,dev10h}
fi
if [ $stage -le 7 ]; then
# Do some diagnostics
steps/segmentation/evalute_segmentation.pl data/dev10h.pem/segments \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/segments &> \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/evalutate_segmentation.log
steps/segmentation/convert_utt2spk_and_segments_to_rttm.py \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/utt2spk \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/segments \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/sys.rttm
export PATH=$PATH:$KALDI_ROOT/tools/sctk/bin
md-eval.pl -c 0.25 -r $dev10h_rttm_file \
-s exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/sys.rttm > \
exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg/md_eval.log
fi
if [ $stage -le 8 ]; then
utils/copy_data_dir.sh exp/segmentation_1a/tdnn_lstm_asr_sad_1a/dev10h_seg \
data/dev10h.seg_asr_sad_1a
fi
# run-4-anydecode.sh --dir dev10h.seg_tdnn_lstm_asr_sad_1a
# %WER 40.6 | 21825 101803 | 63.6 26.3 10.1 4.1 40.6 29.8 | -0.469 | exp/chain_cleaned_pitch/tdnn_flstm_sp_bi/decode_dev10h.pem/score_11/dev10h.pem.ctm.sys
# %WER 41.1 | 21825 101803 | 63.5 26.1 10.4 4.5 41.1 31.8 | -0.523 | exp/chain_cleaned_pitch/tdnn_flstm_sp_bi/decode_dev10h.seg/score_11/dev10h.seg.ctm.sys
# %WER 40.9 | 21825 101803 | 63.5 26.1 10.4 4.4 40.9 31.4 | -0.527 | exp/chain_cleaned_pitch/tdnn_flstm_sp_bi/decode_dev10h.seg_1a_tdnn_stats_asr_sad_1a_acwt0_3/score_11/dev10h.seg_1a_tdnn_stats_asr_sad_1a_acwt0_3.ctm.sys
# %WER 41.0 | 21825 101803 | 63.5 26.1 10.4 4.5 41.0 31.5 | -0.522 | exp/chain_cleaned_pitch/tdnn_flstm_sp_bi/decode_dev10h.seg_asr_sad_1a/score_11/dev10h.seg_asr_sad_1a.ctm.sys
|
#!/usr/bin/env bash
################################################################################
# Travis-CI : deploy-doxygen
# --------------------------
#
# Description:
# Generates doxygen output, and pushes it to the gh-pages branch of the Scope
# repository
#
# Requirements:
# doxygen installed
# graphviz installed
# conan installed
#
# Environment:
# GITHUB_REPO_REF : Reference to the github repo
# GITHUB_TOKEN : Access token for github url
# TRAVIS_BUILD_NUMBER : The build number that generates the documentation
# TRAVIS_COMMIT : The commit hash that generates the documentation
################################################################################
set -e
if [[ $# -gt 1 ]]; then
echo >&2 "Expected at most 1 argument. Received $#."
echo >&2 "Usage: $0 [latest]"
exit 1
fi
version="latest"
if [[ $# -eq 0 ]]; then
version="v$(conan inspect . --attribute version | sed 's@version: @@g')"
elif [[ $1 != "latest" ]]; then
echo >&2 "Expected at most 1 argument. Received $#."
echo >&2 "Usage: $0 [latest]"
exit 1
fi
# Generate documentation
doxygen_output_path="$(pwd)/build/doc"
mkdir -p "${doxygen_output_path}"
doxygen "$(pwd)/.codedocs"
if [[ ! -d "${doxygen_output_path}/html" ]] || [[ ! -f "${doxygen_output_path}/html/index.html" ]]; then
echo 'Error: No documentation (html) files have been found!' >&2
exit 1
fi
dist_path="$(pwd)/dist"
api_doc_path="${dist_path}/api/${version}"
# Clone a git repo for doxygen
git clone --single-branch -b gh-pages "https://${GITHUB_REPO_REF}" "${dist_path}"
git config --global push.default simple
# Add a .nojekyll file
touch "dist/.nojekyll"
# Exchange the old api content for the new content
rm -rf "${api_doc_path}"
mkdir -p "${api_doc_path}"
mv ${doxygen_output_path}/html/* "${api_doc_path}"
# Add everything and upload
(
cd "${dist_path}"
git add --all
git commit \
-m "Deploy codedocs to Github Pages" \
-m "Documentation updated by build ${TRAVIS_BUILD_NUMBER}." \
-m "Commit: '${TRAVIS_COMMIT}'" \
--author "Deployment Bot <[email protected]>" \
--no-gpg-sign
git push \
--force "https://${GITHUB_TOKEN}@${GITHUB_REPO_REF}" gh-pages \
> /dev/null 2>&1
) |
#!/usr/bin/env bash
set -e
# setting up colors
BLU='\033[0;34m'
YLW='\033[0;33m'
GRN='\033[0;32m'
RED='\033[0;31m'
NOC='\033[0m' # No Color
echo_info(){
printf "\n${BLU}%s${NOC}" "$1"
}
echo_step(){
printf "\n${BLU}>>>>>>> %s${NOC}\n" "$1"
}
echo_sub_step(){
printf "\n${BLU}>>> %s${NOC}\n" "$1"
}
echo_step_completed(){
printf "${GRN} [✔]${NOC}"
}
echo_success(){
printf "\n${GRN}%s${NOC}\n" "$1"
}
echo_warn(){
printf "\n${YLW}%s${NOC}" "$1"
}
echo_error(){
printf "\n${RED}%s${NOC}" "$1"
exit 1
}
# ------------------------------
projectdir="$( cd "$( dirname "${BASH_SOURCE[0]}")"/../.. && pwd )"
# get the build environment variables from the special build.vars target in the main makefile
eval $(make --no-print-directory -C ${projectdir} build.vars)
# ------------------------------
SAFEHOSTARCH="${SAFEHOSTARCH:-amd64}"
BUILD_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-${SAFEHOSTARCH}"
CONTROLLER_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-controller-${SAFEHOSTARCH}"
version_tag="$(cat ${projectdir}/_output/version)"
# tag as latest version to load into kind cluster
PACKAGE_CONTROLLER_IMAGE="${DOCKER_REGISTRY}/${PROJECT_NAME}-controller:${VERSION}"
K8S_CLUSTER="${K8S_CLUSTER:-${BUILD_REGISTRY}-inttests}"
CROSSPLANE_NAMESPACE="crossplane-system"
PACKAGE_NAME="provider-styra"
# cleanup on exit
if [ "$skipcleanup" != true ]; then
function cleanup {
echo_step "Cleaning up..."
export KUBECONFIG=
"${KIND}" delete cluster --name="${K8S_CLUSTER}"
}
trap cleanup EXIT
fi
# setup package cache
echo_step "setting up local package cache"
CACHE_PATH="${projectdir}/.work/inttest-package-cache"
mkdir -p "${CACHE_PATH}"
echo "created cache dir at ${CACHE_PATH}"
docker save "${BUILD_IMAGE}" -o "${CACHE_PATH}/${PACKAGE_NAME}.xpkg" && chmod 644 "${CACHE_PATH}/${PACKAGE_NAME}.xpkg"
# create kind cluster with extra mounts
echo_step "creating k8s cluster using kind"
KIND_CONFIG="$( cat <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
extraMounts:
- hostPath: "${CACHE_PATH}/"
containerPath: /cache
EOF
)"
echo "${KIND_CONFIG}" | "${KIND}" create cluster --name="${K8S_CLUSTER}" --wait=5m --config=-
# tag controller image and load it into kind cluster
docker tag "${CONTROLLER_IMAGE}" "${PACKAGE_CONTROLLER_IMAGE}"
"${KIND}" load docker-image "${PACKAGE_CONTROLLER_IMAGE}" --name="${K8S_CLUSTER}"
# files are not synced properly from host to kind node container on Jenkins, so
# we must manually copy image from host to node
echo_step "pre-cache package by copying to kind node"
docker cp "${CACHE_PATH}/${PACKAGE_NAME}.xpkg" "${K8S_CLUSTER}-control-plane":"/cache/${PACKAGE_NAME}.xpkg"
echo_step "create crossplane-system namespace"
"${KUBECTL}" create ns crossplane-system
echo_step "create persistent volume and claim for mounting package-cache"
PV_YAML="$( cat <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: package-cache
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 5Mi
accessModes:
- ReadWriteOnce
hostPath:
path: "/cache"
EOF
)"
echo "${PV_YAML}" | "${KUBECTL}" create -f -
PVC_YAML="$( cat <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: package-cache
namespace: crossplane-system
spec:
accessModes:
- ReadWriteOnce
volumeName: package-cache
storageClassName: manual
resources:
requests:
storage: 1Mi
EOF
)"
echo "${PVC_YAML}" | "${KUBECTL}" create -f -
# install crossplane from master channel
echo_step "installing crossplane from master channel"
"${HELM3}" repo add crossplane-master https://charts.crossplane.io/master/
chart_version="$("${HELM3}" search repo crossplane-master/crossplane --devel | awk 'FNR == 2 {print $2}')"
echo_info "using crossplane version ${chart_version}"
echo
# we replace empty dir with our PVC so that the /cache dir in the kind node
# container is exposed to the crossplane pod
"${HELM3}" install crossplane --namespace crossplane-system crossplane-master/crossplane --version ${chart_version} --devel --wait --set packageCache.pvc=package-cache
# ----------- integration tests
echo_step "--- INTEGRATION TESTS ---"
# install package
echo_step "installing ${PROJECT_NAME} into \"${CROSSPLANE_NAMESPACE}\" namespace"
INSTALL_YAML="$( cat <<EOF
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: "${PACKAGE_NAME}"
spec:
package: "${PACKAGE_NAME}"
packagePullPolicy: Never
EOF
)"
echo "${INSTALL_YAML}" | "${KUBECTL}" apply -f -
# printing the cache dir contents can be useful for troubleshooting failures
echo_step "check kind node cache dir contents"
docker exec "${K8S_CLUSTER}-control-plane" ls -la /cache
echo_step "waiting for provider to be installed"
kubectl wait "provider.pkg.crossplane.io/${PACKAGE_NAME}" --for=condition=healthy --timeout=60s
echo_step "uninstalling ${PROJECT_NAME}"
echo "${INSTALL_YAML}" | "${KUBECTL}" delete -f -
# check pods deleted
timeout=60
current=0
step=3
while [[ $(kubectl get providerrevision.pkg.crossplane.io -o name | wc -l) != "0" ]]; do
echo "waiting for provider to be deleted for another $step seconds"
  current=$((current + step))
  if [[ $current -ge $timeout ]]; then
echo_error "timeout of ${timeout}s has been reached"
fi
sleep $step;
done
echo_success "Integration tests succeeded!"
|
#!/bin/sh
if [[ -z "$STS_API_KEY" ]]; then
echo "You must set an STS_API_KEY environment variable to run the StackState Trace Agent container"
exit 1
fi
if [[ -z "$STS_PROCESS_AGENT_URL" ]]; then
echo "You must set an STS_APM_URL environment variable to run the StackState Trace Agent container"
exit 1
fi
/opt/stackstate-agent/bin/agent/process-agent -config /etc/stackstate-agent/stackstate-docker.yaml
|